From 600b19da6efa886fbbcb01589a804fb165410a2e Mon Sep 17 00:00:00 2001 From: Hernando Castano Date: Mon, 1 Apr 2019 08:13:30 +0200 Subject: [PATCH] Fix sloppy project wide code formatting --- build.rs | 6 +- core/basic-authorship/src/basic_authorship.rs | 536 +- core/basic-authorship/src/lib.rs | 2 +- core/cli/src/error.rs | 36 +- core/cli/src/informant.rs | 318 +- core/cli/src/lib.rs | 1389 ++--- core/cli/src/params.rs | 1054 ++-- core/cli/src/traits.rs | 24 +- core/client/db/src/cache/list_cache.rs | 3840 +++++++++----- core/client/db/src/cache/list_entry.rs | 366 +- core/client/db/src/cache/list_storage.rs | 677 +-- core/client/db/src/cache/mod.rs | 428 +- core/client/db/src/lib.rs | 4338 +++++++++------- core/client/db/src/light.rs | 2255 ++++---- core/client/db/src/storage_cache.rs | 829 +-- core/client/db/src/utils.rs | 417 +- core/client/src/backend.rs | 300 +- core/client/src/block_builder/api.rs | 32 +- .../client/src/block_builder/block_builder.rs | 153 +- core/client/src/blockchain.rs | 336 +- core/client/src/call_executor.rs | 560 +- core/client/src/children.rs | 149 +- core/client/src/cht.rs | 614 ++- core/client/src/client.rs | 4619 ++++++++++------- core/client/src/error.rs | 272 +- core/client/src/genesis.rs | 433 +- core/client/src/in_mem.rs | 1405 ++--- core/client/src/leaves.rs | 629 +-- core/client/src/lib.rs | 36 +- core/client/src/light/backend.rs | 944 ++-- core/client/src/light/blockchain.rs | 519 +- core/client/src/light/call_executor.rs | 1023 ++-- core/client/src/light/fetcher.rs | 1489 +++--- core/client/src/light/mod.rs | 84 +- core/client/src/notifications.rs | 499 +- core/client/src/runtime_api.rs | 185 +- core/consensus/aura/primitives/src/lib.rs | 20 +- core/consensus/aura/slots/src/lib.rs | 398 +- core/consensus/aura/slots/src/slots.rs | 208 +- core/consensus/aura/src/lib.rs | 1605 +++--- core/consensus/authorities/src/lib.rs | 14 +- core/consensus/common/src/block_import.rs | 280 +- core/consensus/common/src/error.rs | 178 +- core/consensus/common/src/evaluation.rs | 91 +- core/consensus/common/src/import_queue.rs | 1113 ++-- core/consensus/common/src/lib.rs | 101 +- core/consensus/common/src/offline_tracker.rs | 208 +- core/consensus/rhd/src/error.rs | 64 +- core/consensus/rhd/src/lib.rs | 2891 ++++++----- core/consensus/rhd/src/service.rs | 271 +- core/executor/src/allocator.rs | 923 ++-- core/executor/src/error.rs | 120 +- core/executor/src/lib.rs | 30 +- core/executor/src/native_executor.rs | 267 +- core/executor/src/sandbox.rs | 1243 +++-- core/executor/src/wasm_executor.rs | 925 ++-- core/executor/src/wasm_utils.rs | 103 +- core/finality-grandpa/primitives/src/lib.rs | 130 +- core/finality-grandpa/src/authorities.rs | 1471 +++--- core/finality-grandpa/src/aux_schema.rs | 536 +- core/finality-grandpa/src/communication.rs | 798 +-- .../finality-grandpa/src/consensus_changes.rs | 89 +- core/finality-grandpa/src/environment.rs | 871 ++-- core/finality-grandpa/src/finality_proof.rs | 833 +-- core/finality-grandpa/src/import.rs | 1054 ++-- core/finality-grandpa/src/justification.rs | 366 +- core/finality-grandpa/src/lib.rs | 1490 +++--- .../src/service_integration.rs | 21 +- core/finality-grandpa/src/tests.rs | 2026 ++++---- core/finality-grandpa/src/until_imported.rs | 1023 ++-- core/inherents/src/lib.rs | 921 ++-- core/inherents/src/pool.rs | 66 +- core/keyring/src/ed25519.rs | 239 +- core/keyring/src/lib.rs | 4 +- core/keyring/src/sr25519.rs | 251 +- core/keystore/src/lib.rs | 249 +- core/network-libp2p/src/behaviour.rs | 766 +-- 
core/network-libp2p/src/config.rs | 387 +- .../src/custom_proto/behaviour.rs | 1779 ++++--- .../src/custom_proto/handler.rs | 1550 +++--- .../src/custom_proto/upgrade.rs | 701 +-- core/network-libp2p/src/lib.rs | 150 +- core/network-libp2p/src/service_task.rs | 726 +-- core/network-libp2p/src/transport.rs | 67 +- core/network-libp2p/tests/test.rs | 457 +- core/network/src/blocks.rs | 586 ++- core/network/src/chain.rs | 243 +- core/network/src/config.rs | 78 +- core/network/src/consensus_gossip.rs | 1029 ++-- core/network/src/error.rs | 18 +- core/network/src/lib.rs | 26 +- core/network/src/message.rs | 592 ++- core/network/src/on_demand.rs | 2093 ++++---- core/network/src/protocol.rs | 1977 +++---- core/network/src/service.rs | 1017 ++-- core/network/src/specialization.rs | 37 +- core/network/src/sync.rs | 2093 ++++---- core/network/src/test/block_import.rs | 122 +- core/network/src/test/mod.rs | 1596 +++--- core/network/src/test/sync.rs | 861 +-- core/network/src/util.rs | 79 +- core/offchain/primitives/src/lib.rs | 10 +- core/offchain/src/api.rs | 114 +- core/offchain/src/lib.rs | 144 +- core/panic-handler/src/lib.rs | 115 +- core/peerset/src/lib.rs | 509 +- core/primitives/src/changes_trie.rs | 254 +- core/primitives/src/crypto.rs | 900 ++-- core/primitives/src/ed25519.rs | 909 ++-- core/primitives/src/hash.rs | 160 +- core/primitives/src/hasher.rs | 62 +- core/primitives/src/hashing.rs | 92 +- core/primitives/src/hexdisplay.rs | 92 +- core/primitives/src/lib.rs | 167 +- core/primitives/src/sandbox.rs | 210 +- core/primitives/src/sr25519.rs | 960 ++-- core/primitives/src/storage.rs | 89 +- core/primitives/src/u32_trait.rs | 169 +- core/primitives/src/uint.rs | 132 +- core/rpc-servers/src/lib.rs | 97 +- core/rpc/src/author/error.rs | 52 +- core/rpc/src/author/mod.rs | 252 +- core/rpc/src/author/tests.rs | 196 +- core/rpc/src/chain/error.rs | 38 +- core/rpc/src/chain/mod.rs | 383 +- core/rpc/src/chain/number.rs | 58 +- core/rpc/src/chain/tests.rs | 428 +- core/rpc/src/errors.rs | 22 +- core/rpc/src/helpers.rs | 13 +- core/rpc/src/metadata.rs | 36 +- core/rpc/src/state/error.rs | 48 +- core/rpc/src/state/mod.rs | 922 ++-- core/rpc/src/state/tests.rs | 387 +- core/rpc/src/subscriptions.rs | 123 +- core/rpc/src/system/error.rs | 50 +- core/rpc/src/system/helpers.rs | 121 +- core/rpc/src/system/mod.rs | 181 +- core/rpc/src/system/tests.rs | 313 +- core/serializer/src/lib.rs | 13 +- core/service/src/chain_ops.rs | 326 +- core/service/src/chain_spec.rs | 362 +- core/service/src/components.rs | 965 ++-- core/service/src/config.rs | 187 +- core/service/src/error.rs | 30 +- core/service/src/lib.rs | 851 +-- core/service/test/src/lib.rs | 541 +- core/sr-api-macros/benches/bench.rs | 86 +- core/sr-api-macros/src/compile_fail_tests.rs | 706 +-- core/sr-api-macros/src/decl_runtime_apis.rs | 1191 ++--- core/sr-api-macros/src/impl_runtime_apis.rs | 1047 ++-- core/sr-api-macros/src/lib.rs | 8 +- core/sr-api-macros/src/utils.rs | 202 +- core/sr-api-macros/tests/decl_and_impl.rs | 157 +- core/sr-api-macros/tests/runtime_calls.rs | 94 +- core/sr-io/src/lib.rs | 17 +- core/sr-primitives/src/generic/block.rs | 90 +- .../src/generic/checked_extrinsic.rs | 45 +- core/sr-primitives/src/generic/digest.rs | 372 +- core/sr-primitives/src/generic/era.rs | 303 +- core/sr-primitives/src/generic/header.rs | 287 +- core/sr-primitives/src/generic/mod.rs | 58 +- core/sr-primitives/src/generic/tests.rs | 58 +- .../src/generic/unchecked_extrinsic.rs | 266 +- .../unchecked_mortal_compact_extrinsic.rs | 636 ++- 
.../src/generic/unchecked_mortal_extrinsic.rs | 611 ++- core/sr-primitives/src/lib.rs | 722 +-- core/sr-primitives/src/testing.rs | 330 +- core/sr-primitives/src/traits.rs | 872 ++-- .../sr-primitives/src/transaction_validity.rs | 60 +- core/sr-sandbox/src/lib.rs | 227 +- core/sr-std/build.rs | 12 +- core/sr-std/src/lib.rs | 29 +- core/sr-version/src/lib.rs | 217 +- core/state-db/src/lib.rs | 870 ++-- core/state-db/src/noncanonical.rs | 1552 +++--- core/state-db/src/pruning.rs | 696 +-- core/state-db/src/test.rs | 107 +- core/state-machine/src/backend.rs | 552 +- core/state-machine/src/basic.rs | 292 +- core/state-machine/src/changes_trie/build.rs | 816 ++- .../src/changes_trie/build_iterator.rs | 413 +- .../src/changes_trie/changes_iterator.rs | 1053 ++-- core/state-machine/src/changes_trie/input.rs | 163 +- core/state-machine/src/changes_trie/mod.rs | 56 +- core/state-machine/src/changes_trie/prune.rs | 544 +- .../state-machine/src/changes_trie/storage.rs | 195 +- core/state-machine/src/ext.rs | 815 +-- core/state-machine/src/lib.rs | 1643 +++--- core/state-machine/src/overlayed_changes.rs | 1016 ++-- core/state-machine/src/proving_backend.rs | 456 +- core/state-machine/src/testing.rs | 356 +- core/state-machine/src/trie_backend.rs | 496 +- .../state-machine/src/trie_backend_essence.rs | 492 +- core/telemetry/src/lib.rs | 382 +- core/test-client/src/block_builder_ext.rs | 29 +- core/test-client/src/client_ext.rs | 122 +- core/test-client/src/lib.rs | 279 +- core/test-client/src/trait_tests.rs | 752 +-- core/test-runtime/src/genesismap.rs | 105 +- core/test-runtime/src/lib.rs | 755 +-- core/test-runtime/src/system.rs | 748 +-- core/transaction-pool/graph/src/base_pool.rs | 1636 +++--- core/transaction-pool/graph/src/error.rs | 97 +- core/transaction-pool/graph/src/future.rs | 382 +- core/transaction-pool/graph/src/lib.rs | 7 +- core/transaction-pool/graph/src/listener.rs | 142 +- core/transaction-pool/graph/src/pool.rs | 1931 +++---- core/transaction-pool/graph/src/ready.rs | 1105 ++-- core/transaction-pool/graph/src/rotator.rs | 330 +- core/transaction-pool/graph/src/watcher.rs | 190 +- core/transaction-pool/src/api.rs | 96 +- core/transaction-pool/src/error.rs | 25 +- core/transaction-pool/src/tests.rs | 226 +- core/trie/benches/bench.rs | 22 +- core/trie/src/error.rs | 18 +- core/trie/src/lib.rs | 940 ++-- core/trie/src/node_codec.rs | 221 +- core/trie/src/node_header.rs | 104 +- core/trie/src/trie_stream.rs | 139 +- core/util/fork-tree/src/lib.rs | 1561 +++--- node-template/build.rs | 6 +- node-template/runtime/src/lib.rs | 359 +- node-template/runtime/src/template.rs | 193 +- node-template/src/chain_spec.rs | 135 +- node-template/src/cli.rs | 152 +- node-template/src/main.rs | 24 +- node-template/src/service.rs | 175 +- node/cli/build.rs | 49 +- node/cli/src/chain_spec.rs | 285 +- node/cli/src/lib.rs | 177 +- node/cli/src/service.rs | 366 +- node/executor/src/lib.rs | 1705 +++--- node/primitives/src/lib.rs | 13 +- node/runtime/src/lib.rs | 409 +- node/src/main.rs | 53 +- srml/assets/src/lib.rs | 420 +- srml/aura/src/lib.rs | 303 +- srml/aura/src/mock.rs | 82 +- srml/aura/src/tests.rs | 122 +- srml/balances/src/lib.rs | 1494 +++--- srml/balances/src/mock.rs | 198 +- srml/balances/src/tests.rs | 983 ++-- srml/consensus/src/lib.rs | 472 +- srml/consensus/src/mock.rs | 67 +- srml/consensus/src/tests.rs | 149 +- srml/contract/src/account_db.rs | 481 +- srml/contract/src/exec.rs | 2431 ++++----- srml/contract/src/gas.rs | 494 +- srml/contract/src/lib.rs | 708 +-- srml/contract/src/tests.rs 
| 697 +-- srml/contract/src/wasm/code_cache.rs | 81 +- srml/contract/src/wasm/env_def/macros.rs | 266 +- srml/contract/src/wasm/env_def/mod.rs | 85 +- srml/contract/src/wasm/mod.rs | 1004 ++-- srml/contract/src/wasm/prepare.rs | 770 +-- srml/contract/src/wasm/runtime.rs | 230 +- srml/council/src/lib.rs | 259 +- srml/council/src/motions.rs | 712 ++- srml/council/src/seats.rs | 2947 ++++++----- srml/council/src/voting.rs | 1067 ++-- srml/democracy/src/lib.rs | 2281 ++++---- srml/democracy/src/vote_threshold.rs | 167 +- srml/example/src/lib.rs | 543 +- srml/executive/src/lib.rs | 458 +- srml/finality-tracker/src/lib.rs | 607 ++- srml/grandpa/src/lib.rs | 531 +- srml/grandpa/src/mock.rs | 78 +- srml/grandpa/src/tests.rs | 300 +- srml/indices/src/address.rs | 198 +- srml/indices/src/lib.rs | 274 +- srml/indices/src/mock.rs | 104 +- srml/indices/src/tests.rs | 66 +- srml/metadata/src/lib.rs | 304 +- srml/session/src/lib.rs | 795 +-- srml/staking/src/lib.rs | 1491 +++--- srml/staking/src/mock.rs | 422 +- srml/staking/src/phragmen.rs | 661 +-- srml/staking/src/tests.rs | 4287 ++++++++------- srml/sudo/src/lib.rs | 87 +- srml/support/procedural/src/lib.rs | 4 +- srml/support/procedural/src/storage/impls.rs | 1242 ++--- srml/support/procedural/src/storage/mod.rs | 171 +- .../procedural/src/storage/transformation.rs | 1695 +++--- .../procedural/tools/derive/src/lib.rs | 354 +- srml/support/procedural/tools/src/lib.rs | 122 +- srml/support/procedural/tools/src/syn_ext.rs | 425 +- srml/support/src/dispatch.rs | 245 +- srml/support/src/double_map.rs | 208 +- srml/support/src/event.rs | 478 +- srml/support/src/hashable.rs | 24 +- srml/support/src/inherent.rs | 3 +- srml/support/src/lib.rs | 616 +-- srml/support/src/metadata.rs | 409 +- srml/support/src/origin.rs | 96 +- srml/support/src/storage/generator.rs | 1294 ++--- srml/support/src/storage/mod.rs | 1034 ++-- .../support/src/storage/unhashed/generator.rs | 227 +- srml/support/src/storage/unhashed/mod.rs | 145 +- srml/support/src/traits.rs | 697 ++- srml/support/test/src/lib.rs | 1 + srml/support/test/tests/instance.rs | 790 +-- srml/system/src/lib.rs | 948 ++-- srml/timestamp/src/lib.rs | 554 +- srml/treasury/src/lib.rs | 825 +-- subkey/src/main.rs | 386 +- subkey/src/vanity.rs | 253 +- 306 files changed, 90266 insertions(+), 76287 deletions(-) diff --git a/build.rs b/build.rs index 273700c525..1cc141f112 100644 --- a/build.rs +++ b/build.rs @@ -14,11 +14,11 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
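A note on the build.rs hunk just below: it only reorders the vergen imports, but the call it keeps is what wires git metadata into the binary. generate_cargo_keys(ConstantsFlags::all()) prints cargo:rustc-env lines during the build, which the crate can then read at compile time with env!(). A minimal consumer-side sketch; the VERGEN_SHA_SHORT key name is an assumption based on vergen 3.x, not something this patch shows:

    // Sketch: reading one of the keys emitted by
    // generate_cargo_keys(ConstantsFlags::all()) in build.rs.
    // VERGEN_SHA_SHORT is assumed from vergen 3.x; adjust to the enabled flags.
    fn main() {
        let commit: &str = env!("VERGEN_SHA_SHORT");
        println!("built from commit {}", commit);
    }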
-use vergen::{ConstantsFlags, generate_cargo_keys}; +use vergen::{generate_cargo_keys, ConstantsFlags}; const ERROR_MSG: &str = "Failed to generate metadata files"; fn main() { - generate_cargo_keys(ConstantsFlags::all()).expect(ERROR_MSG); - println!("cargo:rerun-if-changed=.git/HEAD"); + generate_cargo_keys(ConstantsFlags::all()).expect(ERROR_MSG); + println!("cargo:rerun-if-changed=.git/HEAD"); } diff --git a/core/basic-authorship/src/basic_authorship.rs b/core/basic-authorship/src/basic_authorship.rs index e9b6c909ad..e334902a9f 100644 --- a/core/basic-authorship/src/basic_authorship.rs +++ b/core/basic-authorship/src/basic_authorship.rs @@ -18,189 +18,217 @@ // FIXME #1021 move this into substrate-consensus-common // -use std::{self, time, sync::Arc}; +use std::{self, sync::Arc, time}; -use log::{info, debug, warn, trace}; +use log::{debug, info, trace, warn}; use client::{ - self, error, Client as SubstrateClient, CallExecutor, - block_builder::api::BlockBuilder as BlockBuilderApi, runtime_api::Core, + self, block_builder::api::BlockBuilder as BlockBuilderApi, error, runtime_api::Core, + CallExecutor, Client as SubstrateClient, }; use codec::Decode; use consensus_common::{self, evaluation}; -use primitives::{H256, Blake2Hasher, ExecutionContext}; +use inherents::{pool::InherentsPool, InherentData}; +use primitives::{Blake2Hasher, ExecutionContext, H256}; +use runtime_primitives::generic::BlockId; use runtime_primitives::traits::{ - Block as BlockT, Hash as HashT, Header as HeaderT, ProvideRuntimeApi, AuthorityIdFor + AuthorityIdFor, Block as BlockT, Hash as HashT, Header as HeaderT, ProvideRuntimeApi, }; -use runtime_primitives::generic::BlockId; use runtime_primitives::ApplyError; -use transaction_pool::txpool::{self, Pool as TransactionPool}; -use inherents::{InherentData, pool::InherentsPool}; use substrate_telemetry::{telemetry, CONSENSUS_INFO}; +use transaction_pool::txpool::{self, Pool as TransactionPool}; /// Build new blocks. pub trait BlockBuilder { - /// Push an extrinsic onto the block. Fails if the extrinsic is invalid. - fn push_extrinsic(&mut self, extrinsic: ::Extrinsic) -> Result<(), error::Error>; + /// Push an extrinsic onto the block. Fails if the extrinsic is invalid. + fn push_extrinsic( + &mut self, + extrinsic: ::Extrinsic, + ) -> Result<(), error::Error>; } /// Local client abstraction for the consensus. -pub trait AuthoringApi: Send + Sync + ProvideRuntimeApi where - ::Api: Core +pub trait AuthoringApi: Send + Sync + ProvideRuntimeApi +where + ::Api: Core, { - /// The block used for this API type. - type Block: BlockT; - /// The error used by this API type. - type Error: std::error::Error; - - /// Build a block on top of the given, with inherent extrinsics pre-pushed. - fn build_block) -> ()>( - &self, - at: &BlockId, - inherent_data: InherentData, - build_ctx: F, - ) -> Result; + /// The block used for this API type. + type Block: BlockT; + /// The error used by this API type. + type Error: std::error::Error; + + /// Build a block on top of the given, with inherent extrinsics pre-pushed. 
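The build_block entry point declared next takes the parent block id, the inherent data, and a closure over the block builder: inherents are pushed first, then the closure appends whatever else belongs in the block. A self-contained sketch of that callback shape, with deliberately simplified stand-in types (String extrinsics instead of the real generic Block machinery):

    // Sketch of the callback-driven builder pattern behind `build_block`.
    // Types here are hypothetical simplifications, not the real trait.
    struct BlockBuilder {
        extrinsics: Vec<String>,
    }

    impl BlockBuilder {
        fn push(&mut self, xt: String) {
            self.extrinsics.push(xt)
        }
    }

    fn build_block<F: FnMut(&mut BlockBuilder)>(mut build_ctx: F) -> Vec<String> {
        // Inherents are pre-pushed before the caller's closure runs.
        let mut builder = BlockBuilder { extrinsics: vec!["inherent".to_string()] };
        build_ctx(&mut builder);
        builder.extrinsics
    }

    fn main() {
        let block = build_block(|b| b.push("transfer".to_string()));
        assert_eq!(block, vec!["inherent".to_string(), "transfer".to_string()]);
    }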
+ fn build_block) -> ()>( + &self, + at: &BlockId, + inherent_data: InherentData, + build_ctx: F, + ) -> Result; } impl<'a, B, E, Block, RA> BlockBuilder - for client::block_builder::BlockBuilder<'a, Block, SubstrateClient> + for client::block_builder::BlockBuilder<'a, Block, SubstrateClient> where - B: client::backend::Backend + 'static, - E: CallExecutor + Send + Sync + Clone + 'static, - Block: BlockT, - RA: Send + Sync + 'static, - SubstrateClient : ProvideRuntimeApi, - as ProvideRuntimeApi>::Api: BlockBuilderApi, + B: client::backend::Backend + 'static, + E: CallExecutor + Send + Sync + Clone + 'static, + Block: BlockT, + RA: Send + Sync + 'static, + SubstrateClient: ProvideRuntimeApi, + as ProvideRuntimeApi>::Api: BlockBuilderApi, { - fn push_extrinsic(&mut self, extrinsic: ::Extrinsic) -> Result<(), error::Error> { - client::block_builder::BlockBuilder::push(self, extrinsic).map_err(Into::into) - } + fn push_extrinsic( + &mut self, + extrinsic: ::Extrinsic, + ) -> Result<(), error::Error> { + client::block_builder::BlockBuilder::push(self, extrinsic).map_err(Into::into) + } } -impl AuthoringApi for SubstrateClient where - B: client::backend::Backend + Send + Sync + 'static, - E: CallExecutor + Send + Sync + Clone + 'static, - Block: BlockT, - RA: Send + Sync + 'static, - SubstrateClient : ProvideRuntimeApi, - as ProvideRuntimeApi>::Api: BlockBuilderApi, +impl AuthoringApi for SubstrateClient +where + B: client::backend::Backend + Send + Sync + 'static, + E: CallExecutor + Send + Sync + Clone + 'static, + Block: BlockT, + RA: Send + Sync + 'static, + SubstrateClient: ProvideRuntimeApi, + as ProvideRuntimeApi>::Api: BlockBuilderApi, { - type Block = Block; - type Error = client::error::Error; - - fn build_block) -> ()>( - &self, - at: &BlockId, - inherent_data: InherentData, - mut build_ctx: F, - ) -> Result { - let mut block_builder = self.new_block_at(at)?; - - let runtime_api = self.runtime_api(); - // We don't check the API versions any further here since the dispatch compatibility - // check should be enough. - runtime_api.inherent_extrinsics_with_context(at, ExecutionContext::BlockConstruction, inherent_data)? - .into_iter().try_for_each(|i| block_builder.push(i))?; - - build_ctx(&mut block_builder); - - block_builder.bake().map_err(Into::into) - } + type Block = Block; + type Error = client::error::Error; + + fn build_block) -> ()>( + &self, + at: &BlockId, + inherent_data: InherentData, + mut build_ctx: F, + ) -> Result { + let mut block_builder = self.new_block_at(at)?; + + let runtime_api = self.runtime_api(); + // We don't check the API versions any further here since the dispatch compatibility + // check should be enough. + runtime_api + .inherent_extrinsics_with_context( + at, + ExecutionContext::BlockConstruction, + inherent_data, + )? + .into_iter() + .try_for_each(|i| block_builder.push(i))?; + + build_ctx(&mut block_builder); + + block_builder.bake().map_err(Into::into) + } } /// Proposer factory. -pub struct ProposerFactory where A: txpool::ChainApi { - /// The client instance. - pub client: Arc, - /// The transaction pool. - pub transaction_pool: Arc>, - /// The inherents pool - pub inherents_pool: Arc::Extrinsic>>, +pub struct ProposerFactory +where + A: txpool::ChainApi, +{ + /// The client instance. + pub client: Arc, + /// The transaction pool. 
+ pub transaction_pool: Arc>, + /// The inherents pool + pub inherents_pool: Arc::Extrinsic>>, } -impl consensus_common::Environment<::Block> for ProposerFactory where - C: AuthoringApi, - ::Api: BlockBuilderApi<::Block>, - A: txpool::ChainApi::Block>, - client::error::Error: From<::Error>, - Proposer<::Block, C, A>: consensus_common::Proposer<::Block>, +impl consensus_common::Environment<::Block> for ProposerFactory +where + C: AuthoringApi, + ::Api: BlockBuilderApi<::Block>, + A: txpool::ChainApi::Block>, + client::error::Error: From<::Error>, + Proposer<::Block, C, A>: + consensus_common::Proposer<::Block>, { - type Proposer = Proposer<::Block, C, A>; - type Error = error::Error; - - fn init( - &self, - parent_header: &<::Block as BlockT>::Header, - _: &[AuthorityIdFor<::Block>], - ) -> Result { - let parent_hash = parent_header.hash(); - - let id = BlockId::hash(parent_hash); - - info!("Starting consensus session on top of parent {:?}", parent_hash); - - let proposer = Proposer { - client: self.client.clone(), - parent_hash, - parent_id: id, - parent_number: *parent_header.number(), - transaction_pool: self.transaction_pool.clone(), - inherents_pool: self.inherents_pool.clone(), - now: Box::new(time::Instant::now), - }; - - Ok(proposer) - } + type Proposer = Proposer<::Block, C, A>; + type Error = error::Error; + + fn init( + &self, + parent_header: &<::Block as BlockT>::Header, + _: &[AuthorityIdFor<::Block>], + ) -> Result { + let parent_hash = parent_header.hash(); + + let id = BlockId::hash(parent_hash); + + info!( + "Starting consensus session on top of parent {:?}", + parent_hash + ); + + let proposer = Proposer { + client: self.client.clone(), + parent_hash, + parent_id: id, + parent_number: *parent_header.number(), + transaction_pool: self.transaction_pool.clone(), + inherents_pool: self.inherents_pool.clone(), + now: Box::new(time::Instant::now), + }; + + Ok(proposer) + } } /// The proposer logic. 
pub struct Proposer { - client: Arc, - parent_hash: ::Hash, - parent_id: BlockId, - parent_number: <::Header as HeaderT>::Number, - transaction_pool: Arc>, - inherents_pool: Arc::Extrinsic>>, - now: Box time::Instant>, + client: Arc, + parent_hash: ::Hash, + parent_id: BlockId, + parent_number: <::Header as HeaderT>::Number, + transaction_pool: Arc>, + inherents_pool: Arc::Extrinsic>>, + now: Box time::Instant>, } -impl consensus_common::Proposer<::Block> for Proposer where - Block: BlockT, - C: AuthoringApi, - ::Api: BlockBuilderApi, - A: txpool::ChainApi, - client::error::Error: From<::Error> +impl consensus_common::Proposer<::Block> for Proposer +where + Block: BlockT, + C: AuthoringApi, + ::Api: BlockBuilderApi, + A: txpool::ChainApi, + client::error::Error: From<::Error>, { - type Create = Result<::Block, error::Error>; - type Error = error::Error; - - fn propose(&self, inherent_data: InherentData, max_duration: time::Duration) - -> Result<::Block, error::Error> - { - // leave some time for evaluation and block finalization (33%) - let deadline = (self.now)() + max_duration - max_duration / 3; - self.propose_with(inherent_data, deadline) - } + type Create = Result<::Block, error::Error>; + type Error = error::Error; + + fn propose( + &self, + inherent_data: InherentData, + max_duration: time::Duration, + ) -> Result<::Block, error::Error> { + // leave some time for evaluation and block finalization (33%) + let deadline = (self.now)() + max_duration - max_duration / 3; + self.propose_with(inherent_data, deadline) + } } -impl Proposer where - Block: BlockT, - C: AuthoringApi, - ::Api: BlockBuilderApi, - A: txpool::ChainApi, - client::error::Error: From<::Error>, +impl Proposer +where + Block: BlockT, + C: AuthoringApi, + ::Api: BlockBuilderApi, + A: txpool::ChainApi, + client::error::Error: From<::Error>, { - fn propose_with(&self, inherent_data: InherentData, deadline: time::Instant) - -> Result<::Block, error::Error> - { - use runtime_primitives::traits::BlakeTwo256; - - /// If the block is full we will attempt to push at most - /// this number of transactions before quitting for real. - /// It allows us to increase block utilization. - const MAX_SKIPPED_TRANSACTIONS: usize = 8; - - let block = self.client.build_block( + fn propose_with( + &self, + inherent_data: InherentData, + deadline: time::Instant, + ) -> Result<::Block, error::Error> { + use runtime_primitives::traits::BlakeTwo256; + + /// If the block is full we will attempt to push at most + /// this number of transactions before quitting for real. + /// It allows us to increase block utilization. 
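The deadline rule in propose above reserves a third of max_duration for evaluation and block finalization, so a 3-second slot leaves the builder 2 seconds. A quick std-only check of that arithmetic:

    use std::time::{Duration, Instant};

    // Mirrors `(self.now)() + max_duration - max_duration / 3` from the hunk above.
    fn proposal_deadline(now: Instant, max_duration: Duration) -> Instant {
        now + max_duration - max_duration / 3
    }

    fn main() {
        let now = Instant::now();
        let deadline = proposal_deadline(now, Duration::from_secs(3));
        // Two thirds of the slot remain for actually building the block.
        assert_eq!(deadline - now, Duration::from_secs(2));
    }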
+ const MAX_SKIPPED_TRANSACTIONS: usize = 8; + + let block = self.client.build_block( &self.parent_id, inherent_data, |block_builder| { @@ -259,115 +287,125 @@ impl Proposer where self.transaction_pool.remove_invalid(&unqueue_invalid); })?; - info!("Prepared block for proposing at {} [hash: {:?}; parent_hash: {}; extrinsics: [{}]]", - block.header().number(), - <::Block as BlockT>::Hash::from(block.header().hash()), - block.header().parent_hash(), - block.extrinsics() - .iter() - .map(|xt| format!("{}", BlakeTwo256::hash_of(xt))) - .collect::>() - .join(", ") - ); - telemetry!(CONSENSUS_INFO; "prepared_block_for_proposing"; - "number" => ?block.header().number(), - "hash" => ?<::Block as BlockT>::Hash::from(block.header().hash()), - ); - - let substrate_block = Decode::decode(&mut block.encode().as_slice()) - .expect("blocks are defined to serialize to substrate blocks correctly; qed"); - - assert!(evaluation::evaluate_initial( - &substrate_block, - &self.parent_hash, - self.parent_number, - ).is_ok()); - - Ok(substrate_block) - } + info!( + "Prepared block for proposing at {} [hash: {:?}; parent_hash: {}; extrinsics: [{}]]", + block.header().number(), + <::Block as BlockT>::Hash::from(block.header().hash()), + block.header().parent_hash(), + block + .extrinsics() + .iter() + .map(|xt| format!("{}", BlakeTwo256::hash_of(xt))) + .collect::>() + .join(", ") + ); + telemetry!(CONSENSUS_INFO; "prepared_block_for_proposing"; + "number" => ?block.header().number(), + "hash" => ?<::Block as BlockT>::Hash::from(block.header().hash()), + ); + + let substrate_block = Decode::decode(&mut block.encode().as_slice()) + .expect("blocks are defined to serialize to substrate blocks correctly; qed"); + + assert!(evaluation::evaluate_initial( + &substrate_block, + &self.parent_hash, + self.parent_number, + ) + .is_ok()); + + Ok(substrate_block) + } } #[cfg(test)] mod tests { - use super::*; - - use codec::Encode; - use std::cell::RefCell; - use consensus_common::{Environment, Proposer}; - use test_client::{self, runtime::{Extrinsic, Transfer}, AccountKeyring}; - - fn extrinsic(nonce: u64) -> Extrinsic { - let tx = Transfer { - amount: Default::default(), - nonce, - from: AccountKeyring::Alice.into(), - to: Default::default(), - }; - let signature = AccountKeyring::from_public(&tx.from).unwrap().sign(&tx.encode()).into(); - Extrinsic::Transfer(tx, signature) - } - - #[test] - fn should_cease_building_block_when_deadline_is_reached() { - // given - let client = Arc::new(test_client::new()); - let chain_api = transaction_pool::ChainApi::new(client.clone()); - let txpool = Arc::new(TransactionPool::new(Default::default(), chain_api)); - - txpool.submit_at(&BlockId::number(0), vec![extrinsic(0), extrinsic(1)]).unwrap(); - - let proposer_factory = ProposerFactory { - client: client.clone(), - transaction_pool: txpool.clone(), - inherents_pool: Default::default(), - }; - - let mut proposer = proposer_factory.init( - &client.header(&BlockId::number(0)).unwrap().unwrap(), - &[] - ).unwrap(); - - // when - let cell = RefCell::new(time::Instant::now()); - proposer.now = Box::new(move || { - let new = *cell.borrow() + time::Duration::from_secs(2); - cell.replace(new) - }); - let deadline = time::Duration::from_secs(3); - let block = proposer.propose(Default::default(), deadline).unwrap(); - - // then - // block should have some extrinsics although we have some more in the pool. 
- assert_eq!(block.extrinsics().len(), 1); - assert_eq!(txpool.ready().count(), 2); - } - - #[test] - fn should_include_inherents_from_the_pool() { - // given - let client = Arc::new(test_client::new()); - let chain_api = transaction_pool::ChainApi::new(client.clone()); - let txpool = Arc::new(TransactionPool::new(Default::default(), chain_api)); - let inpool = Arc::new(InherentsPool::default()); - - let proposer_factory = ProposerFactory { - client: client.clone(), - transaction_pool: txpool.clone(), - inherents_pool: inpool.clone(), - }; - - inpool.add(extrinsic(0)); - - let proposer = proposer_factory.init( - &client.header(&BlockId::number(0)).unwrap().unwrap(), - &[] - ).unwrap(); - - // when - let deadline = time::Duration::from_secs(3); - let block = proposer.propose(Default::default(), deadline).unwrap(); - - // then - assert_eq!(block.extrinsics().len(), 1); - } + use super::*; + + use codec::Encode; + use consensus_common::{Environment, Proposer}; + use std::cell::RefCell; + use test_client::{ + self, + runtime::{Extrinsic, Transfer}, + AccountKeyring, + }; + + fn extrinsic(nonce: u64) -> Extrinsic { + let tx = Transfer { + amount: Default::default(), + nonce, + from: AccountKeyring::Alice.into(), + to: Default::default(), + }; + let signature = AccountKeyring::from_public(&tx.from) + .unwrap() + .sign(&tx.encode()) + .into(); + Extrinsic::Transfer(tx, signature) + } + + #[test] + fn should_cease_building_block_when_deadline_is_reached() { + // given + let client = Arc::new(test_client::new()); + let chain_api = transaction_pool::ChainApi::new(client.clone()); + let txpool = Arc::new(TransactionPool::new(Default::default(), chain_api)); + + txpool + .submit_at(&BlockId::number(0), vec![extrinsic(0), extrinsic(1)]) + .unwrap(); + + let proposer_factory = ProposerFactory { + client: client.clone(), + transaction_pool: txpool.clone(), + inherents_pool: Default::default(), + }; + + let mut proposer = proposer_factory + .init(&client.header(&BlockId::number(0)).unwrap().unwrap(), &[]) + .unwrap(); + + // when + let cell = RefCell::new(time::Instant::now()); + proposer.now = Box::new(move || { + let new = *cell.borrow() + time::Duration::from_secs(2); + cell.replace(new) + }); + let deadline = time::Duration::from_secs(3); + let block = proposer.propose(Default::default(), deadline).unwrap(); + + // then + // block should have some extrinsics although we have some more in the pool. 
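That test can control time because Proposer stores its clock as a boxed closure (the now field), so a deterministic clock can be swapped in. The same pattern in isolation, as a std-only sketch with a trimmed-down struct:

    use std::cell::RefCell;
    use std::time::{Duration, Instant};

    struct Proposer {
        now: Box<dyn Fn() -> Instant>,
    }

    fn main() {
        let cell = RefCell::new(Instant::now());
        let proposer = Proposer {
            // Each call returns the current fake time and advances it by 2s,
            // exactly as in the test above.
            now: Box::new(move || {
                let new = *cell.borrow() + Duration::from_secs(2);
                cell.replace(new)
            }),
        };
        let t0 = (proposer.now)();
        let t1 = (proposer.now)();
        assert_eq!(t1 - t0, Duration::from_secs(2));
    }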
+ assert_eq!(block.extrinsics().len(), 1); + assert_eq!(txpool.ready().count(), 2); + } + + #[test] + fn should_include_inherents_from_the_pool() { + // given + let client = Arc::new(test_client::new()); + let chain_api = transaction_pool::ChainApi::new(client.clone()); + let txpool = Arc::new(TransactionPool::new(Default::default(), chain_api)); + let inpool = Arc::new(InherentsPool::default()); + + let proposer_factory = ProposerFactory { + client: client.clone(), + transaction_pool: txpool.clone(), + inherents_pool: inpool.clone(), + }; + + inpool.add(extrinsic(0)); + + let proposer = proposer_factory + .init(&client.header(&BlockId::number(0)).unwrap().unwrap(), &[]) + .unwrap(); + + // when + let deadline = time::Duration::from_secs(3); + let block = proposer.propose(Default::default(), deadline).unwrap(); + + // then + assert_eq!(block.extrinsics().len(), 1); + } } diff --git a/core/basic-authorship/src/lib.rs b/core/basic-authorship/src/lib.rs index 88a55c3bac..dbe0c7139f 100644 --- a/core/basic-authorship/src/lib.rs +++ b/core/basic-authorship/src/lib.rs @@ -20,4 +20,4 @@ mod basic_authorship; -pub use crate::basic_authorship::{ProposerFactory, BlockBuilder, AuthoringApi, Proposer}; +pub use crate::basic_authorship::{AuthoringApi, BlockBuilder, Proposer, ProposerFactory}; diff --git a/core/cli/src/error.rs b/core/cli/src/error.rs index e368cc6d96..efa2cc429b 100644 --- a/core/cli/src/error.rs +++ b/core/cli/src/error.rs @@ -21,23 +21,25 @@ #![allow(deprecated)] use client; -use error_chain::{error_chain, error_chain_processing, impl_error_chain_processed, - impl_extract_backtrace, impl_error_chain_kind}; +use error_chain::{ + error_chain, error_chain_processing, impl_error_chain_kind, impl_error_chain_processed, + impl_extract_backtrace, +}; error_chain! { - foreign_links { - Io(::std::io::Error) #[doc="IO error"]; - Cli(::clap::Error) #[doc="CLI error"]; - Service(::service::Error) #[doc="Substrate service error"]; - } - links { - Client(client::error::Error, client::error::ErrorKind) #[doc="Client error"]; - } - errors { - /// Input error. - Input(m: String) { - description("Invalid input"), - display("{}", m), - } - } + foreign_links { + Io(::std::io::Error) #[doc="IO error"]; + Cli(::clap::Error) #[doc="CLI error"]; + Service(::service::Error) #[doc="Substrate service error"]; + } + links { + Client(client::error::Error, client::error::ErrorKind) #[doc="Client error"]; + } + errors { + /// Input error. + Input(m: String) { + description("Invalid input"), + display("{}", m), + } + } } diff --git a/core/cli/src/informant.rs b/core/cli/src/informant.rs index 260615b2c1..4dd68b1820 100644 --- a/core/cli/src/informant.rs +++ b/core/cli/src/informant.rs @@ -17,177 +17,193 @@ //! Console informant. Prints sync progress and block events. Runs on the calling thread. 
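An aside on the error_chain! block in core/cli/src/error.rs above: the macro expands into an Error struct, an ErrorKind enum (including the Input variant declared there), a Result<T> alias, and the From conversions that make .into() work. A minimal sketch of raising that variant, assuming error-chain as a dependency; parse_port is a hypothetical helper, not part of this patch:

    #[macro_use]
    extern crate error_chain;

    error_chain! {
        errors {
            /// Input error.
            Input(m: String) {
                description("Invalid input"),
                display("{}", m),
            }
        }
    }

    // Hypothetical example of returning the generated ErrorKind::Input.
    fn parse_port(s: &str) -> Result<u16> {
        s.parse()
            .map_err(|_| ErrorKind::Input(format!("Invalid port: {}", s)).into())
    }

    fn main() {
        assert!(parse_port("9944").is_ok());
        assert!(parse_port("not-a-port").is_err());
    }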
use ansi_term::Colour; +use client::{backend::Backend, BlockchainEvents}; +use futures::{Future, Stream}; +use log::{info, warn}; +use network::{SyncProvider, SyncState}; +use service::{Components, Service}; use std::fmt; use std::time; -use futures::{Future, Stream}; -use service::{Service, Components}; -use tokio::runtime::TaskExecutor; -use sysinfo::{get_current_pid, ProcessExt, System, SystemExt}; -use network::{SyncState, SyncProvider}; -use client::{backend::Backend, BlockchainEvents}; use substrate_telemetry::{telemetry, SUBSTRATE_INFO}; -use log::{info, warn}; +use sysinfo::{get_current_pid, ProcessExt, System, SystemExt}; +use tokio::runtime::TaskExecutor; use runtime_primitives::generic::BlockId; -use runtime_primitives::traits::{Header, As}; +use runtime_primitives::traits::{As, Header}; /// Spawn informant on the event loop -pub fn start(service: &Service, exit: ::exit_future::Exit, handle: TaskExecutor) where - C: Components, +pub fn start(service: &Service, exit: ::exit_future::Exit, handle: TaskExecutor) +where + C: Components, { - let network = service.network(); - let client = service.client(); - let txpool = service.transaction_pool(); - let mut last_number = None; - let mut last_update = time::Instant::now(); - - let mut sys = System::new(); - let self_pid = get_current_pid(); - - let display_notifications = network.status().for_each(move |sync_status| { - - if let Ok(info) = client.info() { - let best_number: u64 = info.chain.best_number.as_(); - let best_hash = info.chain.best_hash; - let num_peers = sync_status.num_peers; - let speed = move || speed(best_number, last_number, last_update); - last_update = time::Instant::now(); - let (status, target) = match (sync_status.sync.state, sync_status.sync.best_seen_block) { - (SyncState::Idle, _) => ("Idle".into(), "".into()), - (SyncState::Downloading, None) => (format!("Syncing{}", speed()), "".into()), - (SyncState::Downloading, Some(n)) => (format!("Syncing{}", speed()), format!(", target=#{}", n)), - }; - last_number = Some(best_number); - let txpool_status = txpool.status(); - let finalized_number: u64 = info.chain.finalized_number.as_(); - let bandwidth_download = network.average_download_per_sec(); - let bandwidth_upload = network.average_upload_per_sec(); - info!( - target: "substrate", - "{}{} ({} peers), best: #{} ({}), finalized #{} ({}), ⬇ {} ⬆ {}", - Colour::White.bold().paint(&status), - target, - Colour::White.bold().paint(format!("{}", sync_status.num_peers)), - Colour::White.paint(format!("{}", best_number)), - best_hash, - Colour::White.paint(format!("{}", finalized_number)), - info.chain.finalized_hash, - TransferRateFormat(bandwidth_download), - TransferRateFormat(bandwidth_upload), - ); - - // get cpu usage and memory usage of this process - let (cpu_usage, memory) = if sys.refresh_process(self_pid) { - let proc = sys.get_process(self_pid).expect("Above refresh_process succeeds, this should be Some(), qed"); - (proc.cpu_usage(), proc.memory()) - } else { (0.0, 0) }; - - let network_state = serde_json::to_string(&network.network_state()).unwrap_or_default(); - - telemetry!( - SUBSTRATE_INFO; - "system.interval"; - "network_state" => network_state, - "status" => format!("{}{}", status, target), - "peers" => num_peers, - "height" => best_number, - "best" => ?best_hash, - "txcount" => txpool_status.ready, - "cpu" => cpu_usage, - "memory" => memory, - "finalized_height" => finalized_number, - "finalized_hash" => ?info.chain.finalized_hash, - "bandwidth_download" => bandwidth_download, - "bandwidth_upload" => 
bandwidth_upload, - ); - } else { - warn!("Error getting best block information"); - } - - Ok(()) - }); - - let client = service.client(); - let mut last = match client.info() { - Ok(info) => Some((info.chain.best_number, info.chain.best_hash)), - Err(e) => { warn!("Error getting best block information: {:?}", e); None } - }; - - let display_block_import = client.import_notification_stream().for_each(move |n| { - // detect and log reorganizations. - if let Some((ref last_num, ref last_hash)) = last { - if n.header.parent_hash() != last_hash { - let tree_route = ::client::blockchain::tree_route( - client.backend().blockchain(), - BlockId::Hash(last_hash.clone()), - BlockId::Hash(n.hash), - ); - - match tree_route { - Ok(ref t) if !t.retracted().is_empty() => info!( - "Reorg from #{},{} to #{},{}, common ancestor #{},{}", - last_num, last_hash, - n.header.number(), n.hash, - t.common_block().number, t.common_block().hash, - ), - Ok(_) => {}, - Err(e) => warn!("Error computing tree route: {}", e), - } - } - } - - last = Some((n.header.number().clone(), n.hash.clone())); - - info!(target: "substrate", "Imported #{} ({})", n.header.number(), n.hash); - Ok(()) - }); - - let txpool = service.transaction_pool(); - let display_txpool_import = txpool.import_notification_stream().for_each(move |_| { + let network = service.network(); + let client = service.client(); + let txpool = service.transaction_pool(); + let mut last_number = None; + let mut last_update = time::Instant::now(); + + let mut sys = System::new(); + let self_pid = get_current_pid(); + + let display_notifications = network.status().for_each(move |sync_status| { + if let Ok(info) = client.info() { + let best_number: u64 = info.chain.best_number.as_(); + let best_hash = info.chain.best_hash; + let num_peers = sync_status.num_peers; + let speed = move || speed(best_number, last_number, last_update); + last_update = time::Instant::now(); + let (status, target) = match (sync_status.sync.state, sync_status.sync.best_seen_block) + { + (SyncState::Idle, _) => ("Idle".into(), "".into()), + (SyncState::Downloading, None) => (format!("Syncing{}", speed()), "".into()), + (SyncState::Downloading, Some(n)) => { + (format!("Syncing{}", speed()), format!(", target=#{}", n)) + } + }; + last_number = Some(best_number); + let txpool_status = txpool.status(); + let finalized_number: u64 = info.chain.finalized_number.as_(); + let bandwidth_download = network.average_download_per_sec(); + let bandwidth_upload = network.average_upload_per_sec(); + info!( + target: "substrate", + "{}{} ({} peers), best: #{} ({}), finalized #{} ({}), ⬇ {} ⬆ {}", + Colour::White.bold().paint(&status), + target, + Colour::White.bold().paint(format!("{}", sync_status.num_peers)), + Colour::White.paint(format!("{}", best_number)), + best_hash, + Colour::White.paint(format!("{}", finalized_number)), + info.chain.finalized_hash, + TransferRateFormat(bandwidth_download), + TransferRateFormat(bandwidth_upload), + ); + + // get cpu usage and memory usage of this process + let (cpu_usage, memory) = if sys.refresh_process(self_pid) { + let proc = sys + .get_process(self_pid) + .expect("Above refresh_process succeeds, this should be Some(), qed"); + (proc.cpu_usage(), proc.memory()) + } else { + (0.0, 0) + }; + + let network_state = serde_json::to_string(&network.network_state()).unwrap_or_default(); + + telemetry!( + SUBSTRATE_INFO; + "system.interval"; + "network_state" => network_state, + "status" => format!("{}{}", status, target), + "peers" => num_peers, + "height" => best_number, 
+ "best" => ?best_hash, + "txcount" => txpool_status.ready, + "cpu" => cpu_usage, + "memory" => memory, + "finalized_height" => finalized_number, + "finalized_hash" => ?info.chain.finalized_hash, + "bandwidth_download" => bandwidth_download, + "bandwidth_upload" => bandwidth_upload, + ); + } else { + warn!("Error getting best block information"); + } + + Ok(()) + }); + + let client = service.client(); + let mut last = match client.info() { + Ok(info) => Some((info.chain.best_number, info.chain.best_hash)), + Err(e) => { + warn!("Error getting best block information: {:?}", e); + None + } + }; + + let display_block_import = client.import_notification_stream().for_each(move |n| { + // detect and log reorganizations. + if let Some((ref last_num, ref last_hash)) = last { + if n.header.parent_hash() != last_hash { + let tree_route = ::client::blockchain::tree_route( + client.backend().blockchain(), + BlockId::Hash(last_hash.clone()), + BlockId::Hash(n.hash), + ); + + match tree_route { + Ok(ref t) if !t.retracted().is_empty() => info!( + "Reorg from #{},{} to #{},{}, common ancestor #{},{}", + last_num, + last_hash, + n.header.number(), + n.hash, + t.common_block().number, + t.common_block().hash, + ), + Ok(_) => {} + Err(e) => warn!("Error computing tree route: {}", e), + } + } + } + + last = Some((n.header.number().clone(), n.hash.clone())); + + info!(target: "substrate", "Imported #{} ({})", n.header.number(), n.hash); + Ok(()) + }); + + let txpool = service.transaction_pool(); + let display_txpool_import = txpool.import_notification_stream().for_each(move |_| { let status = txpool.status(); telemetry!(SUBSTRATE_INFO; "txpool.import"; "ready" => status.ready, "future" => status.future); Ok(()) }); - let informant_work = display_notifications.join3(display_block_import, display_txpool_import); - handle.spawn(exit.until(informant_work).map(|_| ())); + let informant_work = display_notifications.join3(display_block_import, display_txpool_import); + handle.spawn(exit.until(informant_work).map(|_| ())); } fn speed(best_number: u64, last_number: Option, last_update: time::Instant) -> String { - let since_last_millis = last_update.elapsed().as_secs() * 1000; - let since_last_subsec_millis = last_update.elapsed().subsec_millis() as u64; - let speed = match last_number { - Some(num) => (best_number.saturating_sub(num) * 10_000 / (since_last_millis + since_last_subsec_millis)) as f64, - None => 0.0 - }; - - if speed < 1.0 { - "".into() - } else { - format!(" {:4.1} bps", speed / 10.0) - } + let since_last_millis = last_update.elapsed().as_secs() * 1000; + let since_last_subsec_millis = last_update.elapsed().subsec_millis() as u64; + let speed = match last_number { + Some(num) => { + (best_number.saturating_sub(num) * 10_000 + / (since_last_millis + since_last_subsec_millis)) as f64 + } + None => 0.0, + }; + + if speed < 1.0 { + "".into() + } else { + format!(" {:4.1} bps", speed / 10.0) + } } /// Contains a number of bytes per second. Implements `fmt::Display` and shows this number of bytes /// per second in a nice way. struct TransferRateFormat(u64); impl fmt::Display for TransferRateFormat { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - // Special case 0. - if self.0 == 0 { - return write!(f, "0") - } - - // Under 0.1 kiB, display plain bytes. - if self.0 < 100 { - return write!(f, "{} B/s", self.0) - } - - // Under 1.0 MiB/sec, display the value in kiB/sec. 
- if self.0 < 1024 * 1024 { - return write!(f, "{:.1}kiB/s", self.0 as f64 / 1024.0) - } - - write!(f, "{:.1}MiB/s", self.0 as f64 / (1024.0 * 1024.0)) - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + // Special case 0. + if self.0 == 0 { + return write!(f, "0"); + } + + // Under 0.1 kiB, display plain bytes. + if self.0 < 100 { + return write!(f, "{} B/s", self.0); + } + + // Under 1.0 MiB/sec, display the value in kiB/sec. + if self.0 < 1024 * 1024 { + return write!(f, "{:.1}kiB/s", self.0 as f64 / 1024.0); + } + + write!(f, "{:.1}MiB/s", self.0 as f64 / (1024.0 * 1024.0)) + } } diff --git a/core/cli/src/lib.rs b/core/cli/src/lib.rs index d2c3cccebe..da7c296c74 100644 --- a/core/cli/src/lib.rs +++ b/core/cli/src/lib.rs @@ -21,44 +21,48 @@ #[macro_use] mod traits; -mod params; pub mod error; pub mod informant; +mod params; use client::ExecutionStrategies; -use runtime_primitives::traits::As; -use service::{ - ServiceFactory, FactoryFullConfiguration, RuntimeGenesis, - FactoryGenesis, PruningMode, ChainSpec, -}; use network::{ - self, multiaddr::Protocol, - config::{NetworkConfiguration, NonReservedPeerMode, NodeKeyConfig}, - build_multiaddr, + self, build_multiaddr, + config::{NetworkConfiguration, NodeKeyConfig, NonReservedPeerMode}, + multiaddr::Protocol, }; use primitives::H256; +use runtime_primitives::traits::As; +use service::{ + ChainSpec, FactoryFullConfiguration, FactoryGenesis, PruningMode, RuntimeGenesis, + ServiceFactory, +}; use std::{ - io::{Write, Read, stdin, stdout}, iter, fs::{self, File}, net::{Ipv4Addr, SocketAddr}, - path::{Path, PathBuf}, str::FromStr, + fs::{self, File}, + io::{stdin, stdout, Read, Write}, + iter, + net::{Ipv4Addr, SocketAddr}, + path::{Path, PathBuf}, + str::FromStr, }; +use app_dirs::{AppDataType, AppInfo}; +use error_chain::bail; +use lazy_static::lazy_static; +use log::info; use names::{Generator, Name}; +use params::{ + BuildSpecCmd, ExportBlocksCmd, ImportBlocksCmd, MergeParameters, NetworkConfigurationParams, + NodeKeyParams, NodeKeyType, PurgeChainCmd, RevertCmd, RunCmd, SharedParams, + TransactionPoolParams, +}; +pub use params::{CoreParams, NoCustom}; use regex::Regex; -use structopt::{StructOpt, clap::AppSettings}; #[doc(hidden)] pub use structopt::clap::App; -use params::{ - RunCmd, PurgeChainCmd, RevertCmd, ImportBlocksCmd, ExportBlocksCmd, BuildSpecCmd, - NetworkConfigurationParams, SharedParams, MergeParameters, TransactionPoolParams, - NodeKeyParams, NodeKeyType -}; -pub use params::{NoCustom, CoreParams}; -pub use traits::{GetLogFilter, AugmentClap}; -use app_dirs::{AppInfo, AppDataType}; -use error_chain::bail; -use log::info; -use lazy_static::lazy_static; +use structopt::{clap::AppSettings, StructOpt}; +pub use traits::{AugmentClap, GetLogFilter}; use futures::Future; use substrate_telemetry::TelemetryEndpoints; @@ -78,98 +82,106 @@ const NODE_KEY_ED25519_FILE: &str = "secret_ed25519"; /// Executable version. Used to pass version information from the root crate. pub struct VersionInfo { - /// Implemtation name. - pub name: &'static str, - /// Implementation version. - pub version: &'static str, - /// SCM Commit hash. - pub commit: &'static str, - /// Executable file name. - pub executable_name: &'static str, - /// Executable file description. - pub description: &'static str, - /// Executable file author. - pub author: &'static str, - /// Support URL. - pub support_url: &'static str, + /// Implemtation name. + pub name: &'static str, + /// Implementation version. + pub version: &'static str, + /// SCM Commit hash. 
+ pub commit: &'static str, + /// Executable file name. + pub executable_name: &'static str, + /// Executable file description. + pub description: &'static str, + /// Executable file author. + pub author: &'static str, + /// Support URL. + pub support_url: &'static str, } /// Something that can be converted into an exit signal. pub trait IntoExit { - /// Exit signal type. - type Exit: Future + Send + 'static; - /// Convert into exit signal. - fn into_exit(self) -> Self::Exit; + /// Exit signal type. + type Exit: Future + Send + 'static; + /// Convert into exit signal. + fn into_exit(self) -> Self::Exit; } fn get_chain_key(cli: &SharedParams) -> String { - match cli.chain { - Some(ref chain) => chain.clone(), - None => if cli.dev { "dev".into() } else { "".into() } - } + match cli.chain { + Some(ref chain) => chain.clone(), + None => { + if cli.dev { + "dev".into() + } else { + "".into() + } + } + } } fn generate_node_name() -> String { - let result = loop { - let node_name = Generator::with_naming(Name::Numbered).next().unwrap(); - let count = node_name.chars().count(); + let result = loop { + let node_name = Generator::with_naming(Name::Numbered).next().unwrap(); + let count = node_name.chars().count(); - if count < NODE_NAME_MAX_LENGTH { - break node_name - } - }; + if count < NODE_NAME_MAX_LENGTH { + break node_name; + } + }; - result + result } fn load_spec(cli: &SharedParams, factory: F) -> error::Result> - where G: RuntimeGenesis, F: FnOnce(&str) -> Result>, String>, +where + G: RuntimeGenesis, + F: FnOnce(&str) -> Result>, String>, { - let chain_key = get_chain_key(cli); - let spec = match factory(&chain_key)? { - Some(spec) => spec, - None => ChainSpec::from_json_file(PathBuf::from(chain_key))? - }; - Ok(spec) + let chain_key = get_chain_key(cli); + let spec = match factory(&chain_key)? { + Some(spec) => spec, + None => ChainSpec::from_json_file(PathBuf::from(chain_key))?, + }; + Ok(spec) } fn base_path(cli: &SharedParams, version: &VersionInfo) -> PathBuf { - cli.base_path.clone() - .unwrap_or_else(|| - app_dirs::get_app_root( - AppDataType::UserData, - &AppInfo { - name: version.executable_name, - author: version.author - } - ).expect("app directories exist on all supported platforms; qed") - ) + cli.base_path.clone().unwrap_or_else(|| { + app_dirs::get_app_root( + AppDataType::UserData, + &AppInfo { + name: version.executable_name, + author: version.author, + }, + ) + .expect("app directories exist on all supported platforms; qed") + }) } fn input_err>(msg: T) -> error::Error { - error::ErrorKind::Input(msg.into()).into() + error::ErrorKind::Input(msg.into()).into() } /// Check whether a node name is considered as valid fn is_node_name_valid(_name: &str) -> Result<(), &str> { - let name = _name.to_string(); - if name.chars().count() >= NODE_NAME_MAX_LENGTH { - return Err("Node name too long"); - } + let name = _name.to_string(); + if name.chars().count() >= NODE_NAME_MAX_LENGTH { + return Err("Node name too long"); + } - let invalid_chars = r"[\\.@]"; - let re = Regex::new(invalid_chars).unwrap(); - if re.is_match(&name) { - return Err("Node name should not contain invalid chars such as '.' and '@'"); - } + let invalid_chars = r"[\\.@]"; + let re = Regex::new(invalid_chars).unwrap(); + if re.is_match(&name) { + return Err("Node name should not contain invalid chars such as '.' 
and '@'"); + } - let invalid_patterns = r"(https?:\\/+)?(www)+"; - let re = Regex::new(invalid_patterns).unwrap(); - if re.is_match(&name) { - return Err("Node name should not contain urls"); - } + let invalid_patterns = r"(https?:\\/+)?(www)+"; + let re = Regex::new(invalid_patterns).unwrap(); + if re.is_match(&name) { + return Err("Node name should not contain urls"); + } - Ok(()) + Ok(()) } /// Parse command line interface arguments and executes the desired command. @@ -188,324 +200,359 @@ fn is_node_name_valid(_name: &str) -> Result<(), &str> { /// parameters are visible to the user as if they were normal run command parameters. If no custom /// parameters are required, `NoCustom` can be used as type here. pub fn parse_and_execute<'a, F, CC, RP, S, RS, E, I, T>( - spec_factory: S, - version: &VersionInfo, - impl_name: &'static str, - args: I, - exit: E, - run_service: RS, + spec_factory: S, + version: &VersionInfo, + impl_name: &'static str, + args: I, + exit: E, + run_service: RS, ) -> error::Result> where - F: ServiceFactory, - S: FnOnce(&str) -> Result>>, String>, - CC: StructOpt + Clone + GetLogFilter, - RP: StructOpt + Clone + AugmentClap, - E: IntoExit, - RS: FnOnce(E, RP, FactoryFullConfiguration) -> Result<(), String>, - I: IntoIterator, - T: Into + Clone, + F: ServiceFactory, + S: FnOnce(&str) -> Result>>, String>, + CC: StructOpt + Clone + GetLogFilter, + RP: StructOpt + Clone + AugmentClap, + E: IntoExit, + RS: FnOnce(E, RP, FactoryFullConfiguration) -> Result<(), String>, + I: IntoIterator, + T: Into + Clone, { - panic_handler::set(version.support_url); - - let full_version = service::config::full_version_from_strs( - version.version, - version.commit - ); - - let matches = CoreParams::::clap() - .name(version.executable_name) - .author(version.author) - .about(version.description) - .version(&(full_version + "\n")[..]) - .setting(AppSettings::GlobalVersion) - .setting(AppSettings::ArgsNegateSubcommands) - .setting(AppSettings::SubcommandsNegateReqs) - .get_matches_from(args); - let cli_args = CoreParams::::from_clap(&matches); - - init_logger(cli_args.get_log_filter().as_ref().map(|v| v.as_ref()).unwrap_or("")); - fdlimit::raise_fd_limit(); - - match cli_args { - params::CoreParams::Run(params) => run_node::( - params, spec_factory, exit, run_service, impl_name, version, - ).map(|_| None), - params::CoreParams::BuildSpec(params) => - build_spec::(params, spec_factory, version).map(|_| None), - params::CoreParams::ExportBlocks(params) => - export_blocks::(params, spec_factory, exit, version).map(|_| None), - params::CoreParams::ImportBlocks(params) => - import_blocks::(params, spec_factory, exit, version).map(|_| None), - params::CoreParams::PurgeChain(params) => - purge_chain::(params, spec_factory, version).map(|_| None), - params::CoreParams::Revert(params) => - revert_chain::(params, spec_factory, version).map(|_| None), - params::CoreParams::Custom(params) => Ok(Some(params)), - } + panic_handler::set(version.support_url); + + let full_version = service::config::full_version_from_strs(version.version, version.commit); + + let matches = CoreParams::::clap() + .name(version.executable_name) + .author(version.author) + .about(version.description) + .version(&(full_version + "\n")[..]) + .setting(AppSettings::GlobalVersion) + .setting(AppSettings::ArgsNegateSubcommands) + .setting(AppSettings::SubcommandsNegateReqs) + .get_matches_from(args); + let cli_args = CoreParams::::from_clap(&matches); + + init_logger( + cli_args + .get_log_filter() + .as_ref() + .map(|v| 
v.as_ref()) + .unwrap_or(""), + ); + fdlimit::raise_fd_limit(); + + match cli_args { + params::CoreParams::Run(params) => { + run_node::(params, spec_factory, exit, run_service, impl_name, version) + .map(|_| None) + } + params::CoreParams::BuildSpec(params) => { + build_spec::(params, spec_factory, version).map(|_| None) + } + params::CoreParams::ExportBlocks(params) => { + export_blocks::(params, spec_factory, exit, version).map(|_| None) + } + params::CoreParams::ImportBlocks(params) => { + import_blocks::(params, spec_factory, exit, version).map(|_| None) + } + params::CoreParams::PurgeChain(params) => { + purge_chain::(params, spec_factory, version).map(|_| None) + } + params::CoreParams::Revert(params) => { + revert_chain::(params, spec_factory, version).map(|_| None) + } + params::CoreParams::Custom(params) => Ok(Some(params)), + } } /// Create a `NodeKeyConfig` from the given `NodeKeyParams` in the context /// of an optional network config storage directory. -fn node_key_config
<P>(params: NodeKeyParams, net_config_dir: &Option<P>) - -> error::Result<NodeKeyConfig> +fn node_key_config<P>( + params: NodeKeyParams, + net_config_dir: &Option<P>, +) -> error::Result<NodeKeyConfig> where - P: AsRef<Path> + P: AsRef<Path>, {
- match params.node_key_type { - NodeKeyType::Secp256k1 => - params.node_key.as_ref().map(parse_secp256k1_secret).unwrap_or_else(|| - Ok(params.node_key_file - .or_else(|| net_config_file(net_config_dir, NODE_KEY_SECP256K1_FILE)) - .map(network::Secret::File) - .unwrap_or(network::Secret::New))) - .map(NodeKeyConfig::Secp256k1), - - NodeKeyType::Ed25519 => - params.node_key.as_ref().map(parse_ed25519_secret).unwrap_or_else(|| - Ok(params.node_key_file - .or_else(|| net_config_file(net_config_dir, NODE_KEY_ED25519_FILE)) - .map(network::Secret::File) - .unwrap_or(network::Secret::New))) - .map(NodeKeyConfig::Ed25519) - }
+ match params.node_key_type { + NodeKeyType::Secp256k1 => params + .node_key + .as_ref() + .map(parse_secp256k1_secret) + .unwrap_or_else(|| { + Ok(params + .node_key_file + .or_else(|| net_config_file(net_config_dir, NODE_KEY_SECP256K1_FILE)) + .map(network::Secret::File) + .unwrap_or(network::Secret::New)) + }) + .map(NodeKeyConfig::Secp256k1), + + NodeKeyType::Ed25519 => params + .node_key + .as_ref() + .map(parse_ed25519_secret) + .unwrap_or_else(|| { + Ok(params + .node_key_file + .or_else(|| net_config_file(net_config_dir, NODE_KEY_ED25519_FILE)) + .map(network::Secret::File) + .unwrap_or(network::Secret::New)) + }) + .map(NodeKeyConfig::Ed25519), + }
} fn net_config_file
<P>(net_config_dir: &Option<P>, name: &str) -> Option<PathBuf> where - P: AsRef<Path> + P: AsRef<Path>, { - net_config_dir.as_ref().map(|d| d.as_ref().join(name)) + net_config_dir.as_ref().map(|d| d.as_ref().join(name)) }
/// Create an error caused by an invalid node key argument. fn invalid_node_key(e: impl std::fmt::Display) -> error::Error { - input_err(format!("Invalid node key: {}", e)) + input_err(format!("Invalid node key: {}", e)) }
/// Parse a Secp256k1 secret key from a hex string into a `network::Secret`. fn parse_secp256k1_secret(hex: &String) -> error::Result { - H256::from_str(hex).map_err(invalid_node_key).and_then(|bytes| - network::identity::secp256k1::SecretKey::from_bytes(bytes) - .map(network::Secret::Input) - .map_err(invalid_node_key)) + H256::from_str(hex) + .map_err(invalid_node_key) + .and_then(|bytes| { + network::identity::secp256k1::SecretKey::from_bytes(bytes) + .map(network::Secret::Input) + .map_err(invalid_node_key) + }) }
/// Parse a Ed25519 secret key from a hex string into a `network::Secret`. fn parse_ed25519_secret(hex: &String) -> error::Result { - H256::from_str(&hex).map_err(invalid_node_key).and_then(|bytes| - network::identity::ed25519::SecretKey::from_bytes(bytes) - .map(network::Secret::Input) - .map_err(invalid_node_key)) + H256::from_str(&hex) + .map_err(invalid_node_key) + .and_then(|bytes| { + network::identity::ed25519::SecretKey::from_bytes(bytes) + .map(network::Secret::Input) + .map_err(invalid_node_key) + }) }
/// Fill the given `PoolConfiguration` by looking at the cli parameters. fn fill_transaction_pool_configuration<F: ServiceFactory>( - options: &mut FactoryFullConfiguration<F>, - params: TransactionPoolParams, + options: &mut FactoryFullConfiguration<F>, + params: TransactionPoolParams, ) -> error::Result<()> { - // ready queue - options.transaction_pool.ready.count = params.pool_limit; - options.transaction_pool.ready.total_bytes = params.pool_kbytes * 1024; + // ready queue + options.transaction_pool.ready.count = params.pool_limit; + options.transaction_pool.ready.total_bytes = params.pool_kbytes * 1024; - // future queue - let factor = 10; - options.transaction_pool.future.count = params.pool_limit / factor; - options.transaction_pool.future.total_bytes = params.pool_kbytes * 1024 / factor; + // future queue + let factor = 10; + options.transaction_pool.future.count = params.pool_limit / factor; + options.transaction_pool.future.total_bytes = params.pool_kbytes * 1024 / factor; - Ok(()) + Ok(()) }
/// Fill the given `NetworkConfiguration` by looking at the cli parameters.
fn fill_network_configuration( - cli: NetworkConfigurationParams, - base_path: &Path, - chain_spec_id: &str, - config: &mut NetworkConfiguration, - client_id: String, + cli: NetworkConfigurationParams, + base_path: &Path, + chain_spec_id: &str, + config: &mut NetworkConfiguration, + client_id: String, ) -> error::Result<()> { - config.boot_nodes.extend(cli.bootnodes.into_iter()); - config.config_path = Some( - network_path(&base_path, chain_spec_id).to_string_lossy().into() - ); - config.net_config_path = config.config_path.clone(); - config.reserved_nodes.extend(cli.reserved_nodes.into_iter()); - if !config.reserved_nodes.is_empty() { - config.non_reserved_mode = NonReservedPeerMode::Deny; - } + config.boot_nodes.extend(cli.bootnodes.into_iter()); + config.config_path = Some( + network_path(&base_path, chain_spec_id) + .to_string_lossy() + .into(), + ); + config.net_config_path = config.config_path.clone(); + config.reserved_nodes.extend(cli.reserved_nodes.into_iter()); + if !config.reserved_nodes.is_empty() { + config.non_reserved_mode = NonReservedPeerMode::Deny; + } - for addr in cli.listen_addr.iter() { - let addr = addr.parse().map_err(|_| "Invalid listen multiaddress")?; - config.listen_addresses.push(addr); - } + for addr in cli.listen_addr.iter() { + let addr = addr.parse().map_err(|_| "Invalid listen multiaddress")?; + config.listen_addresses.push(addr); + } - if config.listen_addresses.is_empty() { - let port = match cli.port { - Some(port) => port, - None => 30333, - }; + if config.listen_addresses.is_empty() { + let port = match cli.port { + Some(port) => port, + None => 30333, + }; - config.listen_addresses = vec![ - iter::once(Protocol::Ip4(Ipv4Addr::new(0, 0, 0, 0))) - .chain(iter::once(Protocol::Tcp(port))) - .collect() - ]; - } + config.listen_addresses = vec![iter::once(Protocol::Ip4(Ipv4Addr::new(0, 0, 0, 0))) + .chain(iter::once(Protocol::Tcp(port))) + .collect()]; + } - config.public_addresses = Vec::new(); + config.public_addresses = Vec::new(); - config.client_version = client_id; - config.node_key = node_key_config(cli.node_key_params, &config.net_config_path)?; + config.client_version = client_id; + config.node_key = node_key_config(cli.node_key_params, &config.net_config_path)?; - config.in_peers = cli.in_peers; - config.out_peers = cli.out_peers; + config.in_peers = cli.in_peers; + config.out_peers = cli.out_peers; - config.enable_mdns = !cli.no_mdns; + config.enable_mdns = !cli.no_mdns; - Ok(()) + Ok(()) } fn create_run_node_config( - cli: RunCmd, spec_factory: S, impl_name: &'static str, version: &VersionInfo + cli: RunCmd, + spec_factory: S, + impl_name: &'static str, + version: &VersionInfo, ) -> error::Result> where - F: ServiceFactory, - S: FnOnce(&str) -> Result>>, String>, + F: ServiceFactory, + S: FnOnce(&str) -> Result>>, String>, { - let spec = load_spec(&cli.shared_params, spec_factory)?; - let mut config = service::Configuration::default_with_spec(spec.clone()); - - config.impl_name = impl_name; - config.impl_commit = version.commit; - config.impl_version = version.version; - - config.name = match cli.name.or(cli.keyring.account.map(|a| a.to_string())) { - None => generate_node_name(), - Some(name) => name, - }; - match is_node_name_valid(&config.name) { - Ok(_) => (), - Err(msg) => bail!( - input_err( - format!("Invalid node name '{}'. Reason: {}. 
If unsure, use none.", - config.name, - msg - ) - ) - ) - } - - let base_path = base_path(&cli.shared_params, version); - - config.keystore_path = cli.keystore_path - .unwrap_or_else(|| keystore_path(&base_path, config.chain_spec.id())) - .to_string_lossy() - .into(); - - config.database_path = - db_path(&base_path, config.chain_spec.id()).to_string_lossy().into(); - config.database_cache_size = cli.database_cache_size; - config.pruning = match cli.pruning { - Some(ref s) if s == "archive" => PruningMode::ArchiveAll, - None => PruningMode::default(), - Some(s) => PruningMode::keep_blocks( - s.parse().map_err(|_| input_err("Invalid pruning mode specified"))? - ), - }; - - let role = - if cli.light { - service::Roles::LIGHT - } else if cli.validator || cli.shared_params.dev { - service::Roles::AUTHORITY - } else { - service::Roles::FULL - }; - - let exec = cli.execution_strategies; - config.execution_strategies = ExecutionStrategies { - syncing: exec.syncing_execution.into(), - importing: exec.importing_execution.into(), - block_construction: exec.block_construction_execution.into(), - offchain_worker: exec.offchain_worker_execution.into(), - other: exec.other_execution.into(), - }; - - config.offchain_worker = match (cli.offchain_worker, role) { - (params::OffchainWorkerEnabled::WhenValidating, service::Roles::AUTHORITY) => true, - (params::OffchainWorkerEnabled::Always, _) => true, - (params::OffchainWorkerEnabled::Never, _) => false, - (params::OffchainWorkerEnabled::WhenValidating, _) => false, - }; - - config.roles = role; - config.disable_grandpa = cli.no_grandpa; - - let client_id = config.client_id(); - fill_network_configuration( - cli.network_config, - &base_path, - spec.id(), - &mut config.network, - client_id, - )?; - - fill_transaction_pool_configuration::( - &mut config, - cli.pool_config, - )?; - - if let Some(key) = cli.key { - config.keys.push(key); - } - - if cli.shared_params.dev { - config.keys.push("//Alice".into()); - } - - if let Some(account) = cli.keyring.account { - config.keys.push(format!("//{}", account)); - } - - let rpc_interface: &str = if cli.rpc_external { "0.0.0.0" } else { "127.0.0.1" }; - let ws_interface: &str = if cli.ws_external { "0.0.0.0" } else { "127.0.0.1" }; - - config.rpc_http = Some( - parse_address(&format!("{}:{}", rpc_interface, 9933), cli.rpc_port)? - ); - config.rpc_ws = Some( - parse_address(&format!("{}:{}", ws_interface, 9944), cli.ws_port)? - ); - - // Override telemetry - if cli.no_telemetry { - config.telemetry_endpoints = None; - } else if !cli.telemetry_endpoints.is_empty() { - config.telemetry_endpoints = Some(TelemetryEndpoints::new(cli.telemetry_endpoints)); - } - - config.force_authoring = cli.force_authoring; - - Ok(config) + let spec = load_spec(&cli.shared_params, spec_factory)?; + let mut config = service::Configuration::default_with_spec(spec.clone()); + + config.impl_name = impl_name; + config.impl_commit = version.commit; + config.impl_version = version.version; + + config.name = match cli.name.or(cli.keyring.account.map(|a| a.to_string())) { + None => generate_node_name(), + Some(name) => name, + }; + match is_node_name_valid(&config.name) { + Ok(_) => (), + Err(msg) => bail!(input_err(format!( + "Invalid node name '{}'. Reason: {}. 
If unsure, use none.", + config.name, msg + ))), + } + + let base_path = base_path(&cli.shared_params, version); + + config.keystore_path = cli + .keystore_path + .unwrap_or_else(|| keystore_path(&base_path, config.chain_spec.id())) + .to_string_lossy() + .into(); + + config.database_path = db_path(&base_path, config.chain_spec.id()) + .to_string_lossy() + .into(); + config.database_cache_size = cli.database_cache_size; + config.pruning = match cli.pruning { + Some(ref s) if s == "archive" => PruningMode::ArchiveAll, + None => PruningMode::default(), + Some(s) => PruningMode::keep_blocks( + s.parse() + .map_err(|_| input_err("Invalid pruning mode specified"))?, + ), + }; + + let role = if cli.light { + service::Roles::LIGHT + } else if cli.validator || cli.shared_params.dev { + service::Roles::AUTHORITY + } else { + service::Roles::FULL + }; + + let exec = cli.execution_strategies; + config.execution_strategies = ExecutionStrategies { + syncing: exec.syncing_execution.into(), + importing: exec.importing_execution.into(), + block_construction: exec.block_construction_execution.into(), + offchain_worker: exec.offchain_worker_execution.into(), + other: exec.other_execution.into(), + }; + + config.offchain_worker = match (cli.offchain_worker, role) { + (params::OffchainWorkerEnabled::WhenValidating, service::Roles::AUTHORITY) => true, + (params::OffchainWorkerEnabled::Always, _) => true, + (params::OffchainWorkerEnabled::Never, _) => false, + (params::OffchainWorkerEnabled::WhenValidating, _) => false, + }; + + config.roles = role; + config.disable_grandpa = cli.no_grandpa; + + let client_id = config.client_id(); + fill_network_configuration( + cli.network_config, + &base_path, + spec.id(), + &mut config.network, + client_id, + )?; + + fill_transaction_pool_configuration::(&mut config, cli.pool_config)?; + + if let Some(key) = cli.key { + config.keys.push(key); + } + + if cli.shared_params.dev { + config.keys.push("//Alice".into()); + } + + if let Some(account) = cli.keyring.account { + config.keys.push(format!("//{}", account)); + } + + let rpc_interface: &str = if cli.rpc_external { + "0.0.0.0" + } else { + "127.0.0.1" + }; + let ws_interface: &str = if cli.ws_external { + "0.0.0.0" + } else { + "127.0.0.1" + }; + + config.rpc_http = Some(parse_address( + &format!("{}:{}", rpc_interface, 9933), + cli.rpc_port, + )?); + config.rpc_ws = Some(parse_address( + &format!("{}:{}", ws_interface, 9944), + cli.ws_port, + )?); + + // Override telemetry + if cli.no_telemetry { + config.telemetry_endpoints = None; + } else if !cli.telemetry_endpoints.is_empty() { + config.telemetry_endpoints = Some(TelemetryEndpoints::new(cli.telemetry_endpoints)); + } + + config.force_authoring = cli.force_authoring; + + Ok(config) } fn run_node( - cli: MergeParameters, - spec_factory: S, - exit: E, - run_service: RS, - impl_name: &'static str, - version: &VersionInfo, + cli: MergeParameters, + spec_factory: S, + exit: E, + run_service: RS, + impl_name: &'static str, + version: &VersionInfo, ) -> error::Result<()> where - RP: StructOpt + Clone, - F: ServiceFactory, - E: IntoExit, - S: FnOnce(&str) -> Result>>, String>, - RS: FnOnce(E, RP, FactoryFullConfiguration) -> Result<(), String>, - { - let config = create_run_node_config::(cli.left, spec_factory, impl_name, version)?; + RP: StructOpt + Clone, + F: ServiceFactory, + E: IntoExit, + S: FnOnce(&str) -> Result>>, String>, + RS: FnOnce(E, RP, FactoryFullConfiguration) -> Result<(), String>, +{ + let config = create_run_node_config::(cli.left, spec_factory, impl_name, 
version)?; - run_service(exit, cli.right, config).map_err(Into::into) + run_service(exit, cli.right, config).map_err(Into::into) } // @@ -517,389 +564,407 @@ where // 9926-9949 Unassigned fn with_default_boot_node( - spec: &mut ChainSpec>, - cli: BuildSpecCmd, - version: &VersionInfo, + spec: &mut ChainSpec>, + cli: BuildSpecCmd, + version: &VersionInfo, ) -> error::Result<()> where - F: ServiceFactory + F: ServiceFactory, { - if spec.boot_nodes().is_empty() { - let base_path = base_path(&cli.shared_params, version); - let storage_path = network_path(&base_path, spec.id()); - let node_key = node_key_config(cli.node_key_params, &Some(storage_path))?; - let keys = node_key.into_keypair()?; - let peer_id = keys.public().into_peer_id(); - let addr = build_multiaddr![ - Ip4([127, 0, 0, 1]), - Tcp(30333u16), - P2p(peer_id) - ]; - spec.add_boot_node(addr) - } - Ok(()) -} - -fn build_spec( - cli: BuildSpecCmd, - spec_factory: S, - version: &VersionInfo, -) -> error::Result<()> + if spec.boot_nodes().is_empty() { + let base_path = base_path(&cli.shared_params, version); + let storage_path = network_path(&base_path, spec.id()); + let node_key = node_key_config(cli.node_key_params, &Some(storage_path))?; + let keys = node_key.into_keypair()?; + let peer_id = keys.public().into_peer_id(); + let addr = build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(30333u16), P2p(peer_id)]; + spec.add_boot_node(addr) + } + Ok(()) +} + +fn build_spec(cli: BuildSpecCmd, spec_factory: S, version: &VersionInfo) -> error::Result<()> where - F: ServiceFactory, - S: FnOnce(&str) -> Result>>, String>, + F: ServiceFactory, + S: FnOnce(&str) -> Result>>, String>, { - info!("Building chain spec"); - let raw_output = cli.raw; - let mut spec = load_spec(&cli.shared_params, spec_factory)?; - with_default_boot_node::(&mut spec, cli, version)?; - let json = service::chain_ops::build_spec::>(spec, raw_output)?; + info!("Building chain spec"); + let raw_output = cli.raw; + let mut spec = load_spec(&cli.shared_params, spec_factory)?; + with_default_boot_node::(&mut spec, cli, version)?; + let json = service::chain_ops::build_spec::>(spec, raw_output)?; - print!("{}", json); + print!("{}", json); - Ok(()) + Ok(()) } fn create_config_with_db_path( - spec_factory: S, cli: &SharedParams, version: &VersionInfo, + spec_factory: S, + cli: &SharedParams, + version: &VersionInfo, ) -> error::Result> where - F: ServiceFactory, - S: FnOnce(&str) -> Result>>, String>, + F: ServiceFactory, + S: FnOnce(&str) -> Result>>, String>, { - let spec = load_spec(cli, spec_factory)?; - let base_path = base_path(cli, version); + let spec = load_spec(cli, spec_factory)?; + let base_path = base_path(cli, version); - let mut config = service::Configuration::default_with_spec(spec.clone()); - config.database_path = db_path(&base_path, spec.id()).to_string_lossy().into(); + let mut config = service::Configuration::default_with_spec(spec.clone()); + config.database_path = db_path(&base_path, spec.id()).to_string_lossy().into(); - Ok(config) + Ok(config) } fn export_blocks( - cli: ExportBlocksCmd, - spec_factory: S, - exit: E, - version: &VersionInfo, + cli: ExportBlocksCmd, + spec_factory: S, + exit: E, + version: &VersionInfo, ) -> error::Result<()> where - F: ServiceFactory, - E: IntoExit, - S: FnOnce(&str) -> Result>>, String>, + F: ServiceFactory, + E: IntoExit, + S: FnOnce(&str) -> Result>>, String>, { - let config = create_config_with_db_path::(spec_factory, &cli.shared_params, version)?; + let config = create_config_with_db_path::(spec_factory, &cli.shared_params, 
version)?; - info!("DB path: {}", config.database_path); - let from = cli.from.unwrap_or(1); - let to = cli.to; - let json = cli.json; + info!("DB path: {}", config.database_path); + let from = cli.from.unwrap_or(1); + let to = cli.to; + let json = cli.json; - let file: Box = match cli.output { - Some(filename) => Box::new(File::create(filename)?), - None => Box::new(stdout()), - }; + let file: Box = match cli.output { + Some(filename) => Box::new(File::create(filename)?), + None => Box::new(stdout()), + }; - service::chain_ops::export_blocks::( - config, exit.into_exit(), file, As::sa(from), to.map(As::sa), json - ).map_err(Into::into) + service::chain_ops::export_blocks::( + config, + exit.into_exit(), + file, + As::sa(from), + to.map(As::sa), + json, + ) + .map_err(Into::into) } fn import_blocks( - cli: ImportBlocksCmd, - spec_factory: S, - exit: E, - version: &VersionInfo, + cli: ImportBlocksCmd, + spec_factory: S, + exit: E, + version: &VersionInfo, ) -> error::Result<()> where - F: ServiceFactory, - E: IntoExit, - S: FnOnce(&str) -> Result>>, String>, + F: ServiceFactory, + E: IntoExit, + S: FnOnce(&str) -> Result>>, String>, { - let config = create_config_with_db_path::(spec_factory, &cli.shared_params, version)?; + let config = create_config_with_db_path::(spec_factory, &cli.shared_params, version)?; - let file: Box = match cli.input { - Some(filename) => Box::new(File::open(filename)?), - None => Box::new(stdin()), - }; + let file: Box = match cli.input { + Some(filename) => Box::new(File::open(filename)?), + None => Box::new(stdin()), + }; - service::chain_ops::import_blocks::(config, exit.into_exit(), file).map_err(Into::into) + service::chain_ops::import_blocks::(config, exit.into_exit(), file).map_err(Into::into) } -fn revert_chain( - cli: RevertCmd, - spec_factory: S, - version: &VersionInfo, -) -> error::Result<()> +fn revert_chain(cli: RevertCmd, spec_factory: S, version: &VersionInfo) -> error::Result<()> where - F: ServiceFactory, - S: FnOnce(&str) -> Result>>, String>, + F: ServiceFactory, + S: FnOnce(&str) -> Result>>, String>, { - let config = create_config_with_db_path::(spec_factory, &cli.shared_params, version)?; - let blocks = cli.num; - Ok(service::chain_ops::revert_chain::(config, As::sa(blocks))?) + let config = create_config_with_db_path::(spec_factory, &cli.shared_params, version)?; + let blocks = cli.num; + Ok(service::chain_ops::revert_chain::( + config, + As::sa(blocks), + )?) } fn purge_chain( - cli: PurgeChainCmd, - spec_factory: S, - version: &VersionInfo, + cli: PurgeChainCmd, + spec_factory: S, + version: &VersionInfo, ) -> error::Result<()> where - F: ServiceFactory, - S: FnOnce(&str) -> Result>>, String>, + F: ServiceFactory, + S: FnOnce(&str) -> Result>>, String>, { - let config = create_config_with_db_path::(spec_factory, &cli.shared_params, version)?; - let db_path = config.database_path; + let config = create_config_with_db_path::(spec_factory, &cli.shared_params, version)?; + let db_path = config.database_path; - if cli.yes == false { - print!("Are you sure to remove {:?}? (y/n)", &db_path); - stdout().flush().expect("failed to flush stdout"); + if cli.yes == false { + print!("Are you sure to remove {:?}? 
(y/n)", &db_path); + stdout().flush().expect("failed to flush stdout"); - let mut input = String::new(); - stdin().read_line(&mut input)?; - let input = input.trim(); + let mut input = String::new(); + stdin().read_line(&mut input)?; + let input = input.trim(); - match input.chars().nth(0) { - Some('y') | Some('Y') => {}, - _ => { - println!("Aborted"); - return Ok(()); - }, - } - } + match input.chars().nth(0) { + Some('y') | Some('Y') => {} + _ => { + println!("Aborted"); + return Ok(()); + } + } + } - fs::remove_dir_all(&db_path)?; - println!("{:?} removed.", &db_path); + fs::remove_dir_all(&db_path)?; + println!("{:?} removed.", &db_path); - Ok(()) + Ok(()) } -fn parse_address( - address: &str, - port: Option, -) -> Result { - let mut address: SocketAddr = address.parse().map_err( - |_| format!("Invalid address: {}", address) - )?; - if let Some(port) = port { - address.set_port(port); - } +fn parse_address(address: &str, port: Option) -> Result { + let mut address: SocketAddr = address + .parse() + .map_err(|_| format!("Invalid address: {}", address))?; + if let Some(port) = port { + address.set_port(port); + } - Ok(address) + Ok(address) } fn keystore_path(base_path: &Path, chain_id: &str) -> PathBuf { - let mut path = base_path.to_owned(); - path.push("chains"); - path.push(chain_id); - path.push("keystore"); - path + let mut path = base_path.to_owned(); + path.push("chains"); + path.push(chain_id); + path.push("keystore"); + path } fn db_path(base_path: &Path, chain_id: &str) -> PathBuf { - let mut path = base_path.to_owned(); - path.push("chains"); - path.push(chain_id); - path.push("db"); - path + let mut path = base_path.to_owned(); + path.push("chains"); + path.push(chain_id); + path.push("db"); + path } fn network_path(base_path: &Path, chain_id: &str) -> PathBuf { - let mut path = base_path.to_owned(); - path.push("chains"); - path.push(chain_id); - path.push("network"); - path + let mut path = base_path.to_owned(); + path.push("chains"); + path.push(chain_id); + path.push("network"); + path } fn init_logger(pattern: &str) { - use ansi_term::Colour; - - let mut builder = env_logger::Builder::new(); - // Disable info logging by default for some modules: - builder.filter(Some("ws"), log::LevelFilter::Off); - builder.filter(Some("hyper"), log::LevelFilter::Warn); - // Enable info for others. 
- builder.filter(None, log::LevelFilter::Info); - - if let Ok(lvl) = std::env::var("RUST_LOG") { - builder.parse(&lvl); - } - - builder.parse(pattern); - let isatty = atty::is(atty::Stream::Stderr); - let enable_color = isatty; - - builder.format(move |buf, record| { - let now = time::now(); - let timestamp = - time::strftime("%Y-%m-%d %H:%M:%S", &now) - .expect("Error formatting log timestamp"); - - let mut output = if log::max_level() <= log::LevelFilter::Info { - format!("{} {}", Colour::Black.bold().paint(timestamp), record.args()) - } else { - let name = ::std::thread::current() - .name() - .map_or_else(Default::default, |x| format!("{}", Colour::Blue.bold().paint(x))); - let millis = (now.tm_nsec as f32 / 1000000.0).round() as usize; - let timestamp = format!("{}.{:03}", timestamp, millis); - format!( - "{} {} {} {} {}", - Colour::Black.bold().paint(timestamp), - name, - record.level(), - record.target(), - record.args() - ) - }; - - if !enable_color { - output = kill_color(output.as_ref()); - } - - if !isatty && record.level() <= log::Level::Info && atty::is(atty::Stream::Stdout) { - // duplicate INFO/WARN output to console - println!("{}", output); - } - writeln!(buf, "{}", output) - }); - - builder.init(); + use ansi_term::Colour; + + let mut builder = env_logger::Builder::new(); + // Disable info logging by default for some modules: + builder.filter(Some("ws"), log::LevelFilter::Off); + builder.filter(Some("hyper"), log::LevelFilter::Warn); + // Enable info for others. + builder.filter(None, log::LevelFilter::Info); + + if let Ok(lvl) = std::env::var("RUST_LOG") { + builder.parse(&lvl); + } + + builder.parse(pattern); + let isatty = atty::is(atty::Stream::Stderr); + let enable_color = isatty; + + builder.format(move |buf, record| { + let now = time::now(); + let timestamp = + time::strftime("%Y-%m-%d %H:%M:%S", &now).expect("Error formatting log timestamp"); + + let mut output = if log::max_level() <= log::LevelFilter::Info { + format!( + "{} {}", + Colour::Black.bold().paint(timestamp), + record.args() + ) + } else { + let name = ::std::thread::current() + .name() + .map_or_else(Default::default, |x| { + format!("{}", Colour::Blue.bold().paint(x)) + }); + let millis = (now.tm_nsec as f32 / 1000000.0).round() as usize; + let timestamp = format!("{}.{:03}", timestamp, millis); + format!( + "{} {} {} {} {}", + Colour::Black.bold().paint(timestamp), + name, + record.level(), + record.target(), + record.args() + ) + }; + + if !enable_color { + output = kill_color(output.as_ref()); + } + + if !isatty && record.level() <= log::Level::Info && atty::is(atty::Stream::Stdout) { + // duplicate INFO/WARN output to console + println!("{}", output); + } + writeln!(buf, "{}", output) + }); + + builder.init(); } fn kill_color(s: &str) -> String { - lazy_static! { - static ref RE: Regex = Regex::new("\x1b\\[[^m]+m").expect("Error initializing color regex"); - } - RE.replace_all(s, "").to_string() + lazy_static! 
{ + static ref RE: Regex = Regex::new("\x1b\\[[^m]+m").expect("Error initializing color regex"); + } + RE.replace_all(s, "").to_string() } #[cfg(test)] mod tests { - use super::*; - use tempdir::TempDir; - use network::identity::{secp256k1, ed25519}; - - #[test] - fn tests_node_name_good() { - assert!(is_node_name_valid("short name").is_ok()); - } - - #[test] - fn tests_node_name_bad() { - assert!(is_node_name_valid("long names are not very cool for the ui").is_err()); - assert!(is_node_name_valid("Dots.not.Ok").is_err()); - assert!(is_node_name_valid("http://visit.me").is_err()); - assert!(is_node_name_valid("https://visit.me").is_err()); - assert!(is_node_name_valid("www.visit.me").is_err()); - assert!(is_node_name_valid("email@domain").is_err()); - } - - #[test] - fn test_node_key_config_input() { - fn secret_input(net_config_dir: Option) -> error::Result<()> { - NodeKeyType::variants().into_iter().try_for_each(|t| { - let node_key_type = NodeKeyType::from_str(t).unwrap(); - let sk = match node_key_type { - NodeKeyType::Secp256k1 => secp256k1::SecretKey::generate().as_ref().to_vec(), - NodeKeyType::Ed25519 => ed25519::SecretKey::generate().as_ref().to_vec() - }; - let params = NodeKeyParams { - node_key_type, - node_key: Some(format!("{:x}", H256::from_slice(sk.as_ref()))), - node_key_file: None - }; - node_key_config(params, &net_config_dir).and_then(|c| match c { - NodeKeyConfig::Secp256k1(network::Secret::Input(ref ski)) - if node_key_type == NodeKeyType::Secp256k1 && - &sk[..] == ski.as_ref() => Ok(()), - NodeKeyConfig::Ed25519(network::Secret::Input(ref ski)) - if node_key_type == NodeKeyType::Ed25519 && - &sk[..] == ski.as_ref() => Ok(()), - _ => Err(input_err("Unexpected node key config")) - }) - }) - } - - assert!(secret_input(None).is_ok()); - assert!(secret_input(Some("x".to_string())).is_ok()); - } - - #[test] - fn test_node_key_config_file() { - fn secret_file(net_config_dir: Option) -> error::Result<()> { - NodeKeyType::variants().into_iter().try_for_each(|t| { - let node_key_type = NodeKeyType::from_str(t).unwrap(); - let tmp = TempDir::new("alice")?; - let file = tmp.path().join(format!("{}_mysecret", t)).to_path_buf(); - let params = NodeKeyParams { - node_key_type, - node_key: None, - node_key_file: Some(file.clone()) - }; - node_key_config(params, &net_config_dir).and_then(|c| match c { - NodeKeyConfig::Secp256k1(network::Secret::File(ref f)) - if node_key_type == NodeKeyType::Secp256k1 && f == &file => Ok(()), - NodeKeyConfig::Ed25519(network::Secret::File(ref f)) - if node_key_type == NodeKeyType::Ed25519 && f == &file => Ok(()), - _ => Err(input_err("Unexpected node key config")) - }) - }) - } - - assert!(secret_file(None).is_ok()); - assert!(secret_file(Some("x".to_string())).is_ok()); - } - - #[test] - fn test_node_key_config_default() { - fn with_def_params(f: F) -> error::Result<()> - where - F: Fn(NodeKeyParams) -> error::Result<()> - { - NodeKeyType::variants().into_iter().try_for_each(|t| { - let node_key_type = NodeKeyType::from_str(t).unwrap(); - f(NodeKeyParams { - node_key_type, - node_key: None, - node_key_file: None - }) - }) - } - - fn no_config_dir() -> error::Result<()> { - with_def_params(|params| { - let typ = params.node_key_type; - node_key_config::(params, &None) - .and_then(|c| match c { - NodeKeyConfig::Secp256k1(network::Secret::New) - if typ == NodeKeyType::Secp256k1 => Ok(()), - NodeKeyConfig::Ed25519(network::Secret::New) - if typ == NodeKeyType::Ed25519 => Ok(()), - _ => Err(input_err("Unexpected node key config")) - }) - }) - } - - fn 
some_config_dir(net_config_dir: String) -> error::Result<()> { - with_def_params(|params| { - let dir = PathBuf::from(net_config_dir.clone()); - let typ = params.node_key_type; - node_key_config(params, &Some(net_config_dir.clone())) - .and_then(move |c| match c { - NodeKeyConfig::Secp256k1(network::Secret::File(ref f)) - if typ == NodeKeyType::Secp256k1 && - f == &dir.join(NODE_KEY_SECP256K1_FILE) => Ok(()), - NodeKeyConfig::Ed25519(network::Secret::File(ref f)) - if typ == NodeKeyType::Ed25519 && - f == &dir.join(NODE_KEY_ED25519_FILE) => Ok(()), - _ => Err(input_err("Unexpected node key config")) - }) - }) - } - - assert!(no_config_dir().is_ok()); - assert!(some_config_dir("x".to_string()).is_ok()); - } + use super::*; + use network::identity::{ed25519, secp256k1}; + use tempdir::TempDir; + + #[test] + fn tests_node_name_good() { + assert!(is_node_name_valid("short name").is_ok()); + } + + #[test] + fn tests_node_name_bad() { + assert!(is_node_name_valid("long names are not very cool for the ui").is_err()); + assert!(is_node_name_valid("Dots.not.Ok").is_err()); + assert!(is_node_name_valid("http://visit.me").is_err()); + assert!(is_node_name_valid("https://visit.me").is_err()); + assert!(is_node_name_valid("www.visit.me").is_err()); + assert!(is_node_name_valid("email@domain").is_err()); + } + + #[test] + fn test_node_key_config_input() { + fn secret_input(net_config_dir: Option) -> error::Result<()> { + NodeKeyType::variants().into_iter().try_for_each(|t| { + let node_key_type = NodeKeyType::from_str(t).unwrap(); + let sk = match node_key_type { + NodeKeyType::Secp256k1 => secp256k1::SecretKey::generate().as_ref().to_vec(), + NodeKeyType::Ed25519 => ed25519::SecretKey::generate().as_ref().to_vec(), + }; + let params = NodeKeyParams { + node_key_type, + node_key: Some(format!("{:x}", H256::from_slice(sk.as_ref()))), + node_key_file: None, + }; + node_key_config(params, &net_config_dir).and_then(|c| match c { + NodeKeyConfig::Secp256k1(network::Secret::Input(ref ski)) + if node_key_type == NodeKeyType::Secp256k1 && &sk[..] == ski.as_ref() => + { + Ok(()) + } + NodeKeyConfig::Ed25519(network::Secret::Input(ref ski)) + if node_key_type == NodeKeyType::Ed25519 && &sk[..] 
== ski.as_ref() => + { + Ok(()) + } + _ => Err(input_err("Unexpected node key config")), + }) + }) + } + + assert!(secret_input(None).is_ok()); + assert!(secret_input(Some("x".to_string())).is_ok()); + } + + #[test] + fn test_node_key_config_file() { + fn secret_file(net_config_dir: Option) -> error::Result<()> { + NodeKeyType::variants().into_iter().try_for_each(|t| { + let node_key_type = NodeKeyType::from_str(t).unwrap(); + let tmp = TempDir::new("alice")?; + let file = tmp.path().join(format!("{}_mysecret", t)).to_path_buf(); + let params = NodeKeyParams { + node_key_type, + node_key: None, + node_key_file: Some(file.clone()), + }; + node_key_config(params, &net_config_dir).and_then(|c| match c { + NodeKeyConfig::Secp256k1(network::Secret::File(ref f)) + if node_key_type == NodeKeyType::Secp256k1 && f == &file => + { + Ok(()) + } + NodeKeyConfig::Ed25519(network::Secret::File(ref f)) + if node_key_type == NodeKeyType::Ed25519 && f == &file => + { + Ok(()) + } + _ => Err(input_err("Unexpected node key config")), + }) + }) + } + + assert!(secret_file(None).is_ok()); + assert!(secret_file(Some("x".to_string())).is_ok()); + } + + #[test] + fn test_node_key_config_default() { + fn with_def_params(f: F) -> error::Result<()> + where + F: Fn(NodeKeyParams) -> error::Result<()>, + { + NodeKeyType::variants().into_iter().try_for_each(|t| { + let node_key_type = NodeKeyType::from_str(t).unwrap(); + f(NodeKeyParams { + node_key_type, + node_key: None, + node_key_file: None, + }) + }) + } + + fn no_config_dir() -> error::Result<()> { + with_def_params(|params| { + let typ = params.node_key_type; + node_key_config::(params, &None).and_then(|c| match c { + NodeKeyConfig::Secp256k1(network::Secret::New) + if typ == NodeKeyType::Secp256k1 => + { + Ok(()) + } + NodeKeyConfig::Ed25519(network::Secret::New) if typ == NodeKeyType::Ed25519 => { + Ok(()) + } + _ => Err(input_err("Unexpected node key config")), + }) + }) + } + + fn some_config_dir(net_config_dir: String) -> error::Result<()> { + with_def_params(|params| { + let dir = PathBuf::from(net_config_dir.clone()); + let typ = params.node_key_type; + node_key_config(params, &Some(net_config_dir.clone())).and_then(move |c| match c { + NodeKeyConfig::Secp256k1(network::Secret::File(ref f)) + if typ == NodeKeyType::Secp256k1 + && f == &dir.join(NODE_KEY_SECP256K1_FILE) => + { + Ok(()) + } + NodeKeyConfig::Ed25519(network::Secret::File(ref f)) + if typ == NodeKeyType::Ed25519 && f == &dir.join(NODE_KEY_ED25519_FILE) => + { + Ok(()) + } + _ => Err(input_err("Unexpected node key config")), + }) + }) + } + + assert!(no_config_dir().is_ok()); + assert!(some_config_dir("x".to_string()).is_ok()); + } } diff --git a/core/cli/src/params.rs b/core/cli/src/params.rs index 321cf17efb..4e78ae0589 100644 --- a/core/cli/src/params.rs +++ b/core/cli/src/params.rs @@ -16,458 +16,475 @@ use crate::traits::{AugmentClap, GetLogFilter}; -use std::path::PathBuf; -use structopt::{StructOpt, clap::{arg_enum, _clap_count_exprs, App, AppSettings, SubCommand, Arg}}; use client; +use std::path::PathBuf; +use structopt::{ + clap::{arg_enum, App, AppSettings, Arg, SubCommand, _clap_count_exprs}, + StructOpt, +}; /// Auxialary macro to implement `GetLogFilter` for all types that have the `shared_params` field. macro_rules! 
impl_get_log_filter { - ( $type:ident ) => { - impl $crate::GetLogFilter for $type { - fn get_log_filter(&self) -> Option { - self.shared_params.get_log_filter() - } - } - } + ( $type:ident ) => { + impl $crate::GetLogFilter for $type { + fn get_log_filter(&self) -> Option { + self.shared_params.get_log_filter() + } + } + }; } arg_enum! { - /// How to execute blocks - #[derive(Debug, Clone)] - pub enum ExecutionStrategy { - Native, - Wasm, - Both, - NativeElseWasm, - NativeWhenPossible, - } + /// How to execute blocks + #[derive(Debug, Clone)] + pub enum ExecutionStrategy { + Native, + Wasm, + Both, + NativeElseWasm, + NativeWhenPossible, + } } impl Into for ExecutionStrategy { - fn into(self) -> client::ExecutionStrategy { - match self { - ExecutionStrategy::Native => client::ExecutionStrategy::NativeWhenPossible, - ExecutionStrategy::Wasm => client::ExecutionStrategy::AlwaysWasm, - ExecutionStrategy::Both => client::ExecutionStrategy::Both, - ExecutionStrategy::NativeElseWasm => client::ExecutionStrategy::NativeElseWasm, - ExecutionStrategy::NativeWhenPossible => client::ExecutionStrategy::NativeWhenPossible, - } - } + fn into(self) -> client::ExecutionStrategy { + match self { + ExecutionStrategy::Native => client::ExecutionStrategy::NativeWhenPossible, + ExecutionStrategy::Wasm => client::ExecutionStrategy::AlwaysWasm, + ExecutionStrategy::Both => client::ExecutionStrategy::Both, + ExecutionStrategy::NativeElseWasm => client::ExecutionStrategy::NativeElseWasm, + ExecutionStrategy::NativeWhenPossible => client::ExecutionStrategy::NativeWhenPossible, + } + } } arg_enum! { - /// How to execute blocks - #[derive(Debug, Clone)] - pub enum OffchainWorkerEnabled { - Always, - Never, - WhenValidating, - } + /// How to execute blocks + #[derive(Debug, Clone)] + pub enum OffchainWorkerEnabled { + Always, + Never, + WhenValidating, + } } /// Shared parameters used by all `CoreParams`. #[derive(Debug, StructOpt, Clone)] pub struct SharedParams { - /// Specify the chain specification (one of dev, local or staging) - #[structopt(long = "chain", value_name = "CHAIN_SPEC")] - pub chain: Option, + /// Specify the chain specification (one of dev, local or staging) + #[structopt(long = "chain", value_name = "CHAIN_SPEC")] + pub chain: Option, - /// Specify the development chain - #[structopt(long = "dev")] - pub dev: bool, + /// Specify the development chain + #[structopt(long = "dev")] + pub dev: bool, - /// Specify custom base path. - #[structopt(long = "base-path", short = "d", value_name = "PATH", parse(from_os_str))] - pub base_path: Option, + /// Specify custom base path. + #[structopt( + long = "base-path", + short = "d", + value_name = "PATH", + parse(from_os_str) + )] + pub base_path: Option, - /// Sets a custom logging filter - #[structopt(short = "l", long = "log", value_name = "LOG_PATTERN")] - pub log: Option, + /// Sets a custom logging filter + #[structopt(short = "l", long = "log", value_name = "LOG_PATTERN")] + pub log: Option, } impl GetLogFilter for SharedParams { - fn get_log_filter(&self) -> Option { - self.log.clone() - } + fn get_log_filter(&self) -> Option { + self.log.clone() + } } /// Parameters used to create the network configuration. 
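`NetworkConfigurationParams` below feeds `fill_network_configuration` above: with no `--listen-addr` given, the node listens on all IPv4 interfaces, and `--port` falls back to 30333. The real code chains `Protocol::Ip4` and `Protocol::Tcp` components into a multiaddress; this sketch only reproduces the fallback decision, in textual multiaddr form:

    fn default_listen_multiaddr(cli_port: Option<u16>) -> String {
        // An unspecified --port falls back to the well-known default, 30333.
        let port = cli_port.unwrap_or(30333);
        format!("/ip4/0.0.0.0/tcp/{}", port)
    }

    fn main() {
        assert_eq!(default_listen_multiaddr(None), "/ip4/0.0.0.0/tcp/30333");
        assert_eq!(default_listen_multiaddr(Some(30444)), "/ip4/0.0.0.0/tcp/30444");
    }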
#[derive(Debug, StructOpt, Clone)] pub struct NetworkConfigurationParams { - /// Specify a list of bootnodes - #[structopt(long = "bootnodes", value_name = "URL")] - pub bootnodes: Vec, + /// Specify a list of bootnodes + #[structopt(long = "bootnodes", value_name = "URL")] + pub bootnodes: Vec, - /// Specify a list of reserved node addresses - #[structopt(long = "reserved-nodes", value_name = "URL")] - pub reserved_nodes: Vec, + /// Specify a list of reserved node addresses + #[structopt(long = "reserved-nodes", value_name = "URL")] + pub reserved_nodes: Vec, - /// Listen on this multiaddress - #[structopt(long = "listen-addr", value_name = "LISTEN_ADDR")] - pub listen_addr: Vec, + /// Listen on this multiaddress + #[structopt(long = "listen-addr", value_name = "LISTEN_ADDR")] + pub listen_addr: Vec, - /// Specify p2p protocol TCP port. Only used if --listen-addr is not specified. - #[structopt(long = "port", value_name = "PORT")] - pub port: Option, + /// Specify p2p protocol TCP port. Only used if --listen-addr is not specified. + #[structopt(long = "port", value_name = "PORT")] + pub port: Option, - /// Specify the number of outgoing connections we're trying to maintain - #[structopt(long = "out-peers", value_name = "OUT_PEERS", default_value = "25")] - pub out_peers: u32, + /// Specify the number of outgoing connections we're trying to maintain + #[structopt(long = "out-peers", value_name = "OUT_PEERS", default_value = "25")] + pub out_peers: u32, - /// Specify the maximum number of incoming connections we're accepting - #[structopt(long = "in-peers", value_name = "IN_PEERS", default_value = "25")] - pub in_peers: u32, + /// Specify the maximum number of incoming connections we're accepting + #[structopt(long = "in-peers", value_name = "IN_PEERS", default_value = "25")] + pub in_peers: u32, - /// By default, the network will use mDNS to discover other nodes on the local network. This - /// disables it. - #[structopt(long = "no-mdns")] - pub no_mdns: bool, + /// By default, the network will use mDNS to discover other nodes on the local network. This + /// disables it. + #[structopt(long = "no-mdns")] + pub no_mdns: bool, - #[allow(missing_docs)] - #[structopt(flatten)] - pub node_key_params: NodeKeyParams + #[allow(missing_docs)] + #[structopt(flatten)] + pub node_key_params: NodeKeyParams, } arg_enum! { - #[derive(Debug, Copy, Clone, PartialEq, Eq)] - pub enum NodeKeyType { - Secp256k1, - Ed25519 - } + #[derive(Debug, Copy, Clone, PartialEq, Eq)] + pub enum NodeKeyType { + Secp256k1, + Ed25519 + } } /// Parameters used to create the `NodeKeyConfig`, which determines the keypair /// used for libp2p networking. #[derive(Debug, StructOpt, Clone)] pub struct NodeKeyParams { - /// The secret key to use for libp2p networking. - /// - /// The value is a string that is parsed according to the choice of - /// `--node-key-type` as follows: - /// - /// `secp256k1`: - /// The value is parsed as a hex-encoded Secp256k1 32 bytes secret key, - /// i.e. 64 hex characters. - /// - /// `ed25519`: - /// The value is parsed as a hex-encoded Ed25519 32 bytes secret key, - /// i.e. 64 hex characters. - /// - /// The value of this option takes precedence over `--node-key-file`. - /// - /// WARNING: Secrets provided as command-line arguments are easily exposed. - /// Use of this option should be limited to development and testing. To use - /// an externally managed secret key, use `--node-key-file` instead. 
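The `--node-key`/`--node-key-file` precedence documented above is implemented by `node_key_config` in lib.rs. A simplified sketch of just the source-selection chain, with hex validation omitted, a stand-in `Secret` enum, and an illustrative file name in place of the type-specific `NODE_KEY_*_FILE` constants:

    use std::path::PathBuf;

    #[derive(Debug, PartialEq)]
    enum Secret {
        Input(String), // --node-key
        File(PathBuf), // --node-key-file, or the default file in the net config dir
        New,           // freshly generated
    }

    fn select_secret(
        node_key: Option<String>,
        node_key_file: Option<PathBuf>,
        net_config_dir: Option<PathBuf>,
        default_file_name: &str,
    ) -> Secret {
        node_key
            .map(Secret::Input)
            .or_else(|| node_key_file.map(Secret::File))
            .or_else(|| net_config_dir.map(|d| Secret::File(d.join(default_file_name))))
            .unwrap_or(Secret::New)
    }

    fn main() {
        let dir = PathBuf::from("/tmp/net-config");
        assert_eq!(
            select_secret(None, None, Some(dir.clone()), "secret_key"),
            Secret::File(dir.join("secret_key"))
        );
        assert_eq!(select_secret(None, None, None, "secret_key"), Secret::New);
    }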
- #[structopt(long = "node-key", value_name = "KEY")] - pub node_key: Option, - - /// The type of secret key to use for libp2p networking. - /// - /// The secret key of the node is obtained as follows: - /// - /// * If the `--node-key` option is given, the value is parsed as a secret key - /// according to the type. See the documentation for `--node-key`. - /// - /// * If the `--node-key-file` option is given, the secret key is read from the - /// specified file. See the documentation for `--node-key-file`. - /// - /// * Otherwise, the secret key is read from a file with a predetermined, - /// type-specific name from the chain-specific network config directory - /// inside the base directory specified by `--base-dir`. If this file does - /// not exist, it is created with a newly generated secret key of the - /// chosen type. - /// - /// The node's secret key determines the corresponding public key and hence the - /// node's peer ID in the context of libp2p. - /// - /// NOTE: The current default key type is `secp256k1` for a transition period only - /// but will eventually change to `ed25519` in a future release. To continue using - /// `secp256k1` keys, use `--node-key-type=secp256k1`. - #[structopt( - long = "node-key-type", - value_name = "TYPE", - raw( - possible_values = "&NodeKeyType::variants()", - case_insensitive = "true", - default_value = r#""Secp256k1""# - ) - )] - pub node_key_type: NodeKeyType, - - /// The file from which to read the node's secret key to use for libp2p networking. - /// - /// The contents of the file are parsed according to the choice of `--node-key-type` - /// as follows: - /// - /// `secp256k1`: - /// The file must contain an unencoded 32 bytes Secp256k1 secret key. - /// - /// `ed25519`: - /// The file must contain an unencoded 32 bytes Ed25519 secret key. - /// - /// If the file does not exist, it is created with a newly generated secret key of - /// the chosen type. - #[structopt(long = "node-key-file", value_name = "FILE")] - pub node_key_file: Option + /// The secret key to use for libp2p networking. + /// + /// The value is a string that is parsed according to the choice of + /// `--node-key-type` as follows: + /// + /// `secp256k1`: + /// The value is parsed as a hex-encoded Secp256k1 32 bytes secret key, + /// i.e. 64 hex characters. + /// + /// `ed25519`: + /// The value is parsed as a hex-encoded Ed25519 32 bytes secret key, + /// i.e. 64 hex characters. + /// + /// The value of this option takes precedence over `--node-key-file`. + /// + /// WARNING: Secrets provided as command-line arguments are easily exposed. + /// Use of this option should be limited to development and testing. To use + /// an externally managed secret key, use `--node-key-file` instead. + #[structopt(long = "node-key", value_name = "KEY")] + pub node_key: Option, + + /// The type of secret key to use for libp2p networking. + /// + /// The secret key of the node is obtained as follows: + /// + /// * If the `--node-key` option is given, the value is parsed as a secret key + /// according to the type. See the documentation for `--node-key`. + /// + /// * If the `--node-key-file` option is given, the secret key is read from the + /// specified file. See the documentation for `--node-key-file`. + /// + /// * Otherwise, the secret key is read from a file with a predetermined, + /// type-specific name from the chain-specific network config directory + /// inside the base directory specified by `--base-dir`. 
If this file does + /// not exist, it is created with a newly generated secret key of the + /// chosen type. + /// + /// The node's secret key determines the corresponding public key and hence the + /// node's peer ID in the context of libp2p. + /// + /// NOTE: The current default key type is `secp256k1` for a transition period only + /// but will eventually change to `ed25519` in a future release. To continue using + /// `secp256k1` keys, use `--node-key-type=secp256k1`. + #[structopt( + long = "node-key-type", + value_name = "TYPE", + raw( + possible_values = "&NodeKeyType::variants()", + case_insensitive = "true", + default_value = r#""Secp256k1""# + ) + )] + pub node_key_type: NodeKeyType, + + /// The file from which to read the node's secret key to use for libp2p networking. + /// + /// The contents of the file are parsed according to the choice of `--node-key-type` + /// as follows: + /// + /// `secp256k1`: + /// The file must contain an unencoded 32 bytes Secp256k1 secret key. + /// + /// `ed25519`: + /// The file must contain an unencoded 32 bytes Ed25519 secret key. + /// + /// If the file does not exist, it is created with a newly generated secret key of + /// the chosen type. + #[structopt(long = "node-key-file", value_name = "FILE")] + pub node_key_file: Option, } /// Parameters used to create the pool configuration. #[derive(Debug, StructOpt, Clone)] pub struct TransactionPoolParams { - /// Maximum number of transactions in the transaction pool. - #[structopt(long = "pool-limit", value_name = "COUNT", default_value = "512")] - pub pool_limit: usize, - /// Maximum number of kilobytes of all transactions stored in the pool. - #[structopt(long = "pool-kbytes", value_name = "COUNT", default_value="10240")] - pub pool_kbytes: usize, + /// Maximum number of transactions in the transaction pool. + #[structopt(long = "pool-limit", value_name = "COUNT", default_value = "512")] + pub pool_limit: usize, + /// Maximum number of kilobytes of all transactions stored in the pool. + #[structopt(long = "pool-kbytes", value_name = "COUNT", default_value = "10240")] + pub pool_kbytes: usize, } /// Execution strategies parameters. #[derive(Debug, StructOpt, Clone)] pub struct ExecutionStrategies { - /// The means of execution used when calling into the runtime while syncing blocks. - #[structopt( - long = "syncing-execution", - value_name = "STRATEGY", - raw( - possible_values = "&ExecutionStrategy::variants()", - case_insensitive = "true", - default_value = r#""NativeElseWasm""# - ) - )] - pub syncing_execution: ExecutionStrategy, - - /// The means of execution used when calling into the runtime while importing blocks. - #[structopt( - long = "importing-execution", - value_name = "STRATEGY", - raw( - possible_values = "&ExecutionStrategy::variants()", - case_insensitive = "true", - default_value = r#""NativeElseWasm""# - ) - )] - pub importing_execution: ExecutionStrategy, - - /// The means of execution used when calling into the runtime while constructing blocks. - #[structopt( - long = "block-construction-execution", - value_name = "STRATEGY", - raw( - possible_values = "&ExecutionStrategy::variants()", - case_insensitive = "true", - default_value = r#""Wasm""# - ) - )] - pub block_construction_execution: ExecutionStrategy, - - /// The means of execution used when calling into the runtime while constructing blocks. 
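One subtlety of the execution-strategy flags: the `Into` implementation earlier in this file maps both `Native` and `NativeWhenPossible` to `client::ExecutionStrategy::NativeWhenPossible`, so the two spellings are interchangeable on the command line. A sketch with local stand-ins for both enums:

    // Stand-in for `client::ExecutionStrategy`; the real type lives in core/client.
    #[derive(Debug, PartialEq)]
    enum ClientStrategy {
        NativeWhenPossible,
        AlwaysWasm,
        Both,
        NativeElseWasm,
    }

    #[derive(Debug, Clone, Copy)]
    enum CliStrategy {
        Native,
        Wasm,
        Both,
        NativeElseWasm,
        NativeWhenPossible,
    }

    impl From<CliStrategy> for ClientStrategy {
        fn from(s: CliStrategy) -> Self {
            match s {
                // Two CLI spellings deliberately collapse into one client variant.
                CliStrategy::Native | CliStrategy::NativeWhenPossible => {
                    ClientStrategy::NativeWhenPossible
                }
                CliStrategy::Wasm => ClientStrategy::AlwaysWasm,
                CliStrategy::Both => ClientStrategy::Both,
                CliStrategy::NativeElseWasm => ClientStrategy::NativeElseWasm,
            }
        }
    }

    fn main() {
        assert_eq!(
            ClientStrategy::from(CliStrategy::Native),
            ClientStrategy::from(CliStrategy::NativeWhenPossible)
        );
    }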
- #[structopt( - long = "offchain-worker-execution", - value_name = "STRATEGY", - raw( - possible_values = "&ExecutionStrategy::variants()", - case_insensitive = "true", - default_value = r#""NativeWhenPossible""# - ) - )] - pub offchain_worker_execution: ExecutionStrategy, - - /// The means of execution used when calling into the runtime while not syncing, importing or constructing blocks. - #[structopt( - long = "other-execution", - value_name = "STRATEGY", - raw( - possible_values = "&ExecutionStrategy::variants()", - case_insensitive = "true", - default_value = r#""Wasm""# - ) - )] - pub other_execution: ExecutionStrategy, + /// The means of execution used when calling into the runtime while syncing blocks. + #[structopt( + long = "syncing-execution", + value_name = "STRATEGY", + raw( + possible_values = "&ExecutionStrategy::variants()", + case_insensitive = "true", + default_value = r#""NativeElseWasm""# + ) + )] + pub syncing_execution: ExecutionStrategy, + + /// The means of execution used when calling into the runtime while importing blocks. + #[structopt( + long = "importing-execution", + value_name = "STRATEGY", + raw( + possible_values = "&ExecutionStrategy::variants()", + case_insensitive = "true", + default_value = r#""NativeElseWasm""# + ) + )] + pub importing_execution: ExecutionStrategy, + + /// The means of execution used when calling into the runtime while constructing blocks. + #[structopt( + long = "block-construction-execution", + value_name = "STRATEGY", + raw( + possible_values = "&ExecutionStrategy::variants()", + case_insensitive = "true", + default_value = r#""Wasm""# + ) + )] + pub block_construction_execution: ExecutionStrategy, + + /// The means of execution used when calling into the runtime while constructing blocks. + #[structopt( + long = "offchain-worker-execution", + value_name = "STRATEGY", + raw( + possible_values = "&ExecutionStrategy::variants()", + case_insensitive = "true", + default_value = r#""NativeWhenPossible""# + ) + )] + pub offchain_worker_execution: ExecutionStrategy, + + /// The means of execution used when calling into the runtime while not syncing, importing or constructing blocks. + #[structopt( + long = "other-execution", + value_name = "STRATEGY", + raw( + possible_values = "&ExecutionStrategy::variants()", + case_insensitive = "true", + default_value = r#""Wasm""# + ) + )] + pub other_execution: ExecutionStrategy, } /// The `run` command used to run a node. 
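`RunCmd` below carries the `--pruning` flag that `create_run_node_config` earlier translates into a `PruningMode`: the literal `archive` selects full archiving, absence selects the crate default, and anything else must parse as a block count. A sketch with a local enum standing in for the real `PruningMode`; the assumption that the default keeps 256 blocks follows the help text, not necessarily the crate internals:

    #[derive(Debug, PartialEq)]
    enum PruningMode {
        ArchiveAll,
        KeepBlocks(u32),
    }

    impl Default for PruningMode {
        fn default() -> Self {
            // Assumption for this sketch only: default mirrors the documented 256.
            PruningMode::KeepBlocks(256)
        }
    }

    fn parse_pruning(cli: Option<&str>) -> Result<PruningMode, String> {
        match cli {
            Some("archive") => Ok(PruningMode::ArchiveAll),
            None => Ok(PruningMode::default()),
            Some(s) => s
                .parse()
                .map(PruningMode::KeepBlocks)
                .map_err(|_| "Invalid pruning mode specified".to_string()),
        }
    }

    fn main() {
        assert_eq!(parse_pruning(Some("archive")), Ok(PruningMode::ArchiveAll));
        assert_eq!(parse_pruning(Some("1000")), Ok(PruningMode::KeepBlocks(1000)));
        assert!(parse_pruning(Some("bogus")).is_err());
    }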
#[derive(Debug, StructOpt, Clone)] pub struct RunCmd { - /// Specify custom keystore path - #[structopt(long = "keystore-path", value_name = "PATH", parse(from_os_str))] - pub keystore_path: Option, - - /// Specify additional key seed - #[structopt(long = "key", value_name = "STRING")] - pub key: Option, - - /// Enable validator mode - #[structopt(long = "validator")] - pub validator: bool, - - /// Disable GRANDPA when running in validator mode - #[structopt(long = "no-grandpa")] - pub no_grandpa: bool, - - /// Run in light client mode - #[structopt(long = "light")] - pub light: bool, - - /// Limit the memory the database cache can use - #[structopt(long = "db-cache", value_name = "MiB")] - pub database_cache_size: Option, - - /// Listen to all RPC interfaces (default is local) - #[structopt(long = "rpc-external")] - pub rpc_external: bool, - - /// Listen to all Websocket interfaces (default is local) - #[structopt(long = "ws-external")] - pub ws_external: bool, - - /// Specify HTTP RPC server TCP port - #[structopt(long = "rpc-port", value_name = "PORT")] - pub rpc_port: Option, - - /// Specify WebSockets RPC server TCP port - #[structopt(long = "ws-port", value_name = "PORT")] - pub ws_port: Option, - - /// Specify the pruning mode, a number of blocks to keep or 'archive'. Default is 256. - #[structopt(long = "pruning", value_name = "PRUNING_MODE")] - pub pruning: Option, - - /// The human-readable name for this node, as reported to the telemetry server, if enabled - #[structopt(long = "name", value_name = "NAME")] - pub name: Option, - - /// Disable connecting to the Substrate telemetry server (telemetry is on by default on global chains). - #[structopt(long = "no-telemetry")] - pub no_telemetry: bool, - - /// The URL of the telemetry server to connect to. This flag can be passed multiple times - /// as a mean to specify multiple telemetry endpoints. Verbosity levels range from 0-9, with - /// 0 denoting the least verbosity. If no verbosity level is specified the default is 0. - #[structopt(long = "telemetry-url", value_name = "URL VERBOSITY", parse(try_from_str = "parse_telemetry_endpoints"))] - pub telemetry_endpoints: Vec<(String, u8)>, - - /// Should execute offchain workers on every block. By default it's only enabled for nodes that are authoring new - /// blocks. - #[structopt( - long = "offchain-worker", - value_name = "ENABLED", - raw( - possible_values = "&OffchainWorkerEnabled::variants()", - case_insensitive = "true", - default_value = r#""WhenValidating""# - ) - )] - pub offchain_worker: OffchainWorkerEnabled, - - #[allow(missing_docs)] - #[structopt(flatten)] - pub execution_strategies: ExecutionStrategies, - - #[allow(missing_docs)] - #[structopt(flatten)] - pub shared_params: SharedParams, - - #[allow(missing_docs)] - #[structopt(flatten)] - pub network_config: NetworkConfigurationParams, - - #[allow(missing_docs)] - #[structopt(flatten)] - pub pool_config: TransactionPoolParams, - - #[allow(missing_docs)] - #[structopt(flatten)] - pub keyring: Keyring, - - /// Enable authoring even when offline. 
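The `--rpc-external` and `--ws-external` flags above choose the bind interface in `create_run_node_config`, with 9933 as the default HTTP RPC port and 9944 for WebSockets. A condensed sketch of that selection:

    use std::net::SocketAddr;

    fn rpc_bind_addr(external: bool, default_port: u16, cli_port: Option<u16>) -> SocketAddr {
        // External access binds all interfaces; the default is loopback only.
        let interface = if external { "0.0.0.0" } else { "127.0.0.1" };
        let mut addr: SocketAddr = format!("{}:{}", interface, default_port)
            .parse()
            .expect("hard-coded interface and port form a valid socket address");
        if let Some(port) = cli_port {
            addr.set_port(port);
        }
        addr
    }

    fn main() {
        assert_eq!(rpc_bind_addr(false, 9933, None).to_string(), "127.0.0.1:9933");
        assert_eq!(rpc_bind_addr(true, 9944, Some(9999)).to_string(), "0.0.0.0:9999");
    }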
- #[structopt(long = "force-authoring")] - pub force_authoring: bool, + /// Specify custom keystore path + #[structopt(long = "keystore-path", value_name = "PATH", parse(from_os_str))] + pub keystore_path: Option, + + /// Specify additional key seed + #[structopt(long = "key", value_name = "STRING")] + pub key: Option, + + /// Enable validator mode + #[structopt(long = "validator")] + pub validator: bool, + + /// Disable GRANDPA when running in validator mode + #[structopt(long = "no-grandpa")] + pub no_grandpa: bool, + + /// Run in light client mode + #[structopt(long = "light")] + pub light: bool, + + /// Limit the memory the database cache can use + #[structopt(long = "db-cache", value_name = "MiB")] + pub database_cache_size: Option, + + /// Listen to all RPC interfaces (default is local) + #[structopt(long = "rpc-external")] + pub rpc_external: bool, + + /// Listen to all Websocket interfaces (default is local) + #[structopt(long = "ws-external")] + pub ws_external: bool, + + /// Specify HTTP RPC server TCP port + #[structopt(long = "rpc-port", value_name = "PORT")] + pub rpc_port: Option, + + /// Specify WebSockets RPC server TCP port + #[structopt(long = "ws-port", value_name = "PORT")] + pub ws_port: Option, + + /// Specify the pruning mode, a number of blocks to keep or 'archive'. Default is 256. + #[structopt(long = "pruning", value_name = "PRUNING_MODE")] + pub pruning: Option, + + /// The human-readable name for this node, as reported to the telemetry server, if enabled + #[structopt(long = "name", value_name = "NAME")] + pub name: Option, + + /// Disable connecting to the Substrate telemetry server (telemetry is on by default on global chains). + #[structopt(long = "no-telemetry")] + pub no_telemetry: bool, + + /// The URL of the telemetry server to connect to. This flag can be passed multiple times + /// as a mean to specify multiple telemetry endpoints. Verbosity levels range from 0-9, with + /// 0 denoting the least verbosity. If no verbosity level is specified the default is 0. + #[structopt( + long = "telemetry-url", + value_name = "URL VERBOSITY", + parse(try_from_str = "parse_telemetry_endpoints") + )] + pub telemetry_endpoints: Vec<(String, u8)>, + + /// Should execute offchain workers on every block. By default it's only enabled for nodes that are authoring new + /// blocks. + #[structopt( + long = "offchain-worker", + value_name = "ENABLED", + raw( + possible_values = "&OffchainWorkerEnabled::variants()", + case_insensitive = "true", + default_value = r#""WhenValidating""# + ) + )] + pub offchain_worker: OffchainWorkerEnabled, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub execution_strategies: ExecutionStrategies, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub shared_params: SharedParams, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub network_config: NetworkConfigurationParams, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub pool_config: TransactionPoolParams, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub keyring: Keyring, + + /// Enable authoring even when offline. + #[structopt(long = "force-authoring")] + pub force_authoring: bool, } /// Stores all required Cli values for a keyring test account. struct KeyringTestAccountCliValues { - help: String, - conflicts_with: Vec, - name: String, - variant: keyring::AuthorityKeyring, + help: String, + conflicts_with: Vec, + name: String, + variant: keyring::AuthorityKeyring, } lazy_static::lazy_static! { - /// The Cli values for all test accounts. 
- static ref TEST_ACCOUNTS_CLI_VALUES: Vec = { - keyring::AuthorityKeyring::iter().map(|a| { - let help = format!("Shortcut for `--key //{} --name {}`.", a, a); - let conflicts_with = keyring::AuthorityKeyring::iter() - .filter(|b| a != *b) - .map(|b| b.to_string().to_lowercase()) - .chain(["name", "key"].iter().map(|s| s.to_string())) - .collect::>(); - let name = a.to_string().to_lowercase(); - - KeyringTestAccountCliValues { - help, - conflicts_with, - name, - variant: a, - } - }).collect() - }; + /// The Cli values for all test accounts. + static ref TEST_ACCOUNTS_CLI_VALUES: Vec = { + keyring::AuthorityKeyring::iter().map(|a| { + let help = format!("Shortcut for `--key //{} --name {}`.", a, a); + let conflicts_with = keyring::AuthorityKeyring::iter() + .filter(|b| a != *b) + .map(|b| b.to_string().to_lowercase()) + .chain(["name", "key"].iter().map(|s| s.to_string())) + .collect::>(); + let name = a.to_string().to_lowercase(); + + KeyringTestAccountCliValues { + help, + conflicts_with, + name, + variant: a, + } + }).collect() + }; } /// Wrapper for exposing the keyring test accounts into the Cli. #[derive(Debug, Clone)] pub struct Keyring { - pub account: Option, + pub account: Option, } impl StructOpt for Keyring { - fn clap<'a, 'b>() -> App<'a, 'b> { - unimplemented!("Should not be called for `TestAccounts`.") - } + fn clap<'a, 'b>() -> App<'a, 'b> { + unimplemented!("Should not be called for `TestAccounts`.") + } - fn from_clap(m: &::structopt::clap::ArgMatches) -> Self { - Keyring { - account: TEST_ACCOUNTS_CLI_VALUES.iter().find(|a| m.is_present(&a.name)).map(|a| a.variant), - } - } + fn from_clap(m: &::structopt::clap::ArgMatches) -> Self { + Keyring { + account: TEST_ACCOUNTS_CLI_VALUES + .iter() + .find(|a| m.is_present(&a.name)) + .map(|a| a.variant), + } + } } impl AugmentClap for Keyring { - fn augment_clap<'a, 'b>(app: App<'a, 'b>) -> App<'a, 'b> { - TEST_ACCOUNTS_CLI_VALUES.iter().fold(app, |app, a| { - let conflicts_with_strs = a.conflicts_with.iter().map(|s| s.as_str()).collect::>(); - - app.arg( - Arg::with_name(&a.name) - .long(&a.name) - .help(&a.help) - .conflicts_with_all(&conflicts_with_strs) - .takes_value(false) - ) - }) - } + fn augment_clap<'a, 'b>(app: App<'a, 'b>) -> App<'a, 'b> { + TEST_ACCOUNTS_CLI_VALUES.iter().fold(app, |app, a| { + let conflicts_with_strs = a + .conflicts_with + .iter() + .map(|s| s.as_str()) + .collect::>(); + + app.arg( + Arg::with_name(&a.name) + .long(&a.name) + .help(&a.help) + .conflicts_with_all(&conflicts_with_strs) + .takes_value(false), + ) + }) + } } impl Keyring { - fn is_subcommand() -> bool { - false - } + fn is_subcommand() -> bool { + false + } } /// Default to verbosity level 0, if none is provided. fn parse_telemetry_endpoints(s: &str) -> Result<(String, u8), Box> { - let pos = s.find(' '); - match pos { - None => { - Ok((s.to_owned(), 0)) - }, - Some(pos_) => { - let verbosity = s[pos_ + 1..].parse()?; - let url = s[..pos_].parse()?; - Ok((url, verbosity)) - } - } + let pos = s.find(' '); + match pos { + None => Ok((s.to_owned(), 0)), + Some(pos_) => { + let verbosity = s[pos_ + 1..].parse()?; + let url = s[..pos_].parse()?; + Ok((url, verbosity)) + } + } } impl_augment_clap!(RunCmd); @@ -476,17 +493,17 @@ impl_get_log_filter!(RunCmd); /// The `build-spec` command used to build a specification. #[derive(Debug, StructOpt, Clone)] pub struct BuildSpecCmd { - /// Force raw genesis storage output. - #[structopt(long = "raw")] - pub raw: bool, + /// Force raw genesis storage output. 
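`parse_telemetry_endpoints` above splits each `--telemetry-url` value on the first space and defaults the verbosity to 0. Restated standalone with a plain `String` error type; the endpoint URL in `main` is purely illustrative:

    fn parse_telemetry_endpoints(s: &str) -> Result<(String, u8), String> {
        match s.find(' ') {
            // No verbosity given: default to 0.
            None => Ok((s.to_owned(), 0)),
            Some(pos) => {
                let verbosity: u8 = s[pos + 1..]
                    .parse()
                    .map_err(|e| format!("invalid verbosity: {}", e))?;
                Ok((s[..pos].to_owned(), verbosity))
            }
        }
    }

    fn main() {
        assert_eq!(
            parse_telemetry_endpoints("wss://telemetry.example.org 5"),
            Ok(("wss://telemetry.example.org".to_owned(), 5))
        );
        assert_eq!(
            parse_telemetry_endpoints("wss://telemetry.example.org"),
            Ok(("wss://telemetry.example.org".to_owned(), 0))
        );
    }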
+ #[structopt(long = "raw")] + pub raw: bool, - #[allow(missing_docs)] - #[structopt(flatten)] - pub shared_params: SharedParams, + #[allow(missing_docs)] + #[structopt(flatten)] + pub shared_params: SharedParams, - #[allow(missing_docs)] - #[structopt(flatten)] - pub node_key_params: NodeKeyParams, + #[allow(missing_docs)] + #[structopt(flatten)] + pub node_key_params: NodeKeyParams, } impl_get_log_filter!(BuildSpecCmd); @@ -494,25 +511,25 @@ impl_get_log_filter!(BuildSpecCmd); /// The `export-blocks` command used to export blocks. #[derive(Debug, StructOpt, Clone)] pub struct ExportBlocksCmd { - /// Output file name or stdout if unspecified. - #[structopt(parse(from_os_str))] - pub output: Option, + /// Output file name or stdout if unspecified. + #[structopt(parse(from_os_str))] + pub output: Option, - /// Specify starting block number. 1 by default. - #[structopt(long = "from", value_name = "BLOCK")] - pub from: Option, + /// Specify starting block number. 1 by default. + #[structopt(long = "from", value_name = "BLOCK")] + pub from: Option, - /// Specify last block number. Best block by default. - #[structopt(long = "to", value_name = "BLOCK")] - pub to: Option, + /// Specify last block number. Best block by default. + #[structopt(long = "to", value_name = "BLOCK")] + pub to: Option, - /// Use JSON output rather than binary. - #[structopt(long = "json")] - pub json: bool, + /// Use JSON output rather than binary. + #[structopt(long = "json")] + pub json: bool, - #[allow(missing_docs)] - #[structopt(flatten)] - pub shared_params: SharedParams, + #[allow(missing_docs)] + #[structopt(flatten)] + pub shared_params: SharedParams, } impl_get_log_filter!(ExportBlocksCmd); @@ -520,17 +537,17 @@ impl_get_log_filter!(ExportBlocksCmd); /// The `import-blocks` command used to import blocks. #[derive(Debug, StructOpt, Clone)] pub struct ImportBlocksCmd { - /// Input file or stdin if unspecified. - #[structopt(parse(from_os_str))] - pub input: Option, + /// Input file or stdin if unspecified. + #[structopt(parse(from_os_str))] + pub input: Option, - /// The default number of 64KB pages to ever allocate for Wasm execution. Don't alter this unless you know what you're doing. - #[structopt(long = "default-heap-pages", value_name = "COUNT")] - pub default_heap_pages: Option, + /// The default number of 64KB pages to ever allocate for Wasm execution. Don't alter this unless you know what you're doing. + #[structopt(long = "default-heap-pages", value_name = "COUNT")] + pub default_heap_pages: Option, - #[allow(missing_docs)] - #[structopt(flatten)] - pub shared_params: SharedParams, + #[allow(missing_docs)] + #[structopt(flatten)] + pub shared_params: SharedParams, } impl_get_log_filter!(ImportBlocksCmd); @@ -538,13 +555,13 @@ impl_get_log_filter!(ImportBlocksCmd); /// The `revert` command used revert the chain to a previos state. #[derive(Debug, StructOpt, Clone)] pub struct RevertCmd { - /// Number of blocks to revert. - #[structopt(default_value = "256")] - pub num: u64, + /// Number of blocks to revert. + #[structopt(default_value = "256")] + pub num: u64, - #[allow(missing_docs)] - #[structopt(flatten)] - pub shared_params: SharedParams, + #[allow(missing_docs)] + #[structopt(flatten)] + pub shared_params: SharedParams, } impl_get_log_filter!(RevertCmd); @@ -552,13 +569,13 @@ impl_get_log_filter!(RevertCmd); /// The `purge-chain` command used to remove the whole chain. #[derive(Debug, StructOpt, Clone)] pub struct PurgeChainCmd { - /// Skip interactive prompt by answering yes automatically. 
- #[structopt(short = "y")] - pub yes: bool, + /// Skip interactive prompt by answering yes automatically. + #[structopt(short = "y")] + pub yes: bool, - #[allow(missing_docs)] - #[structopt(flatten)] - pub shared_params: SharedParams, + #[allow(missing_docs)] + #[structopt(flatten)] + pub shared_params: SharedParams, } impl_get_log_filter!(PurgeChainCmd); @@ -570,88 +587,95 @@ impl_get_log_filter!(PurgeChainCmd); /// `Run` are exported as main executable parameters. #[derive(Debug, Clone)] pub enum CoreParams { - /// Run a node. - Run(MergeParameters), + /// Run a node. + Run(MergeParameters), - /// Build a spec.json file, outputing to stdout. - BuildSpec(BuildSpecCmd), + /// Build a spec.json file, outputing to stdout. + BuildSpec(BuildSpecCmd), - /// Export blocks to a file. - ExportBlocks(ExportBlocksCmd), + /// Export blocks to a file. + ExportBlocks(ExportBlocksCmd), - /// Import blocks from file. - ImportBlocks(ImportBlocksCmd), + /// Import blocks from file. + ImportBlocks(ImportBlocksCmd), - /// Revert chain to the previous state. - Revert(RevertCmd), + /// Revert chain to the previous state. + Revert(RevertCmd), - /// Remove the whole chain data. - PurgeChain(PurgeChainCmd), + /// Remove the whole chain data. + PurgeChain(PurgeChainCmd), - /// Further custom subcommands. - Custom(CC), + /// Further custom subcommands. + Custom(CC), } -impl StructOpt for CoreParams where - CC: StructOpt + GetLogFilter, - RP: StructOpt + AugmentClap +impl StructOpt for CoreParams +where + CC: StructOpt + GetLogFilter, + RP: StructOpt + AugmentClap, +{ + fn clap<'a, 'b>() -> App<'a, 'b> { + RP::augment_clap(RunCmd::augment_clap( + CC::clap().unset_setting(AppSettings::SubcommandRequiredElseHelp), + )) + .subcommand( + BuildSpecCmd::augment_clap(SubCommand::with_name("build-spec")) + .about("Build a spec.json file, outputing to stdout."), + ) + .subcommand( + ExportBlocksCmd::augment_clap(SubCommand::with_name("export-blocks")) + .about("Export blocks to a file."), + ) + .subcommand( + ImportBlocksCmd::augment_clap(SubCommand::with_name("import-blocks")) + .about("Import blocks from file."), + ) + .subcommand( + RevertCmd::augment_clap(SubCommand::with_name("revert")) + .about("Revert chain to the previous state."), + ) + .subcommand( + PurgeChainCmd::augment_clap(SubCommand::with_name("purge-chain")) + .about("Remove the whole chain data."), + ) + } + + fn from_clap(matches: &::structopt::clap::ArgMatches) -> Self { + match matches.subcommand() { + ("build-spec", Some(matches)) => { + CoreParams::BuildSpec(BuildSpecCmd::from_clap(matches)) + } + ("export-blocks", Some(matches)) => { + CoreParams::ExportBlocks(ExportBlocksCmd::from_clap(matches)) + } + ("import-blocks", Some(matches)) => { + CoreParams::ImportBlocks(ImportBlocksCmd::from_clap(matches)) + } + ("revert", Some(matches)) => CoreParams::Revert(RevertCmd::from_clap(matches)), + ("purge-chain", Some(matches)) => { + CoreParams::PurgeChain(PurgeChainCmd::from_clap(matches)) + } + (_, None) => CoreParams::Run(MergeParameters::from_clap(matches)), + _ => CoreParams::Custom(CC::from_clap(matches)), + } + } +} + +impl GetLogFilter for CoreParams +where + CC: GetLogFilter, { - fn clap<'a, 'b>() -> App<'a, 'b> { - RP::augment_clap( - RunCmd::augment_clap( - CC::clap().unset_setting(AppSettings::SubcommandRequiredElseHelp) - ) - ).subcommand( - BuildSpecCmd::augment_clap(SubCommand::with_name("build-spec")) - .about("Build a spec.json file, outputing to stdout.") - ) - .subcommand( - 
ExportBlocksCmd::augment_clap(SubCommand::with_name("export-blocks")) - .about("Export blocks to a file.") - ) - .subcommand( - ImportBlocksCmd::augment_clap(SubCommand::with_name("import-blocks")) - .about("Import blocks from file.") - ) - .subcommand( - RevertCmd::augment_clap(SubCommand::with_name("revert")) - .about("Revert chain to the previous state.") - ) - .subcommand( - PurgeChainCmd::augment_clap(SubCommand::with_name("purge-chain")) - .about("Remove the whole chain data.") - ) - } - - fn from_clap(matches: &::structopt::clap::ArgMatches) -> Self { - match matches.subcommand() { - ("build-spec", Some(matches)) => - CoreParams::BuildSpec(BuildSpecCmd::from_clap(matches)), - ("export-blocks", Some(matches)) => - CoreParams::ExportBlocks(ExportBlocksCmd::from_clap(matches)), - ("import-blocks", Some(matches)) => - CoreParams::ImportBlocks(ImportBlocksCmd::from_clap(matches)), - ("revert", Some(matches)) => CoreParams::Revert(RevertCmd::from_clap(matches)), - ("purge-chain", Some(matches)) => - CoreParams::PurgeChain(PurgeChainCmd::from_clap(matches)), - (_, None) => CoreParams::Run(MergeParameters::from_clap(matches)), - _ => CoreParams::Custom(CC::from_clap(matches)), - } - } -} - -impl GetLogFilter for CoreParams where CC: GetLogFilter { - fn get_log_filter(&self) -> Option { - match self { - CoreParams::Run(c) => c.left.get_log_filter(), - CoreParams::BuildSpec(c) => c.get_log_filter(), - CoreParams::ExportBlocks(c) => c.get_log_filter(), - CoreParams::ImportBlocks(c) => c.get_log_filter(), - CoreParams::PurgeChain(c) => c.get_log_filter(), - CoreParams::Revert(c) => c.get_log_filter(), - CoreParams::Custom(c) => c.get_log_filter(), - } - } + fn get_log_filter(&self) -> Option { + match self { + CoreParams::Run(c) => c.left.get_log_filter(), + CoreParams::BuildSpec(c) => c.get_log_filter(), + CoreParams::ExportBlocks(c) => c.get_log_filter(), + CoreParams::ImportBlocks(c) => c.get_log_filter(), + CoreParams::PurgeChain(c) => c.get_log_filter(), + CoreParams::Revert(c) => c.get_log_filter(), + CoreParams::Custom(c) => c.get_log_filter(), + } + } } /// A special commandline parameter that expands to nothing. @@ -660,45 +684,49 @@ impl GetLogFilter for CoreParams where CC: GetLogFilter { pub struct NoCustom {} impl StructOpt for NoCustom { - fn clap<'a, 'b>() -> App<'a, 'b> { - App::new("NoCustom") - } + fn clap<'a, 'b>() -> App<'a, 'b> { + App::new("NoCustom") + } - fn from_clap(_: &::structopt::clap::ArgMatches) -> Self { - NoCustom {} - } + fn from_clap(_: &::structopt::clap::ArgMatches) -> Self { + NoCustom {} + } } impl AugmentClap for NoCustom { - fn augment_clap<'a, 'b>(app: App<'a, 'b>) -> App<'a, 'b> { - app - } + fn augment_clap<'a, 'b>(app: App<'a, 'b>) -> App<'a, 'b> { + app + } } impl GetLogFilter for NoCustom { - fn get_log_filter(&self) -> Option { - None - } + fn get_log_filter(&self) -> Option { + None + } } /// Merge all CLI parameters of `L` and `R` into the same level. #[derive(Clone, Debug)] pub struct MergeParameters { - /// The left side parameters. - pub left: L, - /// The right side parameters. - pub right: R, -} - -impl StructOpt for MergeParameters where L: StructOpt + AugmentClap, R: StructOpt { - fn clap<'a, 'b>() -> App<'a, 'b> { - L::augment_clap(R::clap()) - } - - fn from_clap(matches: &::structopt::clap::ArgMatches) -> Self { - MergeParameters { - left: L::from_clap(matches), - right: R::from_clap(matches), - } - } + /// The left side parameters. + pub left: L, + /// The right side parameters. 
+    pub right: R,
+}
+
+impl StructOpt for MergeParameters
+where
+    L: StructOpt + AugmentClap,
+    R: StructOpt,
+{
+    fn clap<'a, 'b>() -> App<'a, 'b> {
+        L::augment_clap(R::clap())
+    }
+
+    fn from_clap(matches: &::structopt::clap::ArgMatches) -> Self {
+        MergeParameters {
+            left: L::from_clap(matches),
+            right: R::from_clap(matches),
+        }
+    }
 }
diff --git a/core/cli/src/traits.rs b/core/cli/src/traits.rs
index ddb389e454..ad746271ef 100644
--- a/core/cli/src/traits.rs
+++ b/core/cli/src/traits.rs
@@ -14,31 +14,31 @@
 // You should have received a copy of the GNU General Public License
 // along with Substrate.  If not, see .

-use structopt::{StructOpt, clap::App};
+use structopt::{clap::App, StructOpt};

 /// Something that can augment a clap app with further parameters.
 /// `derive(StructOpt)` is implementing this function by default, so a macro `impl_augment_clap!`
 /// is provided to simplify the implementation of this trait.
 pub trait AugmentClap: StructOpt {
-	/// Augment the given clap `App` with further parameters.
-	fn augment_clap<'a, 'b>(app: App<'a, 'b>) -> App<'a, 'b>;
+    /// Augment the given clap `App` with further parameters.
+    fn augment_clap<'a, 'b>(app: App<'a, 'b>) -> App<'a, 'b>;
 }

 /// Macro for implementing the `AugmentClap` trait.
 /// This requires that the given type uses `derive(StructOpt)`!
 #[macro_export]
 macro_rules! impl_augment_clap {
-	( $type:ident ) => {
-		impl $crate::AugmentClap for $type {
-			fn augment_clap<'a, 'b>(app: $crate::App<'a, 'b>) -> $crate::App<'a, 'b> {
-				$type::augment_clap(app)
-			}
-		}
-	}
+    ( $type:ident ) => {
+        impl $crate::AugmentClap for $type {
+            fn augment_clap<'a, 'b>(app: $crate::App<'a, 'b>) -> $crate::App<'a, 'b> {
+                $type::augment_clap(app)
+            }
+        }
+    };
 }

 /// Returns the log filter given by the user as a commandline argument.
 pub trait GetLogFilter {
-	/// Returns the set log filter.
-	fn get_log_filter(&self) -> Option;
+    /// Returns the set log filter.
+    fn get_log_filter(&self) -> Option;
 }
diff --git a/core/client/db/src/cache/list_cache.rs b/core/client/db/src/cache/list_cache.rs
index 1e641534f9..5a35176874 100644
--- a/core/client/db/src/cache/list_cache.rs
+++ b/core/client/db/src/cache/list_cache.rs
@@ -44,1343 +44,2593 @@ use std::collections::BTreeSet;
 use log::warn;

 use client::error::{ErrorKind as ClientErrorKind, Result as ClientResult};
-use runtime_primitives::traits::{Block as BlockT, NumberFor, As, Zero};
+use runtime_primitives::traits::{As, Block as BlockT, NumberFor, Zero};

-use crate::cache::{CacheItemT, ComplexBlockId};
 use crate::cache::list_entry::{Entry, StorageEntry};
-use crate::cache::list_storage::{Storage, StorageTransaction, Metadata};
+use crate::cache::list_storage::{Metadata, Storage, StorageTransaction};
+use crate::cache::{CacheItemT, ComplexBlockId};

 /// List-based cache.
 pub struct ListCache> {
-	/// Cache storage.
-	storage: S,
-	/// Prune depth.
-	prune_depth: NumberFor,
-	/// Best finalized block.
-	best_finalized_block: ComplexBlockId,
-	/// Best finalized entry (if exists).
-	best_finalized_entry: Option>,
-	/// All unfinalized 'forks'.
-	unfinalized: Vec>,
+    /// Cache storage.
+    storage: S,
+    /// Prune depth.
+    prune_depth: NumberFor,
+    /// Best finalized block.
+    best_finalized_block: ComplexBlockId,
+    /// Best finalized entry (if exists).
+    best_finalized_entry: Option>,
+    /// All unfinalized 'forks'.
+    unfinalized: Vec>,
 }

 /// All possible list cache operations that could be performed after transaction is committed.
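The forwarding macro above relies on a resolution detail that is easy to miss: `derive(StructOpt)` generates an *inherent* `augment_clap` method, and inherent associated functions win over trait methods when `$type::augment_clap` is resolved, so the call forwards instead of recursing. A toy, self-contained illustration of that trick; every name here is a stand-in, not this crate's API:

struct App(Vec<&'static str>);

trait AugmentClap {
    fn augment_clap(app: App) -> App;
}

struct DemoCmd;

impl DemoCmd {
    // stands in for the inherent method that `derive(StructOpt)` generates
    fn augment_clap(mut app: App) -> App {
        app.0.push("--demo-flag");
        app
    }
}

macro_rules! impl_augment_clap_demo {
    ( $type:ident ) => {
        impl AugmentClap for $type {
            fn augment_clap(app: App) -> App {
                // inherent associated functions are preferred over trait
                // methods here, so this forwards instead of recursing
                $type::augment_clap(app)
            }
        }
    };
}

impl_augment_clap_demo!(DemoCmd);

fn main() {
    let app = <DemoCmd as AugmentClap>::augment_clap(App(Vec::new()));
    assert_eq!(app.0, vec!["--demo-flag"]);
}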
#[derive(Debug)] #[cfg_attr(test, derive(PartialEq))] pub enum CommitOperation { - /// New block is appended to the fork without changing the cached value. - AppendNewBlock(usize, ComplexBlockId), - /// New block is appended to the fork with the different value. - AppendNewEntry(usize, Entry), - /// New fork is added with the given head entry. - AddNewFork(Entry), - /// New block is finalized and possibly: - /// - new entry is finalized AND/OR - /// - some forks are destroyed - BlockFinalized(ComplexBlockId, Option>, BTreeSet), + /// New block is appended to the fork without changing the cached value. + AppendNewBlock(usize, ComplexBlockId), + /// New block is appended to the fork with the different value. + AppendNewEntry(usize, Entry), + /// New fork is added with the given head entry. + AddNewFork(Entry), + /// New block is finalized and possibly: + /// - new entry is finalized AND/OR + /// - some forks are destroyed + BlockFinalized( + ComplexBlockId, + Option>, + BTreeSet, + ), } /// Single fork of list-based cache. #[derive(Debug)] #[cfg_attr(test, derive(PartialEq))] pub struct Fork { - /// The best block of this fork. We do not save this field in the database to avoid - /// extra updates => it could be None after restart. It will be either filled when - /// the block is appended to this fork, or the whole fork will be abandoned when the - /// block from the other fork is finalized - best_block: Option>, - /// The head entry of this fork. - head: Entry, + /// The best block of this fork. We do not save this field in the database to avoid + /// extra updates => it could be None after restart. It will be either filled when + /// the block is appended to this fork, or the whole fork will be abandoned when the + /// block from the other fork is finalized + best_block: Option>, + /// The head entry of this fork. + head: Entry, } /// Outcome of Fork::try_append_or_fork. #[derive(Debug)] #[cfg_attr(test, derive(PartialEq))] pub enum ForkAppendResult { - /// New entry should be appended to the end of the fork. - Append, - /// New entry should be forked from the fork, starting with entry at given block. - Fork(ComplexBlockId), + /// New entry should be appended to the end of the fork. + Append, + /// New entry should be forked from the fork, starting with entry at given block. + Fork(ComplexBlockId), } impl> ListCache { - /// Create new db list cache entry. - pub fn new(storage: S, prune_depth: NumberFor, best_finalized_block: ComplexBlockId) -> Self { - let (best_finalized_entry, unfinalized) = storage.read_meta() - .and_then(|meta| read_forks(&storage, meta)) - .unwrap_or_else(|error| { - warn!(target: "db", "Unable to initialize list cache: {}. Restarting", error); - (None, Vec::new()) - }); - - ListCache { - storage, - prune_depth, - best_finalized_block, - best_finalized_entry, - unfinalized, - } - } - - /// Get reference to the storage. - pub fn storage(&self) -> &S { - &self.storage - } - - /// Get value valid at block. - pub fn value_at_block(&self, at: &ComplexBlockId) -> ClientResult> { - let head = if at.number <= self.best_finalized_block.number { - // if the block is older than the best known finalized block - // => we're should search for the finalized value - - // BUT since we're not guaranteeing to provide correct values for forks - // behind the finalized block, check if the block is finalized first - if !chain::is_finalized_block(&self.storage, at, As::sa(::std::u64::MAX))? 
{ - return Ok(None); - } - - self.best_finalized_entry.as_ref() - } else if self.unfinalized.is_empty() { - // there are no unfinalized entries - // => we should search for the finalized value - self.best_finalized_entry.as_ref() - } else { - // there are unfinalized entries - // => find the fork containing given block and read from this fork - // IF there's no matching fork, ensure that this isn't a block from a fork that has forked - // behind the best finalized block and search at finalized fork - - match self.find_unfinalized_fork(at)? { - Some(fork) => Some(&fork.head), - None => match self.best_finalized_entry.as_ref() { - Some(best_finalized_entry) if chain::is_connected_to_block(&self.storage, &best_finalized_entry.valid_from, at)? => - Some(best_finalized_entry), - _ => None, - }, - } - }; - - match head { - Some(head) => head.search_best_before(&self.storage, at.number, true) - .map(|e| e.and_then(|e| e.0.value)), - None => Ok(None), - } - } - - /// When new block is inserted into database. - pub fn on_block_insert>( - &self, - tx: &mut Tx, - parent: ComplexBlockId, - block: ComplexBlockId, - value: Option, - is_final: bool, - ) -> ClientResult>> { - // this guarantee is currently provided by LightStorage && we're relying on it here - debug_assert!(!is_final || self.best_finalized_block.hash == parent.hash); - - // we do not store any values behind finalized - if block.number != Zero::zero() && self.best_finalized_block.number >= block.number { - return Ok(None); - } - - // if the block is not final, it is possibly appended to/forking from existing unfinalized fork - if !is_final { - let mut fork_and_action = None; - - // first: try to find fork that is known to has the best block we're appending to - for (index, fork) in self.unfinalized.iter().enumerate() { - if fork.try_append(&parent) { - fork_and_action = Some((index, ForkAppendResult::Append)); - break; - } - } - - // if not found, check cases: - // - we're appending to the fork for the first time after restart; - // - we're forking existing unfinalized fork from the middle; - if fork_and_action.is_none() { - let best_finalized_entry_block = self.best_finalized_entry.as_ref().map(|f| f.valid_from.number); - for (index, fork) in self.unfinalized.iter().enumerate() { - if let Some(action) = fork.try_append_or_fork(&self.storage, &parent, best_finalized_entry_block)? 
{ - fork_and_action = Some((index, action)); - break; - } - } - } - - // if we have found matching unfinalized fork => early exit - match fork_and_action { - // append to unfinalized fork - Some((index, ForkAppendResult::Append)) => { - let new_storage_entry = match self.unfinalized[index].head.try_update(value) { - Some(new_storage_entry) => new_storage_entry, - None => return Ok(Some(CommitOperation::AppendNewBlock(index, block))), - }; - - tx.insert_storage_entry(&block, &new_storage_entry); - let operation = CommitOperation::AppendNewEntry(index, new_storage_entry.into_entry(block)); - tx.update_meta(self.best_finalized_entry.as_ref(), &self.unfinalized, &operation); - return Ok(Some(operation)); - }, - // fork from the middle of unfinalized fork - Some((_, ForkAppendResult::Fork(prev_valid_from))) => { - // it is possible that we're inserting extra (but still required) fork here - let new_storage_entry = StorageEntry { - prev_valid_from: Some(prev_valid_from), - value, - }; - - tx.insert_storage_entry(&block, &new_storage_entry); - let operation = CommitOperation::AddNewFork(new_storage_entry.into_entry(block)); - tx.update_meta(self.best_finalized_entry.as_ref(), &self.unfinalized, &operation); - return Ok(Some(operation)); - }, - None => (), - } - } - - // if we're here, then one of following is true: - // - either we're inserting final block => all ancestors are already finalized AND the only thing we can do - // is to try to update last finalized entry - // - either we're inserting non-final blocks that has no ancestors in any known unfinalized forks - - let new_storage_entry = match self.best_finalized_entry.as_ref() { - Some(best_finalized_entry) => best_finalized_entry.try_update(value), - None if value.is_some() => Some(StorageEntry { prev_valid_from: None, value }), - None => None, - }; - - if !is_final { - return Ok(match new_storage_entry { - Some(new_storage_entry) => { - tx.insert_storage_entry(&block, &new_storage_entry); - let operation = CommitOperation::AddNewFork(new_storage_entry.into_entry(block)); - tx.update_meta(self.best_finalized_entry.as_ref(), &self.unfinalized, &operation); - Some(operation) - }, - None => None, - }); - } - - // cleanup database from abandoned unfinalized forks and obsolete finalized entries - let abandoned_forks = self.destroy_abandoned_forks(tx, &block); - self.prune_finalized_entries(tx, &block); - - match new_storage_entry { - Some(new_storage_entry) => { - tx.insert_storage_entry(&block, &new_storage_entry); - let operation = CommitOperation::BlockFinalized(block.clone(), Some(new_storage_entry.into_entry(block)), abandoned_forks); - tx.update_meta(self.best_finalized_entry.as_ref(), &self.unfinalized, &operation); - Ok(Some(operation)) - }, - None => Ok(Some(CommitOperation::BlockFinalized(block, None, abandoned_forks))), - } - } - - /// When previously inserted block is finalized. - pub fn on_block_finalize>( - &self, - tx: &mut Tx, - parent: ComplexBlockId, - block: ComplexBlockId, - ) -> ClientResult>> { - // this guarantee is currently provided by LightStorage && we're relying on it here - debug_assert_eq!(self.best_finalized_block.hash, parent.hash); - - // there could be at most one entry that is finalizing - let finalizing_entry = self.storage.read_entry(&block)? 
-		.map(|entry| entry.into_entry(block.clone()));
-
-		// cleanup database from abandoned unfinalized forks and obsolete finalized entries
-		let abandoned_forks = self.destroy_abandoned_forks(tx, &block);
-		self.prune_finalized_entries(tx, &block);
-
-		let update_meta = finalizing_entry.is_some();
-		let operation = CommitOperation::BlockFinalized(block, finalizing_entry, abandoned_forks);
-		if update_meta {
-			tx.update_meta(self.best_finalized_entry.as_ref(), &self.unfinalized, &operation);
-		}
-		Ok(Some(operation))
-	}
-
-	/// When transaction is committed.
-	pub fn on_transaction_commit(&mut self, op: CommitOperation) {
-		match op {
-			CommitOperation::AppendNewBlock(index, best_block) => {
-				let mut fork = self.unfinalized.get_mut(index)
-					.expect("ListCache is a crate-private type;
+    /// Create new db list cache entry.
+    pub fn new(
+        storage: S,
+        prune_depth: NumberFor,
+        best_finalized_block: ComplexBlockId,
+    ) -> Self {
+        let (best_finalized_entry, unfinalized) = storage
+            .read_meta()
+            .and_then(|meta| read_forks(&storage, meta))
+            .unwrap_or_else(|error| {
+                warn!(target: "db", "Unable to initialize list cache: {}. Restarting", error);
+                (None, Vec::new())
+            });
+
+        ListCache {
+            storage,
+            prune_depth,
+            best_finalized_block,
+            best_finalized_entry,
+            unfinalized,
+        }
+    }
+
+    /// Get reference to the storage.
+    pub fn storage(&self) -> &S {
+        &self.storage
+    }
+
+    /// Get value valid at block.
+    pub fn value_at_block(&self, at: &ComplexBlockId) -> ClientResult> {
+        let head = if at.number <= self.best_finalized_block.number {
+            // if the block is older than the best known finalized block
+            // => we should search for the finalized value
+
+            // BUT since we're not guaranteeing to provide correct values for forks
+            // behind the finalized block, check if the block is finalized first
+            if !chain::is_finalized_block(&self.storage, at, As::sa(::std::u64::MAX))? {
+                return Ok(None);
+            }
+
+            self.best_finalized_entry.as_ref()
+        } else if self.unfinalized.is_empty() {
+            // there are no unfinalized entries
+            // => we should search for the finalized value
+            self.best_finalized_entry.as_ref()
+        } else {
+            // there are unfinalized entries
+            // => find the fork containing given block and read from this fork
+            // IF there's no matching fork, ensure that this isn't a block from a fork that has forked
+            // behind the best finalized block and search at finalized fork
+
+            match self.find_unfinalized_fork(at)? {
+                Some(fork) => Some(&fork.head),
+                None => match self.best_finalized_entry.as_ref() {
+                    Some(best_finalized_entry)
+                        if chain::is_connected_to_block(
+                            &self.storage,
+                            &best_finalized_entry.valid_from,
+                            at,
+                        )? =>
+                    {
+                        Some(best_finalized_entry)
+                    }
+                    _ => None,
+                },
+            }
+        };
+
+        match head {
+            Some(head) => head
+                .search_best_before(&self.storage, at.number, true)
+                .map(|e| e.and_then(|e| e.0.value)),
+            None => Ok(None),
+        }
+    }
+
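To condense the branch structure of `value_at_block` above: the lookup head depends on where the queried block sits relative to the finalized head and the known unfinalized forks. A self-contained sketch of that decision; `classify`, `BlockId` and `Lookup` are illustrative stand-ins, not cache APIs:

#[derive(Clone, Copy)]
struct BlockId { number: u64 }

enum Lookup {
    // at or behind the finalized head: only the finalized entry can answer,
    // and only if the block is actually on the finalized chain
    Finalized { must_verify_finality: bool },
    // ahead of the finalized head, on a known unfinalized fork: use that
    // fork's head entry
    UnfinalizedFork(usize),
    // ahead of the finalized head with no matching fork (or no forks at
    // all): fall back to the finalized entry when connected to it
    FinalizedFallback,
}

fn classify(at: BlockId, finalized: BlockId, matching_fork: Option<usize>) -> Lookup {
    if at.number <= finalized.number {
        Lookup::Finalized { must_verify_finality: true }
    } else if let Some(fork_index) = matching_fork {
        Lookup::UnfinalizedFork(fork_index)
    } else {
        Lookup::FinalizedFallback
    }
}

fn main() {
    let finalized = BlockId { number: 100 };
    // an old block is answered from the finalized entry, or not at all
    match classify(BlockId { number: 50 }, finalized, None) {
        Lookup::Finalized { must_verify_finality } => assert!(must_verify_finality),
        _ => unreachable!(),
    }
    // a new block on fork 0 is answered from that fork's head
    match classify(BlockId { number: 120 }, finalized, Some(0)) {
        Lookup::UnfinalizedFork(i) => assert_eq!(i, 0),
        _ => unreachable!(),
    }
}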
+    /// When new block is inserted into database.
+    pub fn on_block_insert>(
+        &self,
+        tx: &mut Tx,
+        parent: ComplexBlockId,
+        block: ComplexBlockId,
+        value: Option,
+        is_final: bool,
+    ) -> ClientResult>> {
+        // this guarantee is currently provided by LightStorage && we're relying on it here
+        debug_assert!(!is_final || self.best_finalized_block.hash == parent.hash);
+
+        // we do not store any values behind finalized
+        if block.number != Zero::zero() && self.best_finalized_block.number >= block.number {
+            return Ok(None);
+        }
+
+        // if the block is not final, it is possibly appended to/forking from existing unfinalized fork
+        if !is_final {
+            let mut fork_and_action = None;
+
+            // first: try to find fork that is known to have the best block we're appending to
+            for (index, fork) in self.unfinalized.iter().enumerate() {
+                if fork.try_append(&parent) {
+                    fork_and_action = Some((index, ForkAppendResult::Append));
+                    break;
+                }
+            }
+
+            // if not found, check cases:
+            // - we're appending to the fork for the first time after restart;
+            // - we're forking existing unfinalized fork from the middle;
+            if fork_and_action.is_none() {
+                let best_finalized_entry_block = self
+                    .best_finalized_entry
+                    .as_ref()
+                    .map(|f| f.valid_from.number);
+                for (index, fork) in self.unfinalized.iter().enumerate() {
+                    if let Some(action) =
+                        fork.try_append_or_fork(&self.storage, &parent, best_finalized_entry_block)?
+                    {
+                        fork_and_action = Some((index, action));
+                        break;
+                    }
+                }
+            }
+
+            // if we have found matching unfinalized fork => early exit
+            match fork_and_action {
+                // append to unfinalized fork
+                Some((index, ForkAppendResult::Append)) => {
+                    let new_storage_entry = match self.unfinalized[index].head.try_update(value) {
+                        Some(new_storage_entry) => new_storage_entry,
+                        None => return Ok(Some(CommitOperation::AppendNewBlock(index, block))),
+                    };
+
+                    tx.insert_storage_entry(&block, &new_storage_entry);
+                    let operation =
+                        CommitOperation::AppendNewEntry(index, new_storage_entry.into_entry(block));
+                    tx.update_meta(
+                        self.best_finalized_entry.as_ref(),
+                        &self.unfinalized,
+                        &operation,
+                    );
+                    return Ok(Some(operation));
+                }
+                // fork from the middle of unfinalized fork
+                Some((_, ForkAppendResult::Fork(prev_valid_from))) => {
+                    // it is possible that we're inserting extra (but still required) fork here
+                    let new_storage_entry = StorageEntry {
+                        prev_valid_from: Some(prev_valid_from),
+                        value,
+                    };
+
+                    tx.insert_storage_entry(&block, &new_storage_entry);
+                    let operation =
+                        CommitOperation::AddNewFork(new_storage_entry.into_entry(block));
+                    tx.update_meta(
+                        self.best_finalized_entry.as_ref(),
+                        &self.unfinalized,
+                        &operation,
+                    );
+                    return Ok(Some(operation));
+                }
+                None => (),
+            }
+        }
+
+        // if we're here, then one of the following is true:
+        // - either we're inserting final block => all ancestors are already finalized AND the only thing we can do
+        //   is to try to update last finalized entry
+        // - either we're inserting non-final blocks that have no ancestors in any known unfinalized forks
+
+        let new_storage_entry = match self.best_finalized_entry.as_ref() {
+            Some(best_finalized_entry) => best_finalized_entry.try_update(value),
+            None if value.is_some() => Some(StorageEntry {
+                prev_valid_from: None,
+                value,
+            }),
+            None => None,
+        };
+
+        if !is_final {
+            return Ok(match new_storage_entry {
+                Some(new_storage_entry) => {
+                    tx.insert_storage_entry(&block, &new_storage_entry);
+                    let operation =
+                        CommitOperation::AddNewFork(new_storage_entry.into_entry(block));
+                    tx.update_meta(
+                        self.best_finalized_entry.as_ref(),
+                        &self.unfinalized,
+                        &operation,
); + Some(operation) + } + None => None, + }); + } + + // cleanup database from abandoned unfinalized forks and obsolete finalized entries + let abandoned_forks = self.destroy_abandoned_forks(tx, &block); + self.prune_finalized_entries(tx, &block); + + match new_storage_entry { + Some(new_storage_entry) => { + tx.insert_storage_entry(&block, &new_storage_entry); + let operation = CommitOperation::BlockFinalized( + block.clone(), + Some(new_storage_entry.into_entry(block)), + abandoned_forks, + ); + tx.update_meta( + self.best_finalized_entry.as_ref(), + &self.unfinalized, + &operation, + ); + Ok(Some(operation)) + } + None => Ok(Some(CommitOperation::BlockFinalized( + block, + None, + abandoned_forks, + ))), + } + } + + /// When previously inserted block is finalized. + pub fn on_block_finalize>( + &self, + tx: &mut Tx, + parent: ComplexBlockId, + block: ComplexBlockId, + ) -> ClientResult>> { + // this guarantee is currently provided by LightStorage && we're relying on it here + debug_assert_eq!(self.best_finalized_block.hash, parent.hash); + + // there could be at most one entry that is finalizing + let finalizing_entry = self + .storage + .read_entry(&block)? + .map(|entry| entry.into_entry(block.clone())); + + // cleanup database from abandoned unfinalized forks and obsolete finalized entries + let abandoned_forks = self.destroy_abandoned_forks(tx, &block); + self.prune_finalized_entries(tx, &block); + + let update_meta = finalizing_entry.is_some(); + let operation = CommitOperation::BlockFinalized(block, finalizing_entry, abandoned_forks); + if update_meta { + tx.update_meta( + self.best_finalized_entry.as_ref(), + &self.unfinalized, + &operation, + ); + } + Ok(Some(operation)) + } + + /// When transaction is committed. + pub fn on_transaction_commit(&mut self, op: CommitOperation) { + match op { + CommitOperation::AppendNewBlock(index, best_block) => { + let mut fork = self.unfinalized.get_mut(index).expect( + "ListCache is a crate-private type; internal clients of ListCache are committing transaction while cache is locked; - CommitOperation holds valid references while cache is locked; qed"); - fork.best_block = Some(best_block); - }, - CommitOperation::AppendNewEntry(index, entry) => { - let mut fork = self.unfinalized.get_mut(index) - .expect("ListCache is a crate-private type; + CommitOperation holds valid references while cache is locked; qed", + ); + fork.best_block = Some(best_block); + } + CommitOperation::AppendNewEntry(index, entry) => { + let mut fork = self.unfinalized.get_mut(index).expect( + "ListCache is a crate-private type; internal clients of ListCache are committing transaction while cache is locked; - CommitOperation holds valid references while cache is locked; qed"); - fork.best_block = Some(entry.valid_from.clone()); - fork.head = entry; - }, - CommitOperation::AddNewFork(entry) => { - self.unfinalized.push(Fork { - best_block: Some(entry.valid_from.clone()), - head: entry, - }); - }, - CommitOperation::BlockFinalized(block, finalizing_entry, forks) => { - self.best_finalized_block = block; - if let Some(finalizing_entry) = finalizing_entry { - self.best_finalized_entry = Some(finalizing_entry); - } - for fork_index in forks.iter().rev() { - self.unfinalized.remove(*fork_index); - } - }, - } - } - - /// Prune old finalized entries. 
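Before the pruning helpers, note why the `BlockFinalized` arm above walks the destroyed fork indices with `.rev()`: removing an element from a `Vec` shifts all later elements down, so deleting in descending index order keeps the not-yet-processed indices valid. A self-contained illustration:

use std::collections::BTreeSet;

fn main() {
    let mut unfinalized = vec!["fork-a", "fork-b", "fork-c", "fork-d"];
    let destroyed: BTreeSet<usize> = [1, 3].iter().cloned().collect();

    // BTreeSet iterates in ascending order, so `.rev()` yields the indices
    // in descending order; each `remove` then leaves smaller indices intact
    for index in destroyed.iter().rev() {
        unfinalized.remove(*index);
    }

    assert_eq!(unfinalized, vec!["fork-a", "fork-c"]);
}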
- fn prune_finalized_entries>( - &self, - tx: &mut Tx, - block: &ComplexBlockId - ) { - let mut do_pruning = || -> ClientResult<()> { - // calculate last ancient block number - let ancient_block = match block.number.as_().checked_sub(self.prune_depth.as_()) { - Some(number) => match self.storage.read_id(As::sa(number))? { - Some(hash) => ComplexBlockId::new(hash, As::sa(number)), - None => return Ok(()), - }, - None => return Ok(()), - }; - - // if there's an entry at this block: - // - remove reference from this entry to the previous entry - // - destroy fork starting with previous entry - let current_entry = match self.storage.read_entry(&ancient_block)? { - Some(current_entry) => current_entry, - None => return Ok(()), - }; - let first_entry_to_truncate = match current_entry.prev_valid_from { - Some(prev_valid_from) => prev_valid_from, - None => return Ok(()), - }; - - // truncate ancient entry - tx.insert_storage_entry(&ancient_block, &StorageEntry { - prev_valid_from: None, - value: current_entry.value, - }); - - // destroy 'fork' ending with previous entry - Fork { best_block: None, head: Entry { valid_from: first_entry_to_truncate, value: None } } - .destroy(&self.storage, tx, None) - }; - - if let Err(error) = do_pruning() { - warn!(target: "db", "Failed to prune ancient cache entries: {}", error); - } - } - - /// Try to destroy abandoned forks (forked before best finalized block) when block is finalized. - fn destroy_abandoned_forks>( - &self, - tx: &mut Tx, - block: &ComplexBlockId - ) -> BTreeSet { - let mut destroyed = BTreeSet::new(); - for (index, fork) in self.unfinalized.iter().enumerate() { - if fork.head.valid_from.number == block.number { - destroyed.insert(index); - if fork.head.valid_from.hash != block.hash { - if let Err(error) = fork.destroy(&self.storage, tx, Some(block.number)) { - warn!(target: "db", "Failed to destroy abandoned unfinalized cache fork: {}", error); - } - } - } - } - - destroyed - } - - /// Search unfinalized fork where given block belongs. - fn find_unfinalized_fork(&self, block: &ComplexBlockId) -> ClientResult>> { - for unfinalized in &self.unfinalized { - if unfinalized.matches(&self.storage, block)? { - return Ok(Some(&unfinalized)); - } - } - - Ok(None) - } + CommitOperation holds valid references while cache is locked; qed", + ); + fork.best_block = Some(entry.valid_from.clone()); + fork.head = entry; + } + CommitOperation::AddNewFork(entry) => { + self.unfinalized.push(Fork { + best_block: Some(entry.valid_from.clone()), + head: entry, + }); + } + CommitOperation::BlockFinalized(block, finalizing_entry, forks) => { + self.best_finalized_block = block; + if let Some(finalizing_entry) = finalizing_entry { + self.best_finalized_entry = Some(finalizing_entry); + } + for fork_index in forks.iter().rev() { + self.unfinalized.remove(*fork_index); + } + } + } + } + + /// Prune old finalized entries. + fn prune_finalized_entries>( + &self, + tx: &mut Tx, + block: &ComplexBlockId, + ) { + let mut do_pruning = || -> ClientResult<()> { + // calculate last ancient block number + let ancient_block = match block.number.as_().checked_sub(self.prune_depth.as_()) { + Some(number) => match self.storage.read_id(As::sa(number))? 
{ + Some(hash) => ComplexBlockId::new(hash, As::sa(number)), + None => return Ok(()), + }, + None => return Ok(()), + }; + + // if there's an entry at this block: + // - remove reference from this entry to the previous entry + // - destroy fork starting with previous entry + let current_entry = match self.storage.read_entry(&ancient_block)? { + Some(current_entry) => current_entry, + None => return Ok(()), + }; + let first_entry_to_truncate = match current_entry.prev_valid_from { + Some(prev_valid_from) => prev_valid_from, + None => return Ok(()), + }; + + // truncate ancient entry + tx.insert_storage_entry( + &ancient_block, + &StorageEntry { + prev_valid_from: None, + value: current_entry.value, + }, + ); + + // destroy 'fork' ending with previous entry + Fork { + best_block: None, + head: Entry { + valid_from: first_entry_to_truncate, + value: None, + }, + } + .destroy(&self.storage, tx, None) + }; + + if let Err(error) = do_pruning() { + warn!(target: "db", "Failed to prune ancient cache entries: {}", error); + } + } + + /// Try to destroy abandoned forks (forked before best finalized block) when block is finalized. + fn destroy_abandoned_forks>( + &self, + tx: &mut Tx, + block: &ComplexBlockId, + ) -> BTreeSet { + let mut destroyed = BTreeSet::new(); + for (index, fork) in self.unfinalized.iter().enumerate() { + if fork.head.valid_from.number == block.number { + destroyed.insert(index); + if fork.head.valid_from.hash != block.hash { + if let Err(error) = fork.destroy(&self.storage, tx, Some(block.number)) { + warn!(target: "db", "Failed to destroy abandoned unfinalized cache fork: {}", error); + } + } + } + } + + destroyed + } + + /// Search unfinalized fork where given block belongs. + fn find_unfinalized_fork( + &self, + block: &ComplexBlockId, + ) -> ClientResult>> { + for unfinalized in &self.unfinalized { + if unfinalized.matches(&self.storage, block)? { + return Ok(Some(&unfinalized)); + } + } + + Ok(None) + } } impl Fork { - /// Get reference to the head entry of this fork. - pub fn head(&self) -> &Entry { - &self.head - } - - /// Check if the block is the part of the fork. - pub fn matches>( - &self, - storage: &S, - block: &ComplexBlockId, - ) -> ClientResult { - let range = self.head.search_best_range_before(storage, block.number)?; - match range { - None => Ok(false), - Some((begin, end)) => chain::is_connected_to_range(storage, block, (&begin, end.as_ref())), - } - } - - /// Try to append NEW block to the fork. This method willonly 'work' (return true) when block - /// is actually appended to the fork AND the best known block of the fork is known (i.e. some - /// block has been already appended to this fork after last restart). - pub fn try_append(&self, parent: &ComplexBlockId) -> bool { - // when the best block of the fork is known, the check is trivial - // - // most of calls will hopefully end here, because best_block is only unknown - // after restart and until new block is appended to the fork - self.best_block.as_ref() == Some(parent) - } - - /// Try to append new block to the fork OR fork it. 
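As a companion to `prune_finalized_entries`: the cut-off is plain arithmetic, `block_number - prune_depth` via `checked_sub`, which makes pruning a no-op while the chain is shallower than the prune depth. A hedged, minimal sketch:

fn ancient_block_number(block_number: u64, prune_depth: u64) -> Option<u64> {
    // mirrors `block.number.as_().checked_sub(self.prune_depth.as_())` above
    block_number.checked_sub(prune_depth)
}

fn main() {
    // chain shallower than the prune depth: nothing is prunable yet
    assert_eq!(ancient_block_number(100, 1024), None);
    // deep enough: the entry lookup starts at block 976
    assert_eq!(ancient_block_number(2000, 1024), Some(976));
}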
-	pub fn try_append_or_fork>(
-		&self,
-		storage: &S,
-		parent: &ComplexBlockId,
-		best_finalized_entry_block: Option>,
-	) -> ClientResult>> {
-		// try to find entries that are (possibly) surrounding the parent block
-		let range = self.head.search_best_range_before(storage, parent.number)?;
-		let begin = match range {
-			Some((begin, _)) => begin,
-			None => return Ok(None),
-		};
-
-		// check if the parent is connected to the beginning of the range
-		if !chain::is_connected_to_block(storage, &parent, &begin)? {
-			return Ok(None);
-		}
-
-		// the block is connected to the begin-entry. If begin is the head entry
-		// => we need to append new block to the fork
-		if begin == self.head.valid_from {
-			return Ok(Some(ForkAppendResult::Append));
-		}
-
-		// the parent block belongs to this fork AND it is located after last finalized entry
-		// => we need to make a new fork
-		if best_finalized_entry_block.map(|f| begin.number > f).unwrap_or(true) {
-			return Ok(Some(ForkAppendResult::Fork(begin)));
-		}
-
-		Ok(None)
-	}
-
-	/// Destroy fork by deleting all unfinalized entries.
-	pub fn destroy, Tx: StorageTransaction>(
-		&self,
-		storage: &S,
-		tx: &mut Tx,
-		best_finalized_block: Option>,
-	) -> ClientResult<()> {
-		let mut current = self.head.valid_from.clone();
-		loop {
-			// optionally: deletion stops when we found entry at finalized block
-			if let Some(best_finalized_block) = best_finalized_block {
-				if chain::is_finalized_block(storage, &current, best_finalized_block)? {
-					return Ok(());
-				}
-			}
-
-			// read pointer to previous entry
-			let entry = storage.require_entry(&current)?;
-			tx.remove_storage_entry(&current);
-
-			// deletion stops when there are no more entries in the list
-			current = match entry.prev_valid_from {
-				Some(prev_valid_from) => prev_valid_from,
-				None => return Ok(()),
-			};
-		}
-	}
+    /// Get reference to the head entry of this fork.
+    pub fn head(&self) -> &Entry {
+        &self.head
+    }
+
+    /// Check if the block is part of the fork.
+    pub fn matches>(
+        &self,
+        storage: &S,
+        block: &ComplexBlockId,
+    ) -> ClientResult {
+        let range = self.head.search_best_range_before(storage, block.number)?;
+        match range {
+            None => Ok(false),
+            Some((begin, end)) => {
+                chain::is_connected_to_range(storage, block, (&begin, end.as_ref()))
+            }
+        }
+    }
+
+    /// Try to append NEW block to the fork. This method will only 'work' (return true) when block
+    /// is actually appended to the fork AND the best known block of the fork is known (i.e. some
+    /// block has been already appended to this fork after last restart).
+    pub fn try_append(&self, parent: &ComplexBlockId) -> bool {
+        // when the best block of the fork is known, the check is trivial
+        //
+        // most of the calls will hopefully end here, because best_block is only unknown
+        // after restart and until new block is appended to the fork
+        self.best_block.as_ref() == Some(parent)
+    }
+
+    /// Try to append new block to the fork OR fork it.
+    pub fn try_append_or_fork>(
+        &self,
+        storage: &S,
+        parent: &ComplexBlockId,
+        best_finalized_entry_block: Option>,
+    ) -> ClientResult>> {
+        // try to find entries that are (possibly) surrounding the parent block
+        let range = self.head.search_best_range_before(storage, parent.number)?;
+        let begin = match range {
+            Some((begin, _)) => begin,
+            None => return Ok(None),
+        };
+
+        // check if the parent is connected to the beginning of the range
+        if !chain::is_connected_to_block(storage, &parent, &begin)? {
+            return Ok(None);
+        }
+
+        // the block is connected to the begin-entry. If begin is the head entry
+        // => we need to append new block to the fork
+        if begin == self.head.valid_from {
+            return Ok(Some(ForkAppendResult::Append));
+        }
+
+        // the parent block belongs to this fork AND it is located after last finalized entry
+        // => we need to make a new fork
+        if best_finalized_entry_block
+            .map(|f| begin.number > f)
+            .unwrap_or(true)
+        {
+            return Ok(Some(ForkAppendResult::Fork(begin)));
+        }
+
+        Ok(None)
+    }
+
+    /// Destroy fork by deleting all unfinalized entries.
+    pub fn destroy, Tx: StorageTransaction>(
+        &self,
+        storage: &S,
+        tx: &mut Tx,
+        best_finalized_block: Option>,
+    ) -> ClientResult<()> {
+        let mut current = self.head.valid_from.clone();
+        loop {
+            // optionally: deletion stops when we find an entry at the finalized block
+            if let Some(best_finalized_block) = best_finalized_block {
+                if chain::is_finalized_block(storage, &current, best_finalized_block)? {
+                    return Ok(());
+                }
+            }
+
+            // read pointer to previous entry
+            let entry = storage.require_entry(&current)?;
+            tx.remove_storage_entry(&current);
+
+            // deletion stops when there are no more entries in the list
+            current = match entry.prev_valid_from {
+                Some(prev_valid_from) => prev_valid_from,
+                None => return Ok(()),
+            };
+        }
+    }
 }

 /// Blockchain related functions.
 mod chain {
-	use runtime_primitives::traits::Header as HeaderT;
-	use super::*;
-
-	/// Is the block1 connected both ends of the range.
-	pub fn is_connected_to_range>(
-		storage: &S,
-		block: &ComplexBlockId,
-		range: (&ComplexBlockId, Option<&ComplexBlockId>),
-	) -> ClientResult {
-		let (begin, end) = range;
-		Ok(is_connected_to_block(storage, block, begin)?
-			&& match end {
-				Some(end) => is_connected_to_block(storage, block, end)?,
-				None => true,
-			})
-	}
-
-	/// Is the block1 directly connected (i.e. part of the same fork) to block2?
-	pub fn is_connected_to_block>(
-		storage: &S,
-		block1: &ComplexBlockId,
-		block2: &ComplexBlockId,
-	) -> ClientResult {
-		let (begin, end) = if block1 > block2 { (block2, block1) } else { (block1, block2) };
-		let mut current = storage.read_header(&end.hash)?
-			.ok_or_else(|| ClientErrorKind::UnknownBlock(format!("{}", end.hash)))?;
-		while *current.number() > begin.number {
-			current = storage.read_header(current.parent_hash())?
-				.ok_or_else(|| ClientErrorKind::UnknownBlock(format!("{}", current.parent_hash())))?;
-		}
-
-		Ok(begin.hash == current.hash())
-	}
-
-	/// Returns true if the given block is finalized.
-	pub fn is_finalized_block>(
-		storage: &S,
-		block: &ComplexBlockId,
-		best_finalized_block: NumberFor,
-	) -> ClientResult {
-		if block.number > best_finalized_block {
-			return Ok(false);
-		}
-
-		storage.read_id(block.number)
-			.map(|hash| hash.as_ref() == Some(&block.hash))
-	}
+    use super::*;
+    use runtime_primitives::traits::Header as HeaderT;
+
+    /// Is the block1 connected to both ends of the range.
+    pub fn is_connected_to_range>(
+        storage: &S,
+        block: &ComplexBlockId,
+        range: (&ComplexBlockId, Option<&ComplexBlockId>),
+    ) -> ClientResult {
+        let (begin, end) = range;
+        Ok(is_connected_to_block(storage, block, begin)?
+            && match end {
+                Some(end) => is_connected_to_block(storage, block, end)?,
+                None => true,
+            })
+    }
+
+    /// Is the block1 directly connected (i.e. part of the same fork) to block2?
+    pub fn is_connected_to_block>(
+        storage: &S,
+        block1: &ComplexBlockId,
+        block2: &ComplexBlockId,
+    ) -> ClientResult {
+        let (begin, end) = if block1 > block2 {
+            (block2, block1)
+        } else {
+            (block1, block2)
+        };
+        let mut current = storage
+            .read_header(&end.hash)?
+ .ok_or_else(|| ClientErrorKind::UnknownBlock(format!("{}", end.hash)))?; + while *current.number() > begin.number { + current = storage.read_header(current.parent_hash())?.ok_or_else(|| { + ClientErrorKind::UnknownBlock(format!("{}", current.parent_hash())) + })?; + } + + Ok(begin.hash == current.hash()) + } + + /// Returns true if the given block is finalized. + pub fn is_finalized_block>( + storage: &S, + block: &ComplexBlockId, + best_finalized_block: NumberFor, + ) -> ClientResult { + if block.number > best_finalized_block { + return Ok(false); + } + + storage + .read_id(block.number) + .map(|hash| hash.as_ref() == Some(&block.hash)) + } } /// Read list cache forks at blocks IDs. fn read_forks>( - storage: &S, - meta: Metadata, + storage: &S, + meta: Metadata, ) -> ClientResult<(Option>, Vec>)> { - let finalized = match meta.finalized { - Some(finalized) => Some(storage.require_entry(&finalized)? - .into_entry(finalized)), - None => None, - }; - - let unfinalized = meta.unfinalized.into_iter() - .map(|unfinalized| storage.require_entry(&unfinalized) - .map(|storage_entry| Fork { - best_block: None, - head: storage_entry.into_entry(unfinalized), - })) - .collect::>()?; - - Ok((finalized, unfinalized)) + let finalized = match meta.finalized { + Some(finalized) => Some(storage.require_entry(&finalized)?.into_entry(finalized)), + None => None, + }; + + let unfinalized = meta + .unfinalized + .into_iter() + .map(|unfinalized| { + storage + .require_entry(&unfinalized) + .map(|storage_entry| Fork { + best_block: None, + head: storage_entry.into_entry(unfinalized), + }) + }) + .collect::>()?; + + Ok((finalized, unfinalized)) } #[cfg(test)] pub mod tests { - use test_client::runtime::H256; - use runtime_primitives::testing::{Header, Block as RawBlock, ExtrinsicWrapper}; - use runtime_primitives::traits::Header as HeaderT; - use crate::cache::list_storage::tests::{DummyStorage, FaultyStorage, DummyTransaction}; - use super::*; - - type Block = RawBlock>; - - pub fn test_id(number: u64) -> ComplexBlockId { - ComplexBlockId::new(H256::from_low_u64_be(number), number) - } - - fn correct_id(number: u64) -> ComplexBlockId { - ComplexBlockId::new(test_header(number).hash(), number) - } - - fn fork_id(fork_nonce: u64, fork_from: u64, number: u64) -> ComplexBlockId { - ComplexBlockId::new(fork_header(fork_nonce, fork_from, number).hash(), number) - } - - fn test_header(number: u64) -> Header { - Header { - parent_hash: if number == 0 { Default::default() } else { test_header(number - 1).hash() }, - number, - state_root: Default::default(), - extrinsics_root: Default::default(), - digest: Default::default(), - } - } - - fn fork_header(fork_nonce: u64, fork_from: u64, number: u64) -> Header { - if fork_from == number { - test_header(number) - } else { - Header { - parent_hash: fork_header(fork_nonce, fork_from, number - 1).hash(), - number, - state_root: H256::from_low_u64_be(1 + fork_nonce), - extrinsics_root: Default::default(), - digest: Default::default(), - } - } - } - - #[test] - fn list_value_at_block_works() { - // when block is earlier than best finalized block AND it is not finalized - // --- 50 --- - // ----------> [100] - assert_eq!(ListCache::<_, u64, _>::new(DummyStorage::new(), 1024, test_id(100)) - .value_at_block(&test_id(50)).unwrap(), None); - // when block is earlier than best finalized block AND it is finalized AND value is empty - // [30] ---- 50 ---> [100] - assert_eq!(ListCache::new( - DummyStorage::new() - .with_meta(Some(test_id(100)), Vec::new()) - .with_id(50, 
H256::from_low_u64_be(50)) - .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: Some(100) }) - .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: None }), - 1024, test_id(100) - ).value_at_block(&test_id(50)).unwrap(), None); - // when block is earlier than best finalized block AND it is finalized AND value is some - // [30] ---- 50 ---> [100] - assert_eq!(ListCache::new( - DummyStorage::new() - .with_meta(Some(test_id(100)), Vec::new()) - .with_id(50, H256::from_low_u64_be(50)) - .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: Some(100) }) - .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: Some(30) }), - 1024, test_id(100) - ).value_at_block(&test_id(50)).unwrap(), Some(30)); - // when block is the best finalized block AND value is some - // ---> [100] - assert_eq!(ListCache::new( - DummyStorage::new() - .with_meta(Some(test_id(100)), Vec::new()) - .with_id(100, H256::from_low_u64_be(100)) - .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: Some(100) }) - .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: Some(30) }), - 1024, test_id(100) - ).value_at_block(&test_id(100)).unwrap(), Some(100)); - // when block is parallel to the best finalized block - // ---- 100 - // ---> [100] - assert_eq!(ListCache::new( - DummyStorage::new() - .with_meta(Some(test_id(100)), Vec::new()) - .with_id(50, H256::from_low_u64_be(50)) - .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: Some(100) }) - .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: Some(30) }), - 1024, test_id(100) - ).value_at_block(&ComplexBlockId::new(H256::from_low_u64_be(2), 100)).unwrap(), None); - - // when block is later than last finalized block AND there are no forks AND finalized value is None - // ---> [100] --- 200 - assert_eq!(ListCache::<_, u64, _>::new( - DummyStorage::new() - .with_meta(Some(test_id(100)), Vec::new()) - .with_id(50, H256::from_low_u64_be(50)) - .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: None }), - 1024, test_id(100) - ).value_at_block(&test_id(200)).unwrap(), None); - // when block is later than last finalized block AND there are no forks AND finalized value is Some - // ---> [100] --- 200 - assert_eq!(ListCache::new( - DummyStorage::new() - .with_meta(Some(test_id(100)), Vec::new()) - .with_id(50, H256::from_low_u64_be(50)) - .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: Some(100) }), - 1024, test_id(100) - ).value_at_block(&test_id(200)).unwrap(), Some(100)); - - // when block is later than last finalized block AND there are no matching forks - // AND block is connected to finalized block AND finalized value is None - // --- 3 - // ---> [2] /---------> [4] - assert_eq!(ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![correct_id(4)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: None }) - .with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(4) }) - .with_header(test_header(2)) - .with_header(test_header(3)) - .with_header(test_header(4)) - .with_header(fork_header(0, 2, 3)), - 1024, test_id(2) - ).value_at_block(&fork_id(0, 2, 3)).unwrap(), None); - // when block is later than last finalized block AND there are no matching forks - // AND block is connected to finalized block AND finalized value is Some - // --- 3 - // ---> 
[2] /---------> [4] - assert_eq!(ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![correct_id(4)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) }) - .with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(4) }) - .with_header(test_header(2)) - .with_header(test_header(3)) - .with_header(test_header(4)) - .with_header(fork_header(0, 2, 3)), - 1024, test_id(2) - ).value_at_block(&fork_id(0, 2, 3)).unwrap(), Some(2)); - // when block is later than last finalized block AND there are no matching forks - // AND block is not connected to finalized block - // --- 2 --- 3 - // 1 /---> [2] ---------> [4] - assert_eq!(ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![correct_id(4)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) }) - .with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(4) }) - .with_header(test_header(1)) - .with_header(test_header(2)) - .with_header(test_header(3)) - .with_header(test_header(4)) - .with_header(fork_header(0, 1, 3)) - .with_header(fork_header(0, 1, 2)), - 1024, test_id(2) - ).value_at_block(&fork_id(0, 1, 3)).unwrap(), None); - - // when block is later than last finalized block AND it appends to unfinalized fork from the end - // AND unfinalized value is Some - // ---> [2] ---> [4] ---> 5 - assert_eq!(ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![correct_id(4)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) }) - .with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(4) }) - .with_header(test_header(4)) - .with_header(test_header(5)), - 1024, test_id(2) - ).value_at_block(&correct_id(5)).unwrap(), Some(4)); - // when block is later than last finalized block AND it appends to unfinalized fork from the end - // AND unfinalized value is None - // ---> [2] ---> [4] ---> 5 - assert_eq!(ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![correct_id(4)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) }) - .with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: None }) - .with_header(test_header(4)) - .with_header(test_header(5)), - 1024, test_id(2) - ).value_at_block(&correct_id(5)).unwrap(), None); - // when block is later than last finalized block AND it fits to the middle of unfinalized fork - // AND unfinalized value is Some - // ---> [2] ---> [4] ---> 5 ---> [6] - assert_eq!(ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![correct_id(6)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) }) - .with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(4) }) - .with_entry(correct_id(6), StorageEntry { prev_valid_from: Some(correct_id(4)), value: None }) - .with_header(test_header(4)) - .with_header(test_header(5)) - .with_header(test_header(6)), - 1024, test_id(2) - ).value_at_block(&correct_id(5)).unwrap(), Some(4)); - // when block is later than last finalized block AND it fits to the middle of unfinalized fork - // AND unfinalized value is None - // ---> [2] ---> [4] ---> 5 ---> [6] - assert_eq!(ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![correct_id(6)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) }) - 
.with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: None }) - .with_entry(correct_id(6), StorageEntry { prev_valid_from: Some(correct_id(4)), value: Some(4) }) - .with_header(test_header(4)) - .with_header(test_header(5)) - .with_header(test_header(6)), - 1024, test_id(2) - ).value_at_block(&correct_id(5)).unwrap(), None); - // when block is later than last finalized block AND it does not fits unfinalized fork - // AND it is connected to the finalized block AND finalized value is Some - // ---> [2] ----------> [4] - // \--- 3 - assert_eq!(ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![correct_id(4)]) - .with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(4) }) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) }) - .with_header(test_header(2)) - .with_header(test_header(3)) - .with_header(test_header(4)) - .with_header(fork_header(0, 2, 3)), - 1024, test_id(2) - ).value_at_block(&fork_id(0, 2, 3)).unwrap(), Some(2)); - // when block is later than last finalized block AND it does not fits unfinalized fork - // AND it is connected to the finalized block AND finalized value is Some - // ---> [2] ----------> [4] - // \--- 3 - assert_eq!(ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![correct_id(4)]) - .with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(4) }) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: None }) - .with_header(test_header(2)) - .with_header(test_header(3)) - .with_header(test_header(4)) - .with_header(fork_header(0, 2, 3)), - 1024, test_id(2) - ).value_at_block(&fork_id(0, 2, 3)).unwrap(), None); - } - - #[test] - fn list_on_block_insert_works() { - // when trying to insert block < finalized number - assert!(ListCache::new(DummyStorage::new(), 1024, test_id(100)) - .on_block_insert(&mut DummyTransaction::new(), test_id(49), test_id(50), Some(50), false).unwrap().is_none()); - // when trying to insert block @ finalized number - assert!(ListCache::new(DummyStorage::new(), 1024, test_id(100)) - .on_block_insert(&mut DummyTransaction::new(), test_id(99), test_id(100), Some(100), false).unwrap().is_none()); - - // when trying to insert non-final block AND it appends to the best block of unfinalized fork - // AND new value is the same as in the fork' best block - let mut cache = ListCache::new( - DummyStorage::new() - .with_meta(None, vec![test_id(4)]) - .with_entry(test_id(4), StorageEntry { prev_valid_from: None, value: Some(4) }), - 1024, test_id(2) - ); - cache.unfinalized[0].best_block = Some(test_id(4)); - let mut tx = DummyTransaction::new(); - assert_eq!(cache.on_block_insert(&mut tx, test_id(4), test_id(5), Some(4), false).unwrap(), - Some(CommitOperation::AppendNewBlock(0, test_id(5)))); - assert!(tx.inserted_entries().is_empty()); - assert!(tx.removed_entries().is_empty()); - assert!(tx.updated_meta().is_none()); - // when trying to insert non-final block AND it appends to the best block of unfinalized fork - // AND new value is the same as in the fork' best block - let mut tx = DummyTransaction::new(); - assert_eq!(cache.on_block_insert(&mut tx, test_id(4), test_id(5), Some(5), false).unwrap(), - Some(CommitOperation::AppendNewEntry(0, Entry { valid_from: test_id(5), value: Some(5) }))); - assert_eq!(*tx.inserted_entries(), vec![test_id(5).hash].into_iter().collect()); - assert!(tx.removed_entries().is_empty()); - assert_eq!(*tx.updated_meta(), 
Some(Metadata { finalized: None, unfinalized: vec![test_id(5)] })); - - // when trying to insert non-final block AND it is the first block that appends to the best block of unfinalized fork - // AND new value is the same as in the fork' best block - let cache = ListCache::new( - DummyStorage::new() - .with_meta(None, vec![correct_id(4)]) - .with_entry(correct_id(4), StorageEntry { prev_valid_from: None, value: Some(4) }) - .with_header(test_header(4)), - 1024, test_id(2) - ); - let mut tx = DummyTransaction::new(); - assert_eq!(cache.on_block_insert(&mut tx, correct_id(4), correct_id(5), Some(4), false).unwrap(), - Some(CommitOperation::AppendNewBlock(0, correct_id(5)))); - assert!(tx.inserted_entries().is_empty()); - assert!(tx.removed_entries().is_empty()); - assert!(tx.updated_meta().is_none()); - // when trying to insert non-final block AND it is the first block that appends to the best block of unfinalized fork - // AND new value is the same as in the fork' best block - let mut tx = DummyTransaction::new(); - assert_eq!(cache.on_block_insert(&mut tx, correct_id(4), correct_id(5), Some(5), false).unwrap(), - Some(CommitOperation::AppendNewEntry(0, Entry { valid_from: correct_id(5), value: Some(5) }))); - assert_eq!(*tx.inserted_entries(), vec![correct_id(5).hash].into_iter().collect()); - assert!(tx.removed_entries().is_empty()); - assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: None, unfinalized: vec![correct_id(5)] })); - - // when trying to insert non-final block AND it forks unfinalized fork - let cache = ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![correct_id(4)]) - .with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(4) }) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) }) - .with_header(test_header(2)) - .with_header(test_header(3)) - .with_header(test_header(4)), - 1024, correct_id(2) - ); - let mut tx = DummyTransaction::new(); - assert_eq!(cache.on_block_insert(&mut tx, correct_id(3), fork_id(0, 3, 4), Some(14), false).unwrap(), - Some(CommitOperation::AddNewFork(Entry { valid_from: fork_id(0, 3, 4), value: Some(14) }))); - assert_eq!(*tx.inserted_entries(), vec![fork_id(0, 3, 4).hash].into_iter().collect()); - assert!(tx.removed_entries().is_empty()); - assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: Some(correct_id(2)), unfinalized: vec![correct_id(4), fork_id(0, 3, 4)] })); - - // when trying to insert non-final block AND there are no unfinalized forks - // AND value is the same as last finalized - let cache = ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) }), - 1024, correct_id(2) - ); - let mut tx = DummyTransaction::new(); - assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), false).unwrap(), None); - assert!(tx.inserted_entries().is_empty()); - assert!(tx.removed_entries().is_empty()); - assert!(tx.updated_meta().is_none()); - // when trying to insert non-final block AND there are no unfinalized forks - // AND value differs from last finalized - let cache = ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) }), - 1024, correct_id(2) - ); - let mut tx = DummyTransaction::new(); - assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), false).unwrap(), - 
Some(CommitOperation::AddNewFork(Entry { valid_from: correct_id(3), value: Some(3) }))); - assert_eq!(*tx.inserted_entries(), vec![correct_id(3).hash].into_iter().collect()); - assert!(tx.removed_entries().is_empty()); - assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: Some(correct_id(2)), unfinalized: vec![correct_id(3)] })); - - // when inserting finalized entry AND there are no previous finalzed entries - let cache = ListCache::new(DummyStorage::new(), 1024, correct_id(2)); - let mut tx = DummyTransaction::new(); - assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), true).unwrap(), - Some(CommitOperation::BlockFinalized(correct_id(3), Some(Entry { valid_from: correct_id(3), value: Some(3) }), Default::default()))); - assert_eq!(*tx.inserted_entries(), vec![correct_id(3).hash].into_iter().collect()); - assert!(tx.removed_entries().is_empty()); - assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: Some(correct_id(3)), unfinalized: vec![] })); - // when inserting finalized entry AND value is the same as in previous finalized - let cache = ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) }), - 1024, correct_id(2) - ); - let mut tx = DummyTransaction::new(); - assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), true).unwrap(), - Some(CommitOperation::BlockFinalized(correct_id(3), None, Default::default()))); - assert!(tx.inserted_entries().is_empty()); - assert!(tx.removed_entries().is_empty()); - assert!(tx.updated_meta().is_none()); - // when inserting finalized entry AND value differs from previous finalized - let mut tx = DummyTransaction::new(); - assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), true).unwrap(), - Some(CommitOperation::BlockFinalized(correct_id(3), Some(Entry { valid_from: correct_id(3), value: Some(3) }), Default::default()))); - assert_eq!(*tx.inserted_entries(), vec![correct_id(3).hash].into_iter().collect()); - assert!(tx.removed_entries().is_empty()); - assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: Some(correct_id(3)), unfinalized: vec![] })); - - // inserting finalized entry removes abandoned fork EVEN if new entry is not inserted - let cache = ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![fork_id(0, 1, 3)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) }) - .with_entry(fork_id(0, 1, 3), StorageEntry { prev_valid_from: None, value: Some(13) }), - 1024, correct_id(2) - ); - let mut tx = DummyTransaction::new(); - assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), true).unwrap(), - Some(CommitOperation::BlockFinalized(correct_id(3), None, vec![0].into_iter().collect()))); - } - - #[test] - fn list_on_block_finalized_works() { - // finalization does not finalizes entry if it does not exists - let cache = ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![correct_id(5)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) }) - .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(5) }), - 1024, correct_id(2) - ); - let mut tx = DummyTransaction::new(); - assert_eq!(cache.on_block_finalize(&mut tx, correct_id(2), correct_id(3)).unwrap(), - Some(CommitOperation::BlockFinalized(correct_id(3), None, Default::default()))); - 
assert!(tx.inserted_entries().is_empty()); - assert!(tx.removed_entries().is_empty()); - assert!(tx.updated_meta().is_none()); - // finalization finalizes entry - let cache = ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![correct_id(5)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) }) - .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(5) }), - 1024, correct_id(4) - ); - let mut tx = DummyTransaction::new(); - assert_eq!(cache.on_block_finalize(&mut tx, correct_id(4), correct_id(5)).unwrap(), - Some(CommitOperation::BlockFinalized(correct_id(5), Some(Entry { valid_from: correct_id(5), value: Some(5) }), vec![0].into_iter().collect()))); - assert!(tx.inserted_entries().is_empty()); - assert!(tx.removed_entries().is_empty()); - assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: Some(correct_id(5)), unfinalized: vec![] })); - // finalization removes abandoned forks - let cache = ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![fork_id(0, 1, 3)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) }) - .with_entry(fork_id(0, 1, 3), StorageEntry { prev_valid_from: None, value: Some(13) }), - 1024, correct_id(2) - ); - let mut tx = DummyTransaction::new(); - assert_eq!(cache.on_block_finalize(&mut tx, correct_id(2), correct_id(3)).unwrap(), - Some(CommitOperation::BlockFinalized(correct_id(3), None, vec![0].into_iter().collect()))); - } - - #[test] - fn list_transaction_commit_works() { - let mut cache = ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![correct_id(5), correct_id(6)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) }) - .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(5) }) - .with_entry(correct_id(6), StorageEntry { prev_valid_from: Some(correct_id(5)), value: Some(6) }), - 1024, correct_id(2) - ); - - // when new block is appended to unfinalized fork - cache.on_transaction_commit(CommitOperation::AppendNewBlock(0, correct_id(6))); - assert_eq!(cache.unfinalized[0].best_block, Some(correct_id(6))); - // when new entry is appnded to unfinalized fork - cache.on_transaction_commit(CommitOperation::AppendNewEntry(0, Entry { valid_from: correct_id(7), value: Some(7) })); - assert_eq!(cache.unfinalized[0].best_block, Some(correct_id(7))); - assert_eq!(cache.unfinalized[0].head, Entry { valid_from: correct_id(7), value: Some(7) }); - // when new fork is added - cache.on_transaction_commit(CommitOperation::AddNewFork(Entry { valid_from: correct_id(10), value: Some(10) })); - assert_eq!(cache.unfinalized[2].best_block, Some(correct_id(10))); - assert_eq!(cache.unfinalized[2].head, Entry { valid_from: correct_id(10), value: Some(10) }); - // when block is finalized + entry is finalized + unfinalized forks are deleted - cache.on_transaction_commit(CommitOperation::BlockFinalized(correct_id(20), Some(Entry { valid_from: correct_id(20), value: Some(20) }), vec![0, 1, 2].into_iter().collect())); - assert_eq!(cache.best_finalized_block, correct_id(20)); - assert_eq!(cache.best_finalized_entry, Some(Entry { valid_from: correct_id(20), value: Some(20) })); - assert!(cache.unfinalized.is_empty()); - } - - #[test] - fn list_find_unfinalized_fork_works() { - // ----------> [3] - // --- [2] ---------> 4 ---> [5] - assert_eq!(ListCache::new( - DummyStorage::new() - .with_meta(None, vec![fork_id(0, 1, 3), correct_id(5)]) - 
.with_entry(fork_id(0, 1, 3), StorageEntry { prev_valid_from: Some(correct_id(1)), value: Some(13) }) - .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(5) }) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: None }) - .with_header(test_header(2)) - .with_header(test_header(3)) - .with_header(test_header(4)) - .with_header(test_header(5)), - 1024, correct_id(0) - ).find_unfinalized_fork(&correct_id(4)).unwrap().unwrap().head.valid_from, correct_id(5)); - // --- [2] ---------------> [5] - // ----------> [3] ---> 4 - assert_eq!(ListCache::new( - DummyStorage::new() - .with_meta(None, vec![correct_id(5), fork_id(0, 1, 3)]) - .with_entry(fork_id(0, 1, 3), StorageEntry { prev_valid_from: Some(correct_id(1)), value: Some(13) }) - .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(5) }) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: Some(correct_id(1)), value: Some(2) }) - .with_header(test_header(2)) - .with_header(test_header(3)) - .with_header(test_header(4)) - .with_header(test_header(5)) - .with_header(fork_header(0, 1, 2)) - .with_header(fork_header(0, 1, 3)) - .with_header(fork_header(0, 1, 4)), - 1024, correct_id(0) - ).find_unfinalized_fork(&fork_id(0, 1, 4)).unwrap().unwrap().head.valid_from, fork_id(0, 1, 3)); - // --- [2] ---------------> [5] - // ----------> [3] - // -----------------> 4 - assert!(ListCache::new( - DummyStorage::new() - .with_meta(None, vec![correct_id(5), fork_id(0, 1, 3)]) - .with_entry(fork_id(0, 1, 3), StorageEntry { prev_valid_from: Some(correct_id(1)), value: Some(13) }) - .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(5) }) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: Some(correct_id(1)), value: Some(2) }) - .with_header(test_header(2)) - .with_header(test_header(3)) - .with_header(test_header(4)) - .with_header(test_header(5)) - .with_header(fork_header(0, 1, 3)) - .with_header(fork_header(0, 1, 4)) - .with_header(fork_header(1, 1, 2)) - .with_header(fork_header(1, 1, 3)) - .with_header(fork_header(1, 1, 4)), - 1024, correct_id(0) - ).find_unfinalized_fork(&fork_id(1, 1, 4)).unwrap().is_none()); - } - - #[test] - fn fork_matches_works() { - // when block is not within list range - let storage = DummyStorage::new() - .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: Some(100) }) - .with_entry(test_id(50), StorageEntry { prev_valid_from: None, value: Some(50) }); - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: None } } - .matches(&storage, &test_id(20)).unwrap(), false); - // when block is not connected to the begin block - let storage = DummyStorage::new() - .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(3)), value: Some(100) }) - .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: Some(200) }) - .with_header(test_header(5)) - .with_header(test_header(4)) - .with_header(test_header(3)) - .with_header(fork_header(0, 2, 4)) - .with_header(fork_header(0, 2, 3)); - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: Some(100) } } - .matches(&storage, &fork_id(0, 2, 4)).unwrap(), false); - // when block is not connected to the end block - let storage = DummyStorage::new() - .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(3)), value: Some(100) }) - .with_entry(correct_id(3), StorageEntry { 
prev_valid_from: None, value: Some(200) }) - .with_header(test_header(5)) - .with_header(test_header(4)) - .with_header(test_header(3)) - .with_header(fork_header(0, 3, 4)); - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: Some(100) } } - .matches(&storage, &fork_id(0, 3, 4)).unwrap(), false); - // when block is connected to the begin block AND end is open - let storage = DummyStorage::new() - .with_entry(correct_id(5), StorageEntry { prev_valid_from: None, value: Some(100) }) - .with_header(test_header(5)) - .with_header(test_header(6)); - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: Some(100) } } - .matches(&storage, &correct_id(6)).unwrap(), true); - // when block is connected to the begin block AND to the end block - let storage = DummyStorage::new() - .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(3)), value: Some(100) }) - .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: Some(200) }) - .with_header(test_header(5)) - .with_header(test_header(4)) - .with_header(test_header(3)); - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: Some(100) } } - .matches(&storage, &correct_id(4)).unwrap(), true); - } - - #[test] - fn fork_try_append_works() { - // when best block is unknown - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: None } } - .try_append(&test_id(100)), false); - // when best block is known but different - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: None } } - .try_append(&test_id(101)), false); - // when best block is known and the same - assert_eq!(Fork::<_, u64> { best_block: Some(test_id(100)), head: Entry { valid_from: test_id(100), value: None } } - .try_append(&test_id(100)), true); - } - - #[test] - fn fork_try_append_or_fork_works() { - // when there's no entry before parent - let storage = DummyStorage::new() - .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: Some(100) }) - .with_entry(test_id(50), StorageEntry { prev_valid_from: None, value: Some(50) }); - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: None } } - .try_append_or_fork(&storage, &test_id(30), None).unwrap(), None); - // when parent does not belong to the fork - let storage = DummyStorage::new() - .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(3)), value: Some(100) }) - .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: Some(200) }) - .with_header(test_header(5)) - .with_header(test_header(4)) - .with_header(test_header(3)) - .with_header(fork_header(0, 2, 4)) - .with_header(fork_header(0, 2, 3)); - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: Some(100) } } - .try_append_or_fork(&storage, &fork_id(0, 2, 4), None).unwrap(), None); - // when the entry before parent is the head entry - let storage = DummyStorage::new() - .with_entry(ComplexBlockId::new(test_header(5).hash(), 5), StorageEntry { prev_valid_from: Some(correct_id(3)), value: Some(100) }) - .with_header(test_header(6)) - .with_header(test_header(5)); - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: Some(100) } } - .try_append_or_fork(&storage, &correct_id(6), None).unwrap(), Some(ForkAppendResult::Append)); - // when the parent located after 
last finalized entry - let storage = DummyStorage::new() - .with_entry(correct_id(6), StorageEntry { prev_valid_from: Some(correct_id(3)), value: Some(100) }) - .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: Some(200) }) - .with_header(test_header(6)) - .with_header(test_header(5)) - .with_header(test_header(4)) - .with_header(test_header(3)) - .with_header(fork_header(0, 4, 5)); - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(6), value: Some(100) } } - .try_append_or_fork(&storage, &fork_id(0, 4, 5), None).unwrap(), Some(ForkAppendResult::Fork(ComplexBlockId::new(test_header(3).hash(), 3)))); - // when the parent located before last finalized entry - let storage = DummyStorage::new() - .with_entry(correct_id(6), StorageEntry { prev_valid_from: Some(correct_id(3)), value: Some(100) }) - .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: Some(200) }) - .with_header(test_header(6)) - .with_header(test_header(5)) - .with_header(test_header(4)) - .with_header(test_header(3)) - .with_header(fork_header(0, 4, 5)); - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(6), value: Some(100) } } - .try_append_or_fork(&storage, &fork_id(0, 4, 5), Some(3)).unwrap(), None); - } - - #[test] - fn fork_destroy_works() { - // when we reached finalized entry without iterations - let storage = DummyStorage::new().with_id(100, H256::from_low_u64_be(100)); - let mut tx = DummyTransaction::new(); - Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: None } } - .destroy(&storage, &mut tx, Some(200)).unwrap(); - assert!(tx.removed_entries().is_empty()); - // when we reach finalized entry with iterations - let storage = DummyStorage::new() - .with_id(10, H256::from_low_u64_be(10)) - .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: Some(100) }) - .with_entry(test_id(50), StorageEntry { prev_valid_from: Some(test_id(20)), value: Some(50) }) - .with_entry(test_id(20), StorageEntry { prev_valid_from: Some(test_id(10)), value: Some(20) }) - .with_entry(test_id(10), StorageEntry { prev_valid_from: Some(test_id(5)), value: Some(10) }) - .with_entry(test_id(5), StorageEntry { prev_valid_from: Some(test_id(3)), value: Some(5) }) - .with_entry(test_id(3), StorageEntry { prev_valid_from: None, value: None }); - let mut tx = DummyTransaction::new(); - Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: None } } - .destroy(&storage, &mut tx, Some(200)).unwrap(); - assert_eq!(*tx.removed_entries(), - vec![test_id(100).hash, test_id(50).hash, test_id(20).hash].into_iter().collect()); - // when we reach beginning of fork before finalized block - let storage = DummyStorage::new() - .with_id(10, H256::from_low_u64_be(10)) - .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: Some(100) }) - .with_entry(test_id(50), StorageEntry { prev_valid_from: None, value: Some(50) }); - let mut tx = DummyTransaction::new(); - Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: None } } - .destroy(&storage, &mut tx, Some(200)).unwrap(); - assert_eq!(*tx.removed_entries(), - vec![test_id(100).hash, test_id(50).hash].into_iter().collect()); - } - - #[test] - fn is_connected_to_block_fails() { - // when storage returns error - assert!(chain::is_connected_to_block::<_, u64, _>(&FaultyStorage, &test_id(1), &test_id(100)).is_err()); - // when there's no header in the storage - 
assert!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new(), &test_id(1), &test_id(100)).is_err()); - } - - #[test] - fn is_connected_to_block_works() { - // when without iterations we end up with different block - assert_eq!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new() - .with_header(test_header(1)), - &test_id(1), &correct_id(1)).unwrap(), false); - // when with ASC iterations we end up with different block - assert_eq!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new() - .with_header(test_header(0)) - .with_header(test_header(1)) - .with_header(test_header(2)), - &test_id(0), &correct_id(2)).unwrap(), false); - // when with DESC iterations we end up with different block - assert_eq!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new() - .with_header(test_header(0)) - .with_header(test_header(1)) - .with_header(test_header(2)), - &correct_id(2), &test_id(0)).unwrap(), false); - // when without iterations we end up with the same block - assert_eq!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new() - .with_header(test_header(1)), - &correct_id(1), &correct_id(1)).unwrap(), true); - // when with ASC iterations we end up with the same block - assert_eq!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new() - .with_header(test_header(0)) - .with_header(test_header(1)) - .with_header(test_header(2)), - &correct_id(0), &correct_id(2)).unwrap(), true); - // when with DESC iterations we end up with the same block - assert_eq!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new() - .with_header(test_header(0)) - .with_header(test_header(1)) - .with_header(test_header(2)), - &correct_id(2), &correct_id(0)).unwrap(), true); - } - - #[test] - fn is_finalized_block_fails() { - // when storage returns error - assert!(chain::is_finalized_block::<_, u64, _>(&FaultyStorage, &test_id(1), 100).is_err()); - - } - - #[test] - fn is_finalized_block_works() { - // when number of block is larger than last finalized block - assert_eq!(chain::is_finalized_block::<_, u64, _>(&DummyStorage::new(), &test_id(100), 1).unwrap(), false); - // when there's no hash for this block number in the database - assert_eq!(chain::is_finalized_block::<_, u64, _>(&DummyStorage::new(), &test_id(1), 100).unwrap(), false); - // when there's different hash for this block number in the database - assert_eq!(chain::is_finalized_block::<_, u64, _>(&DummyStorage::new() - .with_id(1, H256::from_low_u64_be(2)), &test_id(1), 100).unwrap(), false); - // when there's the same hash for this block number in the database - assert_eq!(chain::is_finalized_block::<_, u64, _>(&DummyStorage::new() - .with_id(1, H256::from_low_u64_be(1)), &test_id(1), 100).unwrap(), true); - } - - #[test] - fn read_forks_fails() { - // when storage returns error during finalized entry read - assert!(read_forks::<Block, u64, _>(&FaultyStorage, Metadata { - finalized: Some(test_id(1)), - unfinalized: vec![], - }).is_err()); - // when storage returns error during unfinalized entry read - assert!(read_forks::<Block, u64, _>(&FaultyStorage, Metadata { - finalized: None, - unfinalized: vec![test_id(1)], - }).is_err()); - // when finalized entry is not found - assert!(read_forks::<Block, u64, _>(&DummyStorage::new(), Metadata { - finalized: Some(test_id(1)), - unfinalized: vec![], - }).is_err()); - // when unfinalized entry is not found - assert!(read_forks::<Block, u64, _>(&DummyStorage::new(), Metadata { - finalized: None, - unfinalized: vec![test_id(1)], - }).is_err()); - } - - #[test] - fn read_forks_works() { - let storage = DummyStorage::new() - 
.with_entry(test_id(10), StorageEntry { prev_valid_from: Some(test_id(1)), value: Some(11) }) - .with_entry(test_id(20), StorageEntry { prev_valid_from: Some(test_id(2)), value: None }) - .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: Some(33) }); - let expected = ( - Some(Entry { valid_from: test_id(10), value: Some(11) }), - vec![ - Fork { best_block: None, head: Entry { valid_from: test_id(20), value: None } }, - Fork { best_block: None, head: Entry { valid_from: test_id(30), value: Some(33) } }, - ], - ); - - assert_eq!(expected, read_forks(&storage, Metadata { - finalized: Some(test_id(10)), - unfinalized: vec![test_id(20), test_id(30)], - }).unwrap()); - } - - #[test] - fn ancient_entries_are_pruned() { - let cache = ListCache::new(DummyStorage::new() - .with_id(10, H256::from_low_u64_be(10)) - .with_id(20, H256::from_low_u64_be(20)) - .with_id(30, H256::from_low_u64_be(30)) - .with_entry(test_id(10), StorageEntry { prev_valid_from: None, value: Some(10) }) - .with_entry(test_id(20), StorageEntry { prev_valid_from: Some(test_id(10)), value: Some(20) }) - .with_entry(test_id(30), StorageEntry { prev_valid_from: Some(test_id(20)), value: Some(30) }), - 10, test_id(9)); - let mut tx = DummyTransaction::new(); - - // when finalizing entry #10: no entries pruned - cache.prune_finalized_entries(&mut tx, &test_id(10)); - assert!(tx.removed_entries().is_empty()); - assert!(tx.inserted_entries().is_empty()); - // when finalizing entry #19: no entries pruned - cache.prune_finalized_entries(&mut tx, &test_id(19)); - assert!(tx.removed_entries().is_empty()); - assert!(tx.inserted_entries().is_empty()); - // when finalizing entry #20: no entries pruned - cache.prune_finalized_entries(&mut tx, &test_id(20)); - assert!(tx.removed_entries().is_empty()); - assert!(tx.inserted_entries().is_empty()); - // when finalizing entry #30: entry 10 pruned + entry 20 is truncated - cache.prune_finalized_entries(&mut tx, &test_id(30)); - assert_eq!(*tx.removed_entries(), vec![test_id(10).hash].into_iter().collect()); - assert_eq!(*tx.inserted_entries(), vec![test_id(20).hash].into_iter().collect()); - } + use super::*; + use crate::cache::list_storage::tests::{DummyStorage, DummyTransaction, FaultyStorage}; + use runtime_primitives::testing::{Block as RawBlock, ExtrinsicWrapper, Header}; + use runtime_primitives::traits::Header as HeaderT; + use test_client::runtime::H256; + + type Block = RawBlock<ExtrinsicWrapper<u64>>; + + pub fn test_id(number: u64) -> ComplexBlockId<Block> { + ComplexBlockId::new(H256::from_low_u64_be(number), number) + } + + fn correct_id(number: u64) -> ComplexBlockId<Block> { + ComplexBlockId::new(test_header(number).hash(), number) + } + + fn fork_id(fork_nonce: u64, fork_from: u64, number: u64) -> ComplexBlockId<Block> { + ComplexBlockId::new(fork_header(fork_nonce, fork_from, number).hash(), number) + } + + fn test_header(number: u64) -> Header { + Header { + parent_hash: if number == 0 { + Default::default() + } else { + test_header(number - 1).hash() + }, + number, + state_root: Default::default(), + extrinsics_root: Default::default(), + digest: Default::default(), + } + } + + fn fork_header(fork_nonce: u64, fork_from: u64, number: u64) -> Header { + if fork_from == number { + test_header(number) + } else { + Header { + parent_hash: fork_header(fork_nonce, fork_from, number - 1).hash(), + number, + state_root: H256::from_low_u64_be(1 + fork_nonce), + extrinsics_root: Default::default(), + digest: Default::default(), + } + } + } + + #[test] + fn list_value_at_block_works() { + // when block is earlier 
than best finalized block AND it is not finalized + // --- 50 --- + // ----------> [100] + assert_eq!( + ListCache::<_, u64, _>::new(DummyStorage::new(), 1024, test_id(100)) + .value_at_block(&test_id(50)) + .unwrap(), + None + ); + // when block is earlier than best finalized block AND it is finalized AND value is empty + // [30] ---- 50 ---> [100] + assert_eq!( + ListCache::new( + DummyStorage::new() + .with_meta(Some(test_id(100)), Vec::new()) + .with_id(50, H256::from_low_u64_be(50)) + .with_entry( + test_id(100), + StorageEntry { + prev_valid_from: Some(test_id(30)), + value: Some(100) + } + ) + .with_entry( + test_id(30), + StorageEntry { + prev_valid_from: None, + value: None + } + ), + 1024, + test_id(100) + ) + .value_at_block(&test_id(50)) + .unwrap(), + None + ); + // when block is earlier than best finalized block AND it is finalized AND value is some + // [30] ---- 50 ---> [100] + assert_eq!( + ListCache::new( + DummyStorage::new() + .with_meta(Some(test_id(100)), Vec::new()) + .with_id(50, H256::from_low_u64_be(50)) + .with_entry( + test_id(100), + StorageEntry { + prev_valid_from: Some(test_id(30)), + value: Some(100) + } + ) + .with_entry( + test_id(30), + StorageEntry { + prev_valid_from: None, + value: Some(30) + } + ), + 1024, + test_id(100) + ) + .value_at_block(&test_id(50)) + .unwrap(), + Some(30) + ); + // when block is the best finalized block AND value is some + // ---> [100] + assert_eq!( + ListCache::new( + DummyStorage::new() + .with_meta(Some(test_id(100)), Vec::new()) + .with_id(100, H256::from_low_u64_be(100)) + .with_entry( + test_id(100), + StorageEntry { + prev_valid_from: Some(test_id(30)), + value: Some(100) + } + ) + .with_entry( + test_id(30), + StorageEntry { + prev_valid_from: None, + value: Some(30) + } + ), + 1024, + test_id(100) + ) + .value_at_block(&test_id(100)) + .unwrap(), + Some(100) + ); + // when block is parallel to the best finalized block + // ---- 100 + // ---> [100] + assert_eq!( + ListCache::new( + DummyStorage::new() + .with_meta(Some(test_id(100)), Vec::new()) + .with_id(50, H256::from_low_u64_be(50)) + .with_entry( + test_id(100), + StorageEntry { + prev_valid_from: Some(test_id(30)), + value: Some(100) + } + ) + .with_entry( + test_id(30), + StorageEntry { + prev_valid_from: None, + value: Some(30) + } + ), + 1024, + test_id(100) + ) + .value_at_block(&ComplexBlockId::new(H256::from_low_u64_be(2), 100)) + .unwrap(), + None + ); + + // when block is later than last finalized block AND there are no forks AND finalized value is None + // ---> [100] --- 200 + assert_eq!( + ListCache::<_, u64, _>::new( + DummyStorage::new() + .with_meta(Some(test_id(100)), Vec::new()) + .with_id(50, H256::from_low_u64_be(50)) + .with_entry( + test_id(100), + StorageEntry { + prev_valid_from: Some(test_id(30)), + value: None + } + ), + 1024, + test_id(100) + ) + .value_at_block(&test_id(200)) + .unwrap(), + None + ); + // when block is later than last finalized block AND there are no forks AND finalized value is Some + // ---> [100] --- 200 + assert_eq!( + ListCache::new( + DummyStorage::new() + .with_meta(Some(test_id(100)), Vec::new()) + .with_id(50, H256::from_low_u64_be(50)) + .with_entry( + test_id(100), + StorageEntry { + prev_valid_from: Some(test_id(30)), + value: Some(100) + } + ), + 1024, + test_id(100) + ) + .value_at_block(&test_id(200)) + .unwrap(), + Some(100) + ); + + // when block is later than last finalized block AND there are no matching forks + // AND block is connected to finalized block AND finalized value is None + // --- 3 + // 
---> [2] /---------> [4] + assert_eq!( + ListCache::new( + DummyStorage::new() + .with_meta(Some(correct_id(2)), vec![correct_id(4)]) + .with_entry( + correct_id(2), + StorageEntry { + prev_valid_from: None, + value: None + } + ) + .with_entry( + correct_id(4), + StorageEntry { + prev_valid_from: Some(correct_id(2)), + value: Some(4) + } + ) + .with_header(test_header(2)) + .with_header(test_header(3)) + .with_header(test_header(4)) + .with_header(fork_header(0, 2, 3)), + 1024, + test_id(2) + ) + .value_at_block(&fork_id(0, 2, 3)) + .unwrap(), + None + ); + // when block is later than last finalized block AND there are no matching forks + // AND block is connected to finalized block AND finalized value is Some + // --- 3 + // ---> [2] /---------> [4] + assert_eq!( + ListCache::new( + DummyStorage::new() + .with_meta(Some(correct_id(2)), vec![correct_id(4)]) + .with_entry( + correct_id(2), + StorageEntry { + prev_valid_from: None, + value: Some(2) + } + ) + .with_entry( + correct_id(4), + StorageEntry { + prev_valid_from: Some(correct_id(2)), + value: Some(4) + } + ) + .with_header(test_header(2)) + .with_header(test_header(3)) + .with_header(test_header(4)) + .with_header(fork_header(0, 2, 3)), + 1024, + test_id(2) + ) + .value_at_block(&fork_id(0, 2, 3)) + .unwrap(), + Some(2) + ); + // when block is later than last finalized block AND there are no matching forks + // AND block is not connected to finalized block + // --- 2 --- 3 + // 1 /---> [2] ---------> [4] + assert_eq!( + ListCache::new( + DummyStorage::new() + .with_meta(Some(correct_id(2)), vec![correct_id(4)]) + .with_entry( + correct_id(2), + StorageEntry { + prev_valid_from: None, + value: Some(2) + } + ) + .with_entry( + correct_id(4), + StorageEntry { + prev_valid_from: Some(correct_id(2)), + value: Some(4) + } + ) + .with_header(test_header(1)) + .with_header(test_header(2)) + .with_header(test_header(3)) + .with_header(test_header(4)) + .with_header(fork_header(0, 1, 3)) + .with_header(fork_header(0, 1, 2)), + 1024, + test_id(2) + ) + .value_at_block(&fork_id(0, 1, 3)) + .unwrap(), + None + ); + + // when block is later than last finalized block AND it appends to unfinalized fork from the end + // AND unfinalized value is Some + // ---> [2] ---> [4] ---> 5 + assert_eq!( + ListCache::new( + DummyStorage::new() + .with_meta(Some(correct_id(2)), vec![correct_id(4)]) + .with_entry( + correct_id(2), + StorageEntry { + prev_valid_from: None, + value: Some(2) + } + ) + .with_entry( + correct_id(4), + StorageEntry { + prev_valid_from: Some(correct_id(2)), + value: Some(4) + } + ) + .with_header(test_header(4)) + .with_header(test_header(5)), + 1024, + test_id(2) + ) + .value_at_block(&correct_id(5)) + .unwrap(), + Some(4) + ); + // when block is later than last finalized block AND it appends to unfinalized fork from the end + // AND unfinalized value is None + // ---> [2] ---> [4] ---> 5 + assert_eq!( + ListCache::new( + DummyStorage::new() + .with_meta(Some(correct_id(2)), vec![correct_id(4)]) + .with_entry( + correct_id(2), + StorageEntry { + prev_valid_from: None, + value: Some(2) + } + ) + .with_entry( + correct_id(4), + StorageEntry { + prev_valid_from: Some(correct_id(2)), + value: None + } + ) + .with_header(test_header(4)) + .with_header(test_header(5)), + 1024, + test_id(2) + ) + .value_at_block(&correct_id(5)) + .unwrap(), + None + ); + // when block is later than last finalized block AND it fits to the middle of unfinalized fork + // AND unfinalized value is Some + // ---> [2] ---> [4] ---> 5 ---> [6] + assert_eq!( + 
ListCache::new( + DummyStorage::new() + .with_meta(Some(correct_id(2)), vec![correct_id(6)]) + .with_entry( + correct_id(2), + StorageEntry { + prev_valid_from: None, + value: Some(2) + } + ) + .with_entry( + correct_id(4), + StorageEntry { + prev_valid_from: Some(correct_id(2)), + value: Some(4) + } + ) + .with_entry( + correct_id(6), + StorageEntry { + prev_valid_from: Some(correct_id(4)), + value: None + } + ) + .with_header(test_header(4)) + .with_header(test_header(5)) + .with_header(test_header(6)), + 1024, + test_id(2) + ) + .value_at_block(&correct_id(5)) + .unwrap(), + Some(4) + ); + // when block is later than last finalized block AND it fits to the middle of unfinalized fork + // AND unfinalized value is None + // ---> [2] ---> [4] ---> 5 ---> [6] + assert_eq!( + ListCache::new( + DummyStorage::new() + .with_meta(Some(correct_id(2)), vec![correct_id(6)]) + .with_entry( + correct_id(2), + StorageEntry { + prev_valid_from: None, + value: Some(2) + } + ) + .with_entry( + correct_id(4), + StorageEntry { + prev_valid_from: Some(correct_id(2)), + value: None + } + ) + .with_entry( + correct_id(6), + StorageEntry { + prev_valid_from: Some(correct_id(4)), + value: Some(4) + } + ) + .with_header(test_header(4)) + .with_header(test_header(5)) + .with_header(test_header(6)), + 1024, + test_id(2) + ) + .value_at_block(&correct_id(5)) + .unwrap(), + None + ); + // when block is later than last finalized block AND it does not fit unfinalized fork + // AND it is connected to the finalized block AND finalized value is Some + // ---> [2] ----------> [4] + // \--- 3 + assert_eq!( + ListCache::new( + DummyStorage::new() + .with_meta(Some(correct_id(2)), vec![correct_id(4)]) + .with_entry( + correct_id(4), + StorageEntry { + prev_valid_from: Some(correct_id(2)), + value: Some(4) + } + ) + .with_entry( + correct_id(2), + StorageEntry { + prev_valid_from: None, + value: Some(2) + } + ) + .with_header(test_header(2)) + .with_header(test_header(3)) + .with_header(test_header(4)) + .with_header(fork_header(0, 2, 3)), + 1024, + test_id(2) + ) + .value_at_block(&fork_id(0, 2, 3)) + .unwrap(), + Some(2) + ); + // when block is later than last finalized block AND it does not fit unfinalized fork + // AND it is connected to the finalized block AND finalized value is None + // ---> [2] ----------> [4] + // \--- 3 + assert_eq!( + ListCache::new( + DummyStorage::new() + .with_meta(Some(correct_id(2)), vec![correct_id(4)]) + .with_entry( + correct_id(4), + StorageEntry { + prev_valid_from: Some(correct_id(2)), + value: Some(4) + } + ) + .with_entry( + correct_id(2), + StorageEntry { + prev_valid_from: None, + value: None + } + ) + .with_header(test_header(2)) + .with_header(test_header(3)) + .with_header(test_header(4)) + .with_header(fork_header(0, 2, 3)), + 1024, + test_id(2) + ) + .value_at_block(&fork_id(0, 2, 3)) + .unwrap(), + None + ); + } + + #[test] + fn list_on_block_insert_works() { + // when trying to insert block < finalized number + assert!(ListCache::new(DummyStorage::new(), 1024, test_id(100)) + .on_block_insert( + &mut DummyTransaction::new(), + test_id(49), + test_id(50), + Some(50), + false + ) + .unwrap() + .is_none()); + // when trying to insert block @ finalized number + assert!(ListCache::new(DummyStorage::new(), 1024, test_id(100)) + .on_block_insert( + &mut DummyTransaction::new(), + test_id(99), + test_id(100), + Some(100), + false + ) + .unwrap() + .is_none()); + + // when trying to insert non-final block AND it appends to the best block of unfinalized fork + // AND new value is the 
same as in the fork's best block + let mut cache = ListCache::new( + DummyStorage::new() + .with_meta(None, vec![test_id(4)]) + .with_entry( + test_id(4), + StorageEntry { + prev_valid_from: None, + value: Some(4), + }, + ), + 1024, + test_id(2), + ); + cache.unfinalized[0].best_block = Some(test_id(4)); + let mut tx = DummyTransaction::new(); + assert_eq!( + cache + .on_block_insert(&mut tx, test_id(4), test_id(5), Some(4), false) + .unwrap(), + Some(CommitOperation::AppendNewBlock(0, test_id(5))) + ); + assert!(tx.inserted_entries().is_empty()); + assert!(tx.removed_entries().is_empty()); + assert!(tx.updated_meta().is_none()); + // when trying to insert non-final block AND it appends to the best block of unfinalized fork + // AND new value differs from the value in the fork's best block + let mut tx = DummyTransaction::new(); + assert_eq!( + cache + .on_block_insert(&mut tx, test_id(4), test_id(5), Some(5), false) + .unwrap(), + Some(CommitOperation::AppendNewEntry( + 0, + Entry { + valid_from: test_id(5), + value: Some(5) + } + )) + ); + assert_eq!( + *tx.inserted_entries(), + vec![test_id(5).hash].into_iter().collect() + ); + assert!(tx.removed_entries().is_empty()); + assert_eq!( + *tx.updated_meta(), + Some(Metadata { + finalized: None, + unfinalized: vec![test_id(5)] + }) + ); + + // when trying to insert non-final block AND it is the first block that appends to the best block of unfinalized fork + // AND new value is the same as in the fork's best block + let cache = ListCache::new( + DummyStorage::new() + .with_meta(None, vec![correct_id(4)]) + .with_entry( + correct_id(4), + StorageEntry { + prev_valid_from: None, + value: Some(4), + }, + ) + .with_header(test_header(4)), + 1024, + test_id(2), + ); + let mut tx = DummyTransaction::new(); + assert_eq!( + cache + .on_block_insert(&mut tx, correct_id(4), correct_id(5), Some(4), false) + .unwrap(), + Some(CommitOperation::AppendNewBlock(0, correct_id(5))) + ); + assert!(tx.inserted_entries().is_empty()); + assert!(tx.removed_entries().is_empty()); + assert!(tx.updated_meta().is_none()); + // when trying to insert non-final block AND it is the first block that appends to the best block of unfinalized fork + // AND new value differs from the value in the fork's best block + let mut tx = DummyTransaction::new(); + assert_eq!( + cache + .on_block_insert(&mut tx, correct_id(4), correct_id(5), Some(5), false) + .unwrap(), + Some(CommitOperation::AppendNewEntry( + 0, + Entry { + valid_from: correct_id(5), + value: Some(5) + } + )) + ); + assert_eq!( + *tx.inserted_entries(), + vec![correct_id(5).hash].into_iter().collect() + ); + assert!(tx.removed_entries().is_empty()); + assert_eq!( + *tx.updated_meta(), + Some(Metadata { + finalized: None, + unfinalized: vec![correct_id(5)] + }) + ); + + // when trying to insert non-final block AND it forks unfinalized fork + let cache = ListCache::new( + DummyStorage::new() + .with_meta(Some(correct_id(2)), vec![correct_id(4)]) + .with_entry( + correct_id(4), + StorageEntry { + prev_valid_from: Some(correct_id(2)), + value: Some(4), + }, + ) + .with_entry( + correct_id(2), + StorageEntry { + prev_valid_from: None, + value: Some(2), + }, + ) + .with_header(test_header(2)) + .with_header(test_header(3)) + .with_header(test_header(4)), + 1024, + correct_id(2), + ); + let mut tx = DummyTransaction::new(); + assert_eq!( + cache + .on_block_insert(&mut tx, correct_id(3), fork_id(0, 3, 4), Some(14), false) + .unwrap(), + Some(CommitOperation::AddNewFork(Entry { + valid_from: fork_id(0, 3, 4), + value: Some(14) + })) + ); + 
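// the new fork's head entry must also be written to the transaction and listed in the updated metadata + 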
assert_eq!( + *tx.inserted_entries(), + vec![fork_id(0, 3, 4).hash].into_iter().collect() + ); + assert!(tx.removed_entries().is_empty()); + assert_eq!( + *tx.updated_meta(), + Some(Metadata { + finalized: Some(correct_id(2)), + unfinalized: vec![correct_id(4), fork_id(0, 3, 4)] + }) + ); + + // when trying to insert non-final block AND there are no unfinalized forks + // AND value is the same as last finalized + let cache = ListCache::new( + DummyStorage::new() + .with_meta(Some(correct_id(2)), vec![]) + .with_entry( + correct_id(2), + StorageEntry { + prev_valid_from: None, + value: Some(2), + }, + ), + 1024, + correct_id(2), + ); + let mut tx = DummyTransaction::new(); + assert_eq!( + cache + .on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), false) + .unwrap(), + None + ); + assert!(tx.inserted_entries().is_empty()); + assert!(tx.removed_entries().is_empty()); + assert!(tx.updated_meta().is_none()); + // when trying to insert non-final block AND there are no unfinalized forks + // AND value differs from last finalized + let cache = ListCache::new( + DummyStorage::new() + .with_meta(Some(correct_id(2)), vec![]) + .with_entry( + correct_id(2), + StorageEntry { + prev_valid_from: None, + value: Some(2), + }, + ), + 1024, + correct_id(2), + ); + let mut tx = DummyTransaction::new(); + assert_eq!( + cache + .on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), false) + .unwrap(), + Some(CommitOperation::AddNewFork(Entry { + valid_from: correct_id(3), + value: Some(3) + })) + ); + assert_eq!( + *tx.inserted_entries(), + vec![correct_id(3).hash].into_iter().collect() + ); + assert!(tx.removed_entries().is_empty()); + assert_eq!( + *tx.updated_meta(), + Some(Metadata { + finalized: Some(correct_id(2)), + unfinalized: vec![correct_id(3)] + }) + ); + + // when inserting finalized entry AND there are no previous finalized entries + let cache = ListCache::new(DummyStorage::new(), 1024, correct_id(2)); + let mut tx = DummyTransaction::new(); + assert_eq!( + cache + .on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), true) + .unwrap(), + Some(CommitOperation::BlockFinalized( + correct_id(3), + Some(Entry { + valid_from: correct_id(3), + value: Some(3) + }), + Default::default() + )) + ); + assert_eq!( + *tx.inserted_entries(), + vec![correct_id(3).hash].into_iter().collect() + ); + assert!(tx.removed_entries().is_empty()); + assert_eq!( + *tx.updated_meta(), + Some(Metadata { + finalized: Some(correct_id(3)), + unfinalized: vec![] + }) + ); + // when inserting finalized entry AND value is the same as in previous finalized + let cache = ListCache::new( + DummyStorage::new() + .with_meta(Some(correct_id(2)), vec![]) + .with_entry( + correct_id(2), + StorageEntry { + prev_valid_from: None, + value: Some(2), + }, + ), + 1024, + correct_id(2), + ); + let mut tx = DummyTransaction::new(); + assert_eq!( + cache + .on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), true) + .unwrap(), + Some(CommitOperation::BlockFinalized( + correct_id(3), + None, + Default::default() + )) + ); + assert!(tx.inserted_entries().is_empty()); + assert!(tx.removed_entries().is_empty()); + assert!(tx.updated_meta().is_none()); + // when inserting finalized entry AND value differs from previous finalized + let mut tx = DummyTransaction::new(); + assert_eq!( + cache + .on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), true) + .unwrap(), + Some(CommitOperation::BlockFinalized( + correct_id(3), + Some(Entry { + valid_from: correct_id(3), + value: Some(3) + }), + 
Default::default() + )) + ); + assert_eq!( + *tx.inserted_entries(), + vec![correct_id(3).hash].into_iter().collect() + ); + assert!(tx.removed_entries().is_empty()); + assert_eq!( + *tx.updated_meta(), + Some(Metadata { + finalized: Some(correct_id(3)), + unfinalized: vec![] + }) + ); + + // inserting finalized entry removes abandoned fork EVEN if new entry is not inserted + let cache = ListCache::new( + DummyStorage::new() + .with_meta(Some(correct_id(2)), vec![fork_id(0, 1, 3)]) + .with_entry( + correct_id(2), + StorageEntry { + prev_valid_from: None, + value: Some(2), + }, + ) + .with_entry( + fork_id(0, 1, 3), + StorageEntry { + prev_valid_from: None, + value: Some(13), + }, + ), + 1024, + correct_id(2), + ); + let mut tx = DummyTransaction::new(); + assert_eq!( + cache + .on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), true) + .unwrap(), + Some(CommitOperation::BlockFinalized( + correct_id(3), + None, + vec![0].into_iter().collect() + )) + ); + } + + #[test] + fn list_on_block_finalized_works() { + // finalization does not finalize the entry if it does not exist + let cache = ListCache::new( + DummyStorage::new() + .with_meta(Some(correct_id(2)), vec![correct_id(5)]) + .with_entry( + correct_id(2), + StorageEntry { + prev_valid_from: None, + value: Some(2), + }, + ) + .with_entry( + correct_id(5), + StorageEntry { + prev_valid_from: Some(correct_id(2)), + value: Some(5), + }, + ), + 1024, + correct_id(2), + ); + let mut tx = DummyTransaction::new(); + assert_eq!( + cache + .on_block_finalize(&mut tx, correct_id(2), correct_id(3)) + .unwrap(), + Some(CommitOperation::BlockFinalized( + correct_id(3), + None, + Default::default() + )) + ); + assert!(tx.inserted_entries().is_empty()); + assert!(tx.removed_entries().is_empty()); + assert!(tx.updated_meta().is_none()); + // finalization finalizes entry + let cache = ListCache::new( + DummyStorage::new() + .with_meta(Some(correct_id(2)), vec![correct_id(5)]) + .with_entry( + correct_id(2), + StorageEntry { + prev_valid_from: None, + value: Some(2), + }, + ) + .with_entry( + correct_id(5), + StorageEntry { + prev_valid_from: Some(correct_id(2)), + value: Some(5), + }, + ), + 1024, + correct_id(4), + ); + let mut tx = DummyTransaction::new(); + assert_eq!( + cache + .on_block_finalize(&mut tx, correct_id(4), correct_id(5)) + .unwrap(), + Some(CommitOperation::BlockFinalized( + correct_id(5), + Some(Entry { + valid_from: correct_id(5), + value: Some(5) + }), + vec![0].into_iter().collect() + )) + ); + assert!(tx.inserted_entries().is_empty()); + assert!(tx.removed_entries().is_empty()); + assert_eq!( + *tx.updated_meta(), + Some(Metadata { + finalized: Some(correct_id(5)), + unfinalized: vec![] + }) + ); + // finalization removes abandoned forks + let cache = ListCache::new( + DummyStorage::new() + .with_meta(Some(correct_id(2)), vec![fork_id(0, 1, 3)]) + .with_entry( + correct_id(2), + StorageEntry { + prev_valid_from: None, + value: Some(2), + }, + ) + .with_entry( + fork_id(0, 1, 3), + StorageEntry { + prev_valid_from: None, + value: Some(13), + }, + ), + 1024, + correct_id(2), + ); + let mut tx = DummyTransaction::new(); + assert_eq!( + cache + .on_block_finalize(&mut tx, correct_id(2), correct_id(3)) + .unwrap(), + Some(CommitOperation::BlockFinalized( + correct_id(3), + None, + vec![0].into_iter().collect() + )) + ); + } + + #[test] + fn list_transaction_commit_works() { + let mut cache = ListCache::new( + DummyStorage::new() + .with_meta(Some(correct_id(2)), vec![correct_id(5), correct_id(6)]) + .with_entry( + 
correct_id(2), + StorageEntry { + prev_valid_from: None, + value: Some(2), + }, + ) + .with_entry( + correct_id(5), + StorageEntry { + prev_valid_from: Some(correct_id(2)), + value: Some(5), + }, + ) + .with_entry( + correct_id(6), + StorageEntry { + prev_valid_from: Some(correct_id(5)), + value: Some(6), + }, + ), + 1024, + correct_id(2), + ); + + // when new block is appended to unfinalized fork + cache.on_transaction_commit(CommitOperation::AppendNewBlock(0, correct_id(6))); + assert_eq!(cache.unfinalized[0].best_block, Some(correct_id(6))); + // when new entry is appended to unfinalized fork + cache.on_transaction_commit(CommitOperation::AppendNewEntry( + 0, + Entry { + valid_from: correct_id(7), + value: Some(7), + }, + )); + assert_eq!(cache.unfinalized[0].best_block, Some(correct_id(7))); + assert_eq!( + cache.unfinalized[0].head, + Entry { + valid_from: correct_id(7), + value: Some(7) + } + ); + // when new fork is added + cache.on_transaction_commit(CommitOperation::AddNewFork(Entry { + valid_from: correct_id(10), + value: Some(10), + })); + assert_eq!(cache.unfinalized[2].best_block, Some(correct_id(10))); + assert_eq!( + cache.unfinalized[2].head, + Entry { + valid_from: correct_id(10), + value: Some(10) + } + ); + // when block is finalized + entry is finalized + unfinalized forks are deleted + cache.on_transaction_commit(CommitOperation::BlockFinalized( + correct_id(20), + Some(Entry { + valid_from: correct_id(20), + value: Some(20), + }), + vec![0, 1, 2].into_iter().collect(), + )); + assert_eq!(cache.best_finalized_block, correct_id(20)); + assert_eq!( + cache.best_finalized_entry, + Some(Entry { + valid_from: correct_id(20), + value: Some(20) + }) + ); + assert!(cache.unfinalized.is_empty()); + } + + #[test] + fn list_find_unfinalized_fork_works() { + // ----------> [3] + // --- [2] ---------> 4 ---> [5] + assert_eq!( + ListCache::new( + DummyStorage::new() + .with_meta(None, vec![fork_id(0, 1, 3), correct_id(5)]) + .with_entry( + fork_id(0, 1, 3), + StorageEntry { + prev_valid_from: Some(correct_id(1)), + value: Some(13) + } + ) + .with_entry( + correct_id(5), + StorageEntry { + prev_valid_from: Some(correct_id(2)), + value: Some(5) + } + ) + .with_entry( + correct_id(2), + StorageEntry { + prev_valid_from: None, + value: None + } + ) + .with_header(test_header(2)) + .with_header(test_header(3)) + .with_header(test_header(4)) + .with_header(test_header(5)), + 1024, + correct_id(0) + ) + .find_unfinalized_fork(&correct_id(4)) + .unwrap() + .unwrap() + .head + .valid_from, + correct_id(5) + ); + // --- [2] ---------------> [5] + // ----------> [3] ---> 4 + assert_eq!( + ListCache::new( + DummyStorage::new() + .with_meta(None, vec![correct_id(5), fork_id(0, 1, 3)]) + .with_entry( + fork_id(0, 1, 3), + StorageEntry { + prev_valid_from: Some(correct_id(1)), + value: Some(13) + } + ) + .with_entry( + correct_id(5), + StorageEntry { + prev_valid_from: Some(correct_id(2)), + value: Some(5) + } + ) + .with_entry( + correct_id(2), + StorageEntry { + prev_valid_from: Some(correct_id(1)), + value: Some(2) + } + ) + .with_header(test_header(2)) + .with_header(test_header(3)) + .with_header(test_header(4)) + .with_header(test_header(5)) + .with_header(fork_header(0, 1, 2)) + .with_header(fork_header(0, 1, 3)) + .with_header(fork_header(0, 1, 4)), + 1024, + correct_id(0) + ) + .find_unfinalized_fork(&fork_id(0, 1, 4)) + .unwrap() + .unwrap() + .head + .valid_from, + fork_id(0, 1, 3) + ); + // --- [2] ---------------> [5] + // ----------> [3] + // -----------------> 4 + 
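// the queried block lies on a third chain that carries no cache entries, so no unfinalized fork matches + 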
assert!(ListCache::new( + DummyStorage::new() + .with_meta(None, vec![correct_id(5), fork_id(0, 1, 3)]) + .with_entry( + fork_id(0, 1, 3), + StorageEntry { + prev_valid_from: Some(correct_id(1)), + value: Some(13) + } + ) + .with_entry( + correct_id(5), + StorageEntry { + prev_valid_from: Some(correct_id(2)), + value: Some(5) + } + ) + .with_entry( + correct_id(2), + StorageEntry { + prev_valid_from: Some(correct_id(1)), + value: Some(2) + } + ) + .with_header(test_header(2)) + .with_header(test_header(3)) + .with_header(test_header(4)) + .with_header(test_header(5)) + .with_header(fork_header(0, 1, 3)) + .with_header(fork_header(0, 1, 4)) + .with_header(fork_header(1, 1, 2)) + .with_header(fork_header(1, 1, 3)) + .with_header(fork_header(1, 1, 4)), + 1024, + correct_id(0) + ) + .find_unfinalized_fork(&fork_id(1, 1, 4)) + .unwrap() + .is_none()); + } + + #[test] + fn fork_matches_works() { + // when block is not within list range + let storage = DummyStorage::new() + .with_entry( + test_id(100), + StorageEntry { + prev_valid_from: Some(test_id(50)), + value: Some(100), + }, + ) + .with_entry( + test_id(50), + StorageEntry { + prev_valid_from: None, + value: Some(50), + }, + ); + assert_eq!( + Fork::<_, u64> { + best_block: None, + head: Entry { + valid_from: test_id(100), + value: None + } + } + .matches(&storage, &test_id(20)) + .unwrap(), + false + ); + // when block is not connected to the begin block + let storage = DummyStorage::new() + .with_entry( + correct_id(5), + StorageEntry { + prev_valid_from: Some(correct_id(3)), + value: Some(100), + }, + ) + .with_entry( + correct_id(3), + StorageEntry { + prev_valid_from: None, + value: Some(200), + }, + ) + .with_header(test_header(5)) + .with_header(test_header(4)) + .with_header(test_header(3)) + .with_header(fork_header(0, 2, 4)) + .with_header(fork_header(0, 2, 3)); + assert_eq!( + Fork::<_, u64> { + best_block: None, + head: Entry { + valid_from: correct_id(5), + value: Some(100) + } + } + .matches(&storage, &fork_id(0, 2, 4)) + .unwrap(), + false + ); + // when block is not connected to the end block + let storage = DummyStorage::new() + .with_entry( + correct_id(5), + StorageEntry { + prev_valid_from: Some(correct_id(3)), + value: Some(100), + }, + ) + .with_entry( + correct_id(3), + StorageEntry { + prev_valid_from: None, + value: Some(200), + }, + ) + .with_header(test_header(5)) + .with_header(test_header(4)) + .with_header(test_header(3)) + .with_header(fork_header(0, 3, 4)); + assert_eq!( + Fork::<_, u64> { + best_block: None, + head: Entry { + valid_from: correct_id(5), + value: Some(100) + } + } + .matches(&storage, &fork_id(0, 3, 4)) + .unwrap(), + false + ); + // when block is connected to the begin block AND end is open + let storage = DummyStorage::new() + .with_entry( + correct_id(5), + StorageEntry { + prev_valid_from: None, + value: Some(100), + }, + ) + .with_header(test_header(5)) + .with_header(test_header(6)); + assert_eq!( + Fork::<_, u64> { + best_block: None, + head: Entry { + valid_from: correct_id(5), + value: Some(100) + } + } + .matches(&storage, &correct_id(6)) + .unwrap(), + true + ); + // when block is connected to the begin block AND to the end block + let storage = DummyStorage::new() + .with_entry( + correct_id(5), + StorageEntry { + prev_valid_from: Some(correct_id(3)), + value: Some(100), + }, + ) + .with_entry( + correct_id(3), + StorageEntry { + prev_valid_from: None, + value: Some(200), + }, + ) + .with_header(test_header(5)) + .with_header(test_header(4)) + .with_header(test_header(3)); + 
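// block 4 lies on the chain between the head entry at block 5 and its predecessor at block 3, so it matches + 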
assert_eq!( + Fork::<_, u64> { + best_block: None, + head: Entry { + valid_from: correct_id(5), + value: Some(100) + } + } + .matches(&storage, &correct_id(4)) + .unwrap(), + true + ); + } + + #[test] + fn fork_try_append_works() { + // when best block is unknown + assert_eq!( + Fork::<_, u64> { + best_block: None, + head: Entry { + valid_from: test_id(100), + value: None + } + } + .try_append(&test_id(100)), + false + ); + // when best block is known but different + assert_eq!( + Fork::<_, u64> { + best_block: None, + head: Entry { + valid_from: test_id(100), + value: None + } + } + .try_append(&test_id(101)), + false + ); + // when best block is known and the same + assert_eq!( + Fork::<_, u64> { + best_block: Some(test_id(100)), + head: Entry { + valid_from: test_id(100), + value: None + } + } + .try_append(&test_id(100)), + true + ); + } + + #[test] + fn fork_try_append_or_fork_works() { + // when there's no entry before parent + let storage = DummyStorage::new() + .with_entry( + test_id(100), + StorageEntry { + prev_valid_from: Some(test_id(50)), + value: Some(100), + }, + ) + .with_entry( + test_id(50), + StorageEntry { + prev_valid_from: None, + value: Some(50), + }, + ); + assert_eq!( + Fork::<_, u64> { + best_block: None, + head: Entry { + valid_from: test_id(100), + value: None + } + } + .try_append_or_fork(&storage, &test_id(30), None) + .unwrap(), + None + ); + // when parent does not belong to the fork + let storage = DummyStorage::new() + .with_entry( + correct_id(5), + StorageEntry { + prev_valid_from: Some(correct_id(3)), + value: Some(100), + }, + ) + .with_entry( + correct_id(3), + StorageEntry { + prev_valid_from: None, + value: Some(200), + }, + ) + .with_header(test_header(5)) + .with_header(test_header(4)) + .with_header(test_header(3)) + .with_header(fork_header(0, 2, 4)) + .with_header(fork_header(0, 2, 3)); + assert_eq!( + Fork::<_, u64> { + best_block: None, + head: Entry { + valid_from: correct_id(5), + value: Some(100) + } + } + .try_append_or_fork(&storage, &fork_id(0, 2, 4), None) + .unwrap(), + None + ); + // when the entry before parent is the head entry + let storage = DummyStorage::new() + .with_entry( + ComplexBlockId::new(test_header(5).hash(), 5), + StorageEntry { + prev_valid_from: Some(correct_id(3)), + value: Some(100), + }, + ) + .with_header(test_header(6)) + .with_header(test_header(5)); + assert_eq!( + Fork::<_, u64> { + best_block: None, + head: Entry { + valid_from: correct_id(5), + value: Some(100) + } + } + .try_append_or_fork(&storage, &correct_id(6), None) + .unwrap(), + Some(ForkAppendResult::Append) + ); + // when the parent located after last finalized entry + let storage = DummyStorage::new() + .with_entry( + correct_id(6), + StorageEntry { + prev_valid_from: Some(correct_id(3)), + value: Some(100), + }, + ) + .with_entry( + correct_id(3), + StorageEntry { + prev_valid_from: None, + value: Some(200), + }, + ) + .with_header(test_header(6)) + .with_header(test_header(5)) + .with_header(test_header(4)) + .with_header(test_header(3)) + .with_header(fork_header(0, 4, 5)); + assert_eq!( + Fork::<_, u64> { + best_block: None, + head: Entry { + valid_from: correct_id(6), + value: Some(100) + } + } + .try_append_or_fork(&storage, &fork_id(0, 4, 5), None) + .unwrap(), + Some(ForkAppendResult::Fork(ComplexBlockId::new( + test_header(3).hash(), + 3 + ))) + ); + // when the parent located before last finalized entry + let storage = DummyStorage::new() + .with_entry( + correct_id(6), + StorageEntry { + prev_valid_from: Some(correct_id(3)), + value: 
Some(100), + }, + ) + .with_entry( + correct_id(3), + StorageEntry { + prev_valid_from: None, + value: Some(200), + }, + ) + .with_header(test_header(6)) + .with_header(test_header(5)) + .with_header(test_header(4)) + .with_header(test_header(3)) + .with_header(fork_header(0, 4, 5)); + assert_eq!( + Fork::<_, u64> { + best_block: None, + head: Entry { + valid_from: correct_id(6), + value: Some(100) + } + } + .try_append_or_fork(&storage, &fork_id(0, 4, 5), Some(3)) + .unwrap(), + None + ); + } + + #[test] + fn fork_destroy_works() { + // when we reached finalized entry without iterations + let storage = DummyStorage::new().with_id(100, H256::from_low_u64_be(100)); + let mut tx = DummyTransaction::new(); + Fork::<_, u64> { + best_block: None, + head: Entry { + valid_from: test_id(100), + value: None, + }, + } + .destroy(&storage, &mut tx, Some(200)) + .unwrap(); + assert!(tx.removed_entries().is_empty()); + // when we reach finalized entry with iterations + let storage = DummyStorage::new() + .with_id(10, H256::from_low_u64_be(10)) + .with_entry( + test_id(100), + StorageEntry { + prev_valid_from: Some(test_id(50)), + value: Some(100), + }, + ) + .with_entry( + test_id(50), + StorageEntry { + prev_valid_from: Some(test_id(20)), + value: Some(50), + }, + ) + .with_entry( + test_id(20), + StorageEntry { + prev_valid_from: Some(test_id(10)), + value: Some(20), + }, + ) + .with_entry( + test_id(10), + StorageEntry { + prev_valid_from: Some(test_id(5)), + value: Some(10), + }, + ) + .with_entry( + test_id(5), + StorageEntry { + prev_valid_from: Some(test_id(3)), + value: Some(5), + }, + ) + .with_entry( + test_id(3), + StorageEntry { + prev_valid_from: None, + value: None, + }, + ); + let mut tx = DummyTransaction::new(); + Fork::<_, u64> { + best_block: None, + head: Entry { + valid_from: test_id(100), + value: None, + }, + } + .destroy(&storage, &mut tx, Some(200)) + .unwrap(); + assert_eq!( + *tx.removed_entries(), + vec![test_id(100).hash, test_id(50).hash, test_id(20).hash] + .into_iter() + .collect() + ); + // when we reach beginning of fork before finalized block + let storage = DummyStorage::new() + .with_id(10, H256::from_low_u64_be(10)) + .with_entry( + test_id(100), + StorageEntry { + prev_valid_from: Some(test_id(50)), + value: Some(100), + }, + ) + .with_entry( + test_id(50), + StorageEntry { + prev_valid_from: None, + value: Some(50), + }, + ); + let mut tx = DummyTransaction::new(); + Fork::<_, u64> { + best_block: None, + head: Entry { + valid_from: test_id(100), + value: None, + }, + } + .destroy(&storage, &mut tx, Some(200)) + .unwrap(); + assert_eq!( + *tx.removed_entries(), + vec![test_id(100).hash, test_id(50).hash] + .into_iter() + .collect() + ); + } + + #[test] + fn is_connected_to_block_fails() { + // when storage returns error + assert!(chain::is_connected_to_block::<_, u64, _>( + &FaultyStorage, + &test_id(1), + &test_id(100) + ) + .is_err()); + // when there's no header in the storage + assert!(chain::is_connected_to_block::<_, u64, _>( + &DummyStorage::new(), + &test_id(1), + &test_id(100) + ) + .is_err()); + } + + #[test] + fn is_connected_to_block_works() { + // when without iterations we end up with different block + assert_eq!( + chain::is_connected_to_block::<_, u64, _>( + &DummyStorage::new().with_header(test_header(1)), + &test_id(1), + &correct_id(1) + ) + .unwrap(), + false + ); + // when with ASC iterations we end up with different block + assert_eq!( + chain::is_connected_to_block::<_, u64, _>( + &DummyStorage::new() + .with_header(test_header(0)) + 
.with_header(test_header(1)) + .with_header(test_header(2)), + &test_id(0), + &correct_id(2) + ) + .unwrap(), + false + ); + // when with DESC iterations we end up with different block + assert_eq!( + chain::is_connected_to_block::<_, u64, _>( + &DummyStorage::new() + .with_header(test_header(0)) + .with_header(test_header(1)) + .with_header(test_header(2)), + &correct_id(2), + &test_id(0) + ) + .unwrap(), + false + ); + // when without iterations we end up with the same block + assert_eq!( + chain::is_connected_to_block::<_, u64, _>( + &DummyStorage::new().with_header(test_header(1)), + &correct_id(1), + &correct_id(1) + ) + .unwrap(), + true + ); + // when with ASC iterations we end up with the same block + assert_eq!( + chain::is_connected_to_block::<_, u64, _>( + &DummyStorage::new() + .with_header(test_header(0)) + .with_header(test_header(1)) + .with_header(test_header(2)), + &correct_id(0), + &correct_id(2) + ) + .unwrap(), + true + ); + // when with DESC iterations we end up with the same block + assert_eq!( + chain::is_connected_to_block::<_, u64, _>( + &DummyStorage::new() + .with_header(test_header(0)) + .with_header(test_header(1)) + .with_header(test_header(2)), + &correct_id(2), + &correct_id(0) + ) + .unwrap(), + true + ); + } + + #[test] + fn is_finalized_block_fails() { + // when storage returns error + assert!(chain::is_finalized_block::<_, u64, _>(&FaultyStorage, &test_id(1), 100).is_err()); + } + + #[test] + fn is_finalized_block_works() { + // when number of block is larger than last finalized block + assert_eq!( + chain::is_finalized_block::<_, u64, _>(&DummyStorage::new(), &test_id(100), 1).unwrap(), + false + ); + // when there's no hash for this block number in the database + assert_eq!( + chain::is_finalized_block::<_, u64, _>(&DummyStorage::new(), &test_id(1), 100).unwrap(), + false + ); + // when there's different hash for this block number in the database + assert_eq!( + chain::is_finalized_block::<_, u64, _>( + &DummyStorage::new().with_id(1, H256::from_low_u64_be(2)), + &test_id(1), + 100 + ) + .unwrap(), + false + ); + // when there's the same hash for this block number in the database + assert_eq!( + chain::is_finalized_block::<_, u64, _>( + &DummyStorage::new().with_id(1, H256::from_low_u64_be(1)), + &test_id(1), + 100 + ) + .unwrap(), + true + ); + } + + #[test] + fn read_forks_fails() { + // when storage returns error during finalized entry read + assert!(read_forks::( + &FaultyStorage, + Metadata { + finalized: Some(test_id(1)), + unfinalized: vec![], + } + ) + .is_err()); + // when storage returns error during unfinalized entry read + assert!(read_forks::( + &FaultyStorage, + Metadata { + finalized: None, + unfinalized: vec![test_id(1)], + } + ) + .is_err()); + // when finalized entry is not found + assert!(read_forks::( + &DummyStorage::new(), + Metadata { + finalized: Some(test_id(1)), + unfinalized: vec![], + } + ) + .is_err()); + // when unfinalized entry is not found + assert!(read_forks::( + &DummyStorage::new(), + Metadata { + finalized: None, + unfinalized: vec![test_id(1)], + } + ) + .is_err()); + } + + #[test] + fn read_forks_works() { + let storage = DummyStorage::new() + .with_entry( + test_id(10), + StorageEntry { + prev_valid_from: Some(test_id(1)), + value: Some(11), + }, + ) + .with_entry( + test_id(20), + StorageEntry { + prev_valid_from: Some(test_id(2)), + value: None, + }, + ) + .with_entry( + test_id(30), + StorageEntry { + prev_valid_from: None, + value: Some(33), + }, + ); + let expected = ( + Some(Entry { + valid_from: 
test_id(10), + value: Some(11), + }), + vec![ + Fork { + best_block: None, + head: Entry { + valid_from: test_id(20), + value: None, + }, + }, + Fork { + best_block: None, + head: Entry { + valid_from: test_id(30), + value: Some(33), + }, + }, + ], + ); + + assert_eq!( + expected, + read_forks( + &storage, + Metadata { + finalized: Some(test_id(10)), + unfinalized: vec![test_id(20), test_id(30)], + } + ) + .unwrap() + ); + } + + #[test] + fn ancient_entries_are_pruned() { + let cache = ListCache::new( + DummyStorage::new() + .with_id(10, H256::from_low_u64_be(10)) + .with_id(20, H256::from_low_u64_be(20)) + .with_id(30, H256::from_low_u64_be(30)) + .with_entry( + test_id(10), + StorageEntry { + prev_valid_from: None, + value: Some(10), + }, + ) + .with_entry( + test_id(20), + StorageEntry { + prev_valid_from: Some(test_id(10)), + value: Some(20), + }, + ) + .with_entry( + test_id(30), + StorageEntry { + prev_valid_from: Some(test_id(20)), + value: Some(30), + }, + ), + 10, + test_id(9), + ); + let mut tx = DummyTransaction::new(); + + // when finalizing entry #10: no entries pruned + cache.prune_finalized_entries(&mut tx, &test_id(10)); + assert!(tx.removed_entries().is_empty()); + assert!(tx.inserted_entries().is_empty()); + // when finalizing entry #19: no entries pruned + cache.prune_finalized_entries(&mut tx, &test_id(19)); + assert!(tx.removed_entries().is_empty()); + assert!(tx.inserted_entries().is_empty()); + // when finalizing entry #20: no entries pruned + cache.prune_finalized_entries(&mut tx, &test_id(20)); + assert!(tx.removed_entries().is_empty()); + assert!(tx.inserted_entries().is_empty()); + // when finalizing entry #30: entry 10 pruned + entry 20 is truncated + cache.prune_finalized_entries(&mut tx, &test_id(30)); + assert_eq!( + *tx.removed_entries(), + vec![test_id(10).hash].into_iter().collect() + ); + assert_eq!( + *tx.inserted_entries(), + vec![test_id(20).hash].into_iter().collect() + ); + } } diff --git a/core/client/db/src/cache/list_entry.rs b/core/client/db/src/cache/list_entry.rs index 237ae9a268..68d8bc76a8 100644 --- a/core/client/db/src/cache/list_entry.rs +++ b/core/client/db/src/cache/list_entry.rs @@ -17,20 +17,20 @@ //! List-cache storage entries. use client::error::Result as ClientResult; +use parity_codec::{Decode, Encode}; use runtime_primitives::traits::{Block as BlockT, NumberFor}; -use parity_codec::{Encode, Decode}; +use crate::cache::list_storage::Storage; use crate::cache::{CacheItemT, ComplexBlockId}; -use crate::cache::list_storage::{Storage}; /// Single list-based cache entry. #[derive(Debug)] #[cfg_attr(test, derive(PartialEq))] pub struct Entry { - /// first block, when this value became actual - pub valid_from: ComplexBlockId, - /// None means that we do not know the value starting from `valid_from` block - pub value: Option, + /// first block, when this value became actual + pub valid_from: ComplexBlockId, + /// None means that we do not know the value starting from `valid_from` block + pub value: Option, } /// Internal representation of the single list-based cache entry. 
The entry points to the @@ -38,128 +38,254 @@ pub struct Entry { #[derive(Debug, Encode, Decode)] #[cfg_attr(test, derive(Clone, PartialEq))] pub struct StorageEntry { - /// None if valid from the beginning - pub prev_valid_from: Option>, - /// None means that we do not know the value starting from `valid_from` block - pub value: Option, + /// None if valid from the beginning + pub prev_valid_from: Option>, + /// None means that we do not know the value starting from `valid_from` block + pub value: Option, } impl Entry { - /// Returns Some if the entry should be updated with the new value. - pub fn try_update(&self, value: Option) -> Option> { - match self.value == value { - true => None, - false => Some(StorageEntry { - prev_valid_from: Some(self.valid_from.clone()), - value, - }), - } - } - - /// Wrapper that calls search_before to get range where the given block fits. - pub fn search_best_range_before>( - &self, - storage: &S, - block: NumberFor, - ) -> ClientResult, Option>)>> { - Ok(self.search_best_before(storage, block, false)? - .map(|(entry, next)| (entry.valid_from, next))) - } - - /// Searches the list, ending with THIS entry for the best entry preceeding (or at) - /// given block number. - /// If the entry is found, result is the entry and the block id of next entry (if exists). - /// NOTE that this function does not check that the passed block is actually linked to - /// the blocks it found. - pub fn search_best_before>( - &self, - storage: &S, - block: NumberFor, - require_value: bool, - ) -> ClientResult, Option>)>> { - // we're looking for the best value - let mut next = None; - let mut current = self.valid_from.clone(); - if block >= self.valid_from.number { - let value = if require_value { self.value.clone() } else { None }; - return Ok(Some((Entry { valid_from: current, value }, next))); - } - - // else - travel back in time - loop { - let entry = storage.require_entry(&current)?; - if block >= current.number { - return Ok(Some((Entry { valid_from: current, value: entry.value }, next))); - } - - next = Some(current); - current = match entry.prev_valid_from { - Some(prev_valid_from) => prev_valid_from, - None => return Ok(None), - }; - } - } + /// Returns Some if the entry should be updated with the new value. + pub fn try_update(&self, value: Option) -> Option> { + match self.value == value { + true => None, + false => Some(StorageEntry { + prev_valid_from: Some(self.valid_from.clone()), + value, + }), + } + } + + /// Wrapper that calls search_before to get range where the given block fits. + pub fn search_best_range_before>( + &self, + storage: &S, + block: NumberFor, + ) -> ClientResult, Option>)>> { + Ok(self + .search_best_before(storage, block, false)? + .map(|(entry, next)| (entry.valid_from, next))) + } + + /// Searches the list, ending with THIS entry for the best entry preceding (or at) + /// given block number. + /// If the entry is found, result is the entry and the block id of next entry (if exists). + /// NOTE that this function does not check that the passed block is actually linked to + /// the blocks it found.
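The walk is easiest to see on the concrete lists the tests further below build: each entry links to its predecessor through `prev_valid_from`, and the search descends until it finds the first entry at or below the requested number, also reporting the entry just above it. A minimal sketch, reusing `DummyStorage` and `test_id` from this crate's tests:

    // Entries at blocks 50 and 100; searching before block 75 resolves to the
    // entry at 50 and names the entry at 100 as the next one up the list.
    let storage = DummyStorage::new()
        .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: Some(100) })
        .with_entry(test_id(50), StorageEntry { prev_valid_from: Some(test_id(30)), value: Some(50) });
    let head = Entry::<_, u64> { valid_from: test_id(100), value: Some(100) };
    assert_eq!(
        head.search_best_before(&storage, 75, false).unwrap(),
        Some((Entry { valid_from: test_id(50), value: Some(50) }, Some(test_id(100)))),
    );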
+ pub fn search_best_before>( + &self, + storage: &S, + block: NumberFor, + require_value: bool, + ) -> ClientResult, Option>)>> { + // we're looking for the best value + let mut next = None; + let mut current = self.valid_from.clone(); + if block >= self.valid_from.number { + let value = if require_value { + self.value.clone() + } else { + None + }; + return Ok(Some(( + Entry { + valid_from: current, + value, + }, + next, + ))); + } + + // else - travel back in time + loop { + let entry = storage.require_entry(&current)?; + if block >= current.number { + return Ok(Some(( + Entry { + valid_from: current, + value: entry.value, + }, + next, + ))); + } + + next = Some(current); + current = match entry.prev_valid_from { + Some(prev_valid_from) => prev_valid_from, + None => return Ok(None), + }; + } + } } impl StorageEntry { - /// Converts storage entry into an entry, valid from given block. - pub fn into_entry(self, valid_from: ComplexBlockId) -> Entry { - Entry { - valid_from, - value: self.value, - } - } + /// Converts storage entry into an entry, valid from given block. + pub fn into_entry(self, valid_from: ComplexBlockId) -> Entry { + Entry { + valid_from, + value: self.value, + } + } } #[cfg(test)] mod tests { - use crate::cache::list_cache::tests::test_id; - use crate::cache::list_storage::tests::{DummyStorage, FaultyStorage}; - use super::*; - - #[test] - fn entry_try_update_works() { - // when trying to update with the same None value - assert_eq!(Entry::<_, u64> { valid_from: test_id(1), value: None }.try_update(None), None); - // when trying to update with the same Some value - assert_eq!(Entry { valid_from: test_id(1), value: Some(1) }.try_update(Some(1)), None); - // when trying to update with different None value - assert_eq!(Entry { valid_from: test_id(1), value: Some(1) }.try_update(None), - Some(StorageEntry { prev_valid_from: Some(test_id(1)), value: None })); - // when trying to update with different Some value - assert_eq!(Entry { valid_from: test_id(1), value: Some(1) }.try_update(Some(2)), - Some(StorageEntry { prev_valid_from: Some(test_id(1)), value: Some(2) })); - } - - #[test] - fn entry_search_best_before_fails() { - // when storage returns error - assert!(Entry::<_, u64> { valid_from: test_id(100), value: None }.search_best_before(&FaultyStorage, 50, false).is_err()); - } - - #[test] - fn entry_search_best_before_works() { - // when block is better than our best block AND value is not required - assert_eq!(Entry::<_, u64> { valid_from: test_id(100), value: Some(100) } - .search_best_before(&DummyStorage::new(), 150, false).unwrap(), - Some((Entry::<_, u64> { valid_from: test_id(100), value: None }, None))); - // when block is better than our best block AND value is required - assert_eq!(Entry::<_, u64> { valid_from: test_id(100), value: Some(100) } - .search_best_before(&DummyStorage::new(), 150, true).unwrap(), - Some((Entry::<_, u64> { valid_from: test_id(100), value: Some(100) }, None))); - // when block is found between two entries - assert_eq!(Entry::<_, u64> { valid_from: test_id(100), value: Some(100) } - .search_best_before(&DummyStorage::new() - .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: Some(100) }) - .with_entry(test_id(50), StorageEntry { prev_valid_from: Some(test_id(30)), value: Some(50) }), - 75, false).unwrap(), - Some((Entry::<_, u64> { valid_from: test_id(50), value: Some(50) }, Some(test_id(100))))); - // when block is not found - assert_eq!(Entry::<_, u64> { valid_from: test_id(100), value: Some(100) } -
.search_best_before(&DummyStorage::new() - .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: Some(100) }) - .with_entry(test_id(50), StorageEntry { prev_valid_from: None, value: Some(50) }), - 30, true).unwrap(), - None); - } + use super::*; + use crate::cache::list_cache::tests::test_id; + use crate::cache::list_storage::tests::{DummyStorage, FaultyStorage}; + + #[test] + fn entry_try_update_works() { + // when trying to update with the same None value + assert_eq!( + Entry::<_, u64> { + valid_from: test_id(1), + value: None + } + .try_update(None), + None + ); + // when trying to update with the same Some value + assert_eq!( + Entry { + valid_from: test_id(1), + value: Some(1) + } + .try_update(Some(1)), + None + ); + // when trying to update with different None value + assert_eq!( + Entry { + valid_from: test_id(1), + value: Some(1) + } + .try_update(None), + Some(StorageEntry { + prev_valid_from: Some(test_id(1)), + value: None + }) + ); + // when trying to update with different Some value + assert_eq!( + Entry { + valid_from: test_id(1), + value: Some(1) + } + .try_update(Some(2)), + Some(StorageEntry { + prev_valid_from: Some(test_id(1)), + value: Some(2) + }) + ); + } + + #[test] + fn entry_search_best_before_fails() { + // when storage returns error + assert!(Entry::<_, u64> { + valid_from: test_id(100), + value: None + } + .search_best_before(&FaultyStorage, 50, false) + .is_err()); + } + + #[test] + fn entry_search_best_before_works() { + // when block is better than our best block AND value is not required + assert_eq!( + Entry::<_, u64> { + valid_from: test_id(100), + value: Some(100) + } + .search_best_before(&DummyStorage::new(), 150, false) + .unwrap(), + Some(( + Entry::<_, u64> { + valid_from: test_id(100), + value: None + }, + None + )) + ); + // when block is better than our best block AND value is required + assert_eq!( + Entry::<_, u64> { + valid_from: test_id(100), + value: Some(100) + } + .search_best_before(&DummyStorage::new(), 150, true) + .unwrap(), + Some(( + Entry::<_, u64> { + valid_from: test_id(100), + value: Some(100) + }, + None + )) + ); + // when block is found between two entries + assert_eq!( + Entry::<_, u64> { + valid_from: test_id(100), + value: Some(100) + } + .search_best_before( + &DummyStorage::new() + .with_entry( + test_id(100), + StorageEntry { + prev_valid_from: Some(test_id(50)), + value: Some(100) + } + ) + .with_entry( + test_id(50), + StorageEntry { + prev_valid_from: Some(test_id(30)), + value: Some(50) + } + ), + 75, + false + ) + .unwrap(), + Some(( + Entry::<_, u64> { + valid_from: test_id(50), + value: Some(50) + }, + Some(test_id(100)) + )) + ); + // when block is not found + assert_eq!( + Entry::<_, u64> { + valid_from: test_id(100), + value: Some(100) + } + .search_best_before( + &DummyStorage::new() + .with_entry( + test_id(100), + StorageEntry { + prev_valid_from: Some(test_id(50)), + value: Some(100) + } + ) + .with_entry( + test_id(50), + StorageEntry { + prev_valid_from: None, + value: Some(50) + } + ), + 30, + true + ) + .unwrap(), + None + ); + } } diff --git a/core/client/db/src/cache/list_storage.rs b/core/client/db/src/cache/list_storage.rs index 659a30507e..49a6b2f58c 100644 --- a/core/client/db/src/cache/list_storage.rs +++ b/core/client/db/src/cache/list_storage.rs @@ -18,361 +18,430 @@ use std::sync::Arc; -use kvdb::{KeyValueDB, DBTransaction}; +use kvdb::{DBTransaction, KeyValueDB}; +use crate::utils::{self, db_err, meta_keys}; use client::error::{Error as ClientError, ErrorKind as 
ClientErrorKind, Result as ClientResult}; -use parity_codec::{Encode, Decode}; +use parity_codec::{Decode, Encode}; use runtime_primitives::generic::BlockId; use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, NumberFor}; -use crate::utils::{self, db_err, meta_keys}; -use crate::cache::{CacheItemT, ComplexBlockId}; use crate::cache::list_cache::{CommitOperation, Fork}; use crate::cache::list_entry::{Entry, StorageEntry}; +use crate::cache::{CacheItemT, ComplexBlockId}; /// Single list-cache metadata. #[derive(Debug)] #[cfg_attr(test, derive(Clone, PartialEq))] pub struct Metadata { - /// Block at which best finalized entry is stored. - pub finalized: Option>, - /// A set of blocks at which best unfinalized entries are stored. - pub unfinalized: Vec>, + /// Block at which best finalized entry is stored. + pub finalized: Option>, + /// A set of blocks at which best unfinalized entries are stored. + pub unfinalized: Vec>, } /// Readonly list-cache storage trait. pub trait Storage { - /// Reads hash of the block at given number. - fn read_id(&self, at: NumberFor) -> ClientResult>; - - /// Reads header of the block with given hash. - fn read_header(&self, at: &Block::Hash) -> ClientResult>; - - /// Reads cache metadata: best finalized entry (if some) and the list. - fn read_meta(&self) -> ClientResult>; - - /// Reads cache entry from the storage. - fn read_entry(&self, at: &ComplexBlockId) -> ClientResult>>; - - /// Reads referenced (and thus existing) cache entry from the storage. - fn require_entry(&self, at: &ComplexBlockId) -> ClientResult> { - self.read_entry(at) - .and_then(|entry| entry - .ok_or_else(|| ClientError::from( - ClientErrorKind::Backend(format!("Referenced cache entry at {:?} is not found", at))))) - } + /// Reads hash of the block at given number. + fn read_id(&self, at: NumberFor) -> ClientResult>; + + /// Reads header of the block with given hash. + fn read_header(&self, at: &Block::Hash) -> ClientResult>; + + /// Reads cache metadata: best finalized entry (if some) and the list. + fn read_meta(&self) -> ClientResult>; + + /// Reads cache entry from the storage. + fn read_entry( + &self, + at: &ComplexBlockId, + ) -> ClientResult>>; + + /// Reads referenced (and thus existing) cache entry from the storage. + fn require_entry(&self, at: &ComplexBlockId) -> ClientResult> { + self.read_entry(at).and_then(|entry| { + entry.ok_or_else(|| { + ClientError::from(ClientErrorKind::Backend(format!( + "Referenced cache entry at {:?} is not found", + at + ))) + }) + }) + } } /// List-cache storage transaction. pub trait StorageTransaction { - /// Insert storage entry at given block. - fn insert_storage_entry(&mut self, at: &ComplexBlockId, entry: &StorageEntry); - - /// Delete storage entry at given block. - fn remove_storage_entry(&mut self, at: &ComplexBlockId); - - /// Update metadata of the cache. - fn update_meta( - &mut self, - best_finalized_entry: Option<&Entry>, - unfinalized: &[Fork], - operation: &CommitOperation, - ); + /// Insert storage entry at given block. + fn insert_storage_entry(&mut self, at: &ComplexBlockId, entry: &StorageEntry); + + /// Delete storage entry at given block. + fn remove_storage_entry(&mut self, at: &ComplexBlockId); + + /// Update metadata of the cache. + fn update_meta( + &mut self, + best_finalized_entry: Option<&Entry>, + unfinalized: &[Fork], + operation: &CommitOperation, + ); } /// A set of columns used by the DbStorage. #[derive(Debug)] pub struct DbColumns { - /// Column holding cache meta. 
- pub meta: Option, - /// Column holding the mapping of { block number => block hash } for blocks of the best chain. - pub key_lookup: Option, - /// Column holding the mapping of { block hash => block header }. - pub header: Option, - /// Column holding cache entries. - pub cache: Option, + /// Column holding cache meta. + pub meta: Option, + /// Column holding the mapping of { block number => block hash } for blocks of the best chain. + pub key_lookup: Option, + /// Column holding the mapping of { block hash => block header }. + pub header: Option, + /// Column holding cache entries. + pub cache: Option, } /// Database-backed list cache storage. pub struct DbStorage { - name: Vec, - meta_key: Vec, - db: Arc, - columns: DbColumns, + name: Vec, + meta_key: Vec, + db: Arc, + columns: DbColumns, } impl DbStorage { - /// Create new database-backed list cache storage. - pub fn new(name: Vec, db: Arc, columns: DbColumns) -> Self { - let meta_key = meta::key(&name); - DbStorage { name, meta_key, db, columns } - } - - /// Get reference to the database. - pub fn db(&self) -> &Arc { &self.db } - - /// Get reference to the database columns. - pub fn columns(&self) -> &DbColumns { &self.columns } - - /// Encode block id for storing as a key in cache column. - /// We append prefix to the actual encoding to allow several caches - /// store entries in the same column. - pub fn encode_block_id(&self, block: &ComplexBlockId) -> Vec { - let mut encoded = self.name.clone(); - encoded.extend(block.hash.as_ref()); - encoded - } + /// Create new database-backed list cache storage. + pub fn new(name: Vec, db: Arc, columns: DbColumns) -> Self { + let meta_key = meta::key(&name); + DbStorage { + name, + meta_key, + db, + columns, + } + } + + /// Get reference to the database. + pub fn db(&self) -> &Arc { + &self.db + } + + /// Get reference to the database columns. + pub fn columns(&self) -> &DbColumns { + &self.columns + } + + /// Encode block id for storing as a key in cache column. + /// We append a prefix to the actual encoding to allow several caches + /// to store entries in the same column.
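Because every cache shares a single database column, key collisions are avoided purely by this prefix scheme. A standalone restatement of the layout (the name and hash bytes here are illustrative, not values from the patch):

    // key = cache name bytes ++ raw block hash bytes
    fn cache_key(name: &[u8], block_hash: &[u8]) -> Vec<u8> {
        let mut key = name.to_vec();
        key.extend_from_slice(block_hash);
        key
    }

    assert_eq!(cache_key(b"auth", &[0xab, 0xcd]), vec![b'a', b'u', b't', b'h', 0xab, 0xcd]);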
+ pub fn encode_block_id(&self, block: &ComplexBlockId) -> Vec { + let mut encoded = self.name.clone(); + encoded.extend(block.hash.as_ref()); + encoded + } } impl Storage for DbStorage { - fn read_id(&self, at: NumberFor) -> ClientResult> { - utils::read_header::(&*self.db, self.columns.key_lookup, self.columns.header, BlockId::Number(at)) - .map(|maybe_header| maybe_header.map(|header| header.hash())) - } - - fn read_header(&self, at: &Block::Hash) -> ClientResult> { - utils::read_header::(&*self.db, self.columns.key_lookup, self.columns.header, BlockId::Hash(*at)) - } - - fn read_meta(&self) -> ClientResult> { - self.db.get(self.columns.meta, &self.meta_key) - .map_err(db_err) - .and_then(|meta| match meta { - Some(meta) => meta::decode(&*meta), - None => Ok(Metadata { - finalized: None, - unfinalized: Vec::new(), - }), - }) - } - - fn read_entry(&self, at: &ComplexBlockId) -> ClientResult>> { - self.db.get(self.columns.cache, &self.encode_block_id(at)) - .map_err(db_err) - .and_then(|entry| match entry { - Some(entry) => StorageEntry::::decode(&mut &entry[..]) - .ok_or_else(|| ClientErrorKind::Backend("Failed to decode cache entry".into()).into()) - .map(Some), - None => Ok(None), - }) - } + fn read_id(&self, at: NumberFor) -> ClientResult> { + utils::read_header::( + &*self.db, + self.columns.key_lookup, + self.columns.header, + BlockId::Number(at), + ) + .map(|maybe_header| maybe_header.map(|header| header.hash())) + } + + fn read_header(&self, at: &Block::Hash) -> ClientResult> { + utils::read_header::( + &*self.db, + self.columns.key_lookup, + self.columns.header, + BlockId::Hash(*at), + ) + } + + fn read_meta(&self) -> ClientResult> { + self.db + .get(self.columns.meta, &self.meta_key) + .map_err(db_err) + .and_then(|meta| match meta { + Some(meta) => meta::decode(&*meta), + None => Ok(Metadata { + finalized: None, + unfinalized: Vec::new(), + }), + }) + } + + fn read_entry( + &self, + at: &ComplexBlockId, + ) -> ClientResult>> { + self.db + .get(self.columns.cache, &self.encode_block_id(at)) + .map_err(db_err) + .and_then(|entry| match entry { + Some(entry) => StorageEntry::::decode(&mut &entry[..]) + .ok_or_else(|| { + ClientErrorKind::Backend("Failed to decode cache entry".into()).into() + }) + .map(Some), + None => Ok(None), + }) + } } /// Database-backed list cache storage transaction. pub struct DbStorageTransaction<'a> { - storage: &'a DbStorage, - tx: &'a mut DBTransaction, + storage: &'a DbStorage, + tx: &'a mut DBTransaction, } impl<'a> DbStorageTransaction<'a> { - /// Create new database transaction. - pub fn new(storage: &'a DbStorage, tx: &'a mut DBTransaction) -> Self { - DbStorageTransaction { storage, tx } - } + /// Create new database transaction. 
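All writes go through the borrowed kvdb `DBTransaction`, so nothing reaches disk until the caller commits that transaction itself. A hedged usage sketch, assuming a `DbStorage` named `storage`, a `ComplexBlockId` `block` and a `StorageEntry` `entry` in scope, with the `StorageTransaction` trait imported:

    let mut tx = DBTransaction::new();
    {
        let mut cache_tx = DbStorageTransaction::new(&storage, &mut tx);
        cache_tx.insert_storage_entry(&block, &entry); // only buffered in `tx`
    }
    storage.db().write(tx).expect("database write failed"); // persisted here, not before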
+ pub fn new(storage: &'a DbStorage, tx: &'a mut DBTransaction) -> Self { + DbStorageTransaction { storage, tx } + } } impl<'a, Block: BlockT, T: CacheItemT> StorageTransaction for DbStorageTransaction<'a> { - fn insert_storage_entry(&mut self, at: &ComplexBlockId, entry: &StorageEntry) { - self.tx.put(self.storage.columns.cache, &self.storage.encode_block_id(at), &entry.encode()); - } - - fn remove_storage_entry(&mut self, at: &ComplexBlockId) { - self.tx.delete(self.storage.columns.cache, &self.storage.encode_block_id(at)); - } - - fn update_meta( - &mut self, - best_finalized_entry: Option<&Entry>, - unfinalized: &[Fork], - operation: &CommitOperation, - ) { - self.tx.put( - self.storage.columns.meta, - &self.storage.meta_key, - &meta::encode(best_finalized_entry, unfinalized, operation)); - } + fn insert_storage_entry(&mut self, at: &ComplexBlockId, entry: &StorageEntry) { + self.tx.put( + self.storage.columns.cache, + &self.storage.encode_block_id(at), + &entry.encode(), + ); + } + + fn remove_storage_entry(&mut self, at: &ComplexBlockId) { + self.tx.delete( + self.storage.columns.cache, + &self.storage.encode_block_id(at), + ); + } + + fn update_meta( + &mut self, + best_finalized_entry: Option<&Entry>, + unfinalized: &[Fork], + operation: &CommitOperation, + ) { + self.tx.put( + self.storage.columns.meta, + &self.storage.meta_key, + &meta::encode(best_finalized_entry, unfinalized, operation), + ); + } } /// Metadata related functions. mod meta { - use super::*; - - /// Convert cache name into cache metadata key. - pub fn key(name: &[u8]) -> Vec { - let mut key_name = meta_keys::CACHE_META_PREFIX.to_vec(); - key_name.extend_from_slice(name); - key_name - } - - /// Encode cache metadata 'applying' commit operation before encoding. - pub fn encode( - best_finalized_entry: Option<&Entry>, - unfinalized: &[Fork], - op: &CommitOperation - ) -> Vec { - let mut finalized = best_finalized_entry.as_ref().map(|entry| &entry.valid_from); - let mut unfinalized = unfinalized.iter().map(|fork| &fork.head().valid_from).collect::>(); - - match op { - CommitOperation::AppendNewBlock(_, _) => (), - CommitOperation::AppendNewEntry(index, ref entry) => { - unfinalized[*index] = &entry.valid_from; - }, - CommitOperation::AddNewFork(ref entry) => { - unfinalized.push(&entry.valid_from); - }, - CommitOperation::BlockFinalized(_, ref finalizing_entry, ref forks) => { - finalized = finalizing_entry.as_ref().map(|entry| &entry.valid_from); - for fork_index in forks.iter().rev() { - unfinalized.remove(*fork_index); - } - }, - } - - (finalized, unfinalized).encode() - } - - /// Decode meta information. - pub fn decode(encoded: &[u8]) -> ClientResult> { - let input = &mut &*encoded; - let finalized: Option> = Decode::decode(input) - .ok_or_else(|| ClientError::from(ClientErrorKind::Backend("Error decoding cache meta".into())))?; - let unfinalized: Vec> = Decode::decode(input) - .ok_or_else(|| ClientError::from(ClientErrorKind::Backend("Error decoding cache meta".into())))?; - - Ok(Metadata { finalized, unfinalized }) - } + use super::*; + + /// Convert cache name into cache metadata key. + pub fn key(name: &[u8]) -> Vec { + let mut key_name = meta_keys::CACHE_META_PREFIX.to_vec(); + key_name.extend_from_slice(name); + key_name + } + + /// Encode cache metadata 'applying' commit operation before encoding. 
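One detail worth calling out in the body below: when a `BlockFinalized` operation abandons several forks, their indices are removed in descending order, so earlier removals never shift an index that is still pending. A self-contained restatement of that step (the string values are placeholders):

    use std::collections::BTreeSet;

    let mut unfinalized = vec!["fork-0", "fork-1", "fork-2", "fork-3"];
    let abandoned: BTreeSet<usize> = vec![1, 3].into_iter().collect();
    for index in abandoned.iter().rev() {
        unfinalized.remove(*index); // highest index first: 3, then 1
    }
    assert_eq!(unfinalized, vec!["fork-0", "fork-2"]);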
+ pub fn encode( + best_finalized_entry: Option<&Entry>, + unfinalized: &[Fork], + op: &CommitOperation, + ) -> Vec { + let mut finalized = best_finalized_entry.as_ref().map(|entry| &entry.valid_from); + let mut unfinalized = unfinalized + .iter() + .map(|fork| &fork.head().valid_from) + .collect::>(); + + match op { + CommitOperation::AppendNewBlock(_, _) => (), + CommitOperation::AppendNewEntry(index, ref entry) => { + unfinalized[*index] = &entry.valid_from; + } + CommitOperation::AddNewFork(ref entry) => { + unfinalized.push(&entry.valid_from); + } + CommitOperation::BlockFinalized(_, ref finalizing_entry, ref forks) => { + finalized = finalizing_entry.as_ref().map(|entry| &entry.valid_from); + for fork_index in forks.iter().rev() { + unfinalized.remove(*fork_index); + } + } + } + + (finalized, unfinalized).encode() + } + + /// Decode meta information. + pub fn decode(encoded: &[u8]) -> ClientResult> { + let input = &mut &*encoded; + let finalized: Option> = Decode::decode(input).ok_or_else(|| { + ClientError::from(ClientErrorKind::Backend("Error decoding cache meta".into())) + })?; + let unfinalized: Vec> = Decode::decode(input).ok_or_else(|| { + ClientError::from(ClientErrorKind::Backend("Error decoding cache meta".into())) + })?; + + Ok(Metadata { + finalized, + unfinalized, + }) + } } #[cfg(test)] pub mod tests { - use std::collections::{HashMap, HashSet}; - use super::*; - - pub struct FaultyStorage; - - impl Storage for FaultyStorage { - fn read_id(&self, _at: NumberFor) -> ClientResult> { - Err(ClientErrorKind::Backend("TestError".into()).into()) - } - - fn read_header(&self, _at: &Block::Hash) -> ClientResult> { - Err(ClientErrorKind::Backend("TestError".into()).into()) - } - - fn read_meta(&self) -> ClientResult> { - Err(ClientErrorKind::Backend("TestError".into()).into()) - } - - fn read_entry(&self, _at: &ComplexBlockId) -> ClientResult>> { - Err(ClientErrorKind::Backend("TestError".into()).into()) - } - } - - pub struct DummyStorage { - meta: Metadata, - ids: HashMap, Block::Hash>, - headers: HashMap, - entries: HashMap>, - } - - impl DummyStorage { - pub fn new() -> Self { - DummyStorage { - meta: Metadata { - finalized: None, - unfinalized: Vec::new(), - }, - ids: HashMap::new(), - headers: HashMap::new(), - entries: HashMap::new(), - } - } - - pub fn with_meta(mut self, finalized: Option>, unfinalized: Vec>) -> Self { - self.meta.finalized = finalized; - self.meta.unfinalized = unfinalized; - self - } - - pub fn with_id(mut self, at: NumberFor, id: Block::Hash) -> Self { - self.ids.insert(at, id); - self - } - - pub fn with_header(mut self, header: Block::Header) -> Self { - self.headers.insert(header.hash(), header); - self - } - - pub fn with_entry(mut self, at: ComplexBlockId, entry: StorageEntry) -> Self { - self.entries.insert(at.hash, entry); - self - } - } - - impl Storage for DummyStorage { - fn read_id(&self, at: NumberFor) -> ClientResult> { - Ok(self.ids.get(&at).cloned()) - } - - fn read_header(&self, at: &Block::Hash) -> ClientResult> { - Ok(self.headers.get(&at).cloned()) - } - - fn read_meta(&self) -> ClientResult> { - Ok(self.meta.clone()) - } - - fn read_entry(&self, at: &ComplexBlockId) -> ClientResult>> { - Ok(self.entries.get(&at.hash).cloned()) - } - } - - pub struct DummyTransaction { - updated_meta: Option>, - inserted_entries: HashSet, - removed_entries: HashSet, - } - - impl DummyTransaction { - pub fn new() -> Self { - DummyTransaction { - updated_meta: None, - inserted_entries: HashSet::new(), - removed_entries: HashSet::new(), - } - } - - pub 
fn inserted_entries(&self) -> &HashSet { - &self.inserted_entries - } - - pub fn removed_entries(&self) -> &HashSet { - &self.removed_entries - } - - pub fn updated_meta(&self) -> &Option> { - &self.updated_meta - } - } - - impl StorageTransaction for DummyTransaction { - fn insert_storage_entry(&mut self, at: &ComplexBlockId, _entry: &StorageEntry) { - self.inserted_entries.insert(at.hash); - } - - fn remove_storage_entry(&mut self, at: &ComplexBlockId) { - self.removed_entries.insert(at.hash); - } - - fn update_meta( - &mut self, - best_finalized_entry: Option<&Entry>, - unfinalized: &[Fork], - operation: &CommitOperation, - ) { - self.updated_meta = Some(meta::decode(&meta::encode(best_finalized_entry, unfinalized, operation)).unwrap()); - } - } + use super::*; + use std::collections::{HashMap, HashSet}; + + pub struct FaultyStorage; + + impl Storage for FaultyStorage { + fn read_id(&self, _at: NumberFor) -> ClientResult> { + Err(ClientErrorKind::Backend("TestError".into()).into()) + } + + fn read_header(&self, _at: &Block::Hash) -> ClientResult> { + Err(ClientErrorKind::Backend("TestError".into()).into()) + } + + fn read_meta(&self) -> ClientResult> { + Err(ClientErrorKind::Backend("TestError".into()).into()) + } + + fn read_entry( + &self, + _at: &ComplexBlockId, + ) -> ClientResult>> { + Err(ClientErrorKind::Backend("TestError".into()).into()) + } + } + + pub struct DummyStorage { + meta: Metadata, + ids: HashMap, Block::Hash>, + headers: HashMap, + entries: HashMap>, + } + + impl DummyStorage { + pub fn new() -> Self { + DummyStorage { + meta: Metadata { + finalized: None, + unfinalized: Vec::new(), + }, + ids: HashMap::new(), + headers: HashMap::new(), + entries: HashMap::new(), + } + } + + pub fn with_meta( + mut self, + finalized: Option>, + unfinalized: Vec>, + ) -> Self { + self.meta.finalized = finalized; + self.meta.unfinalized = unfinalized; + self + } + + pub fn with_id(mut self, at: NumberFor, id: Block::Hash) -> Self { + self.ids.insert(at, id); + self + } + + pub fn with_header(mut self, header: Block::Header) -> Self { + self.headers.insert(header.hash(), header); + self + } + + pub fn with_entry( + mut self, + at: ComplexBlockId, + entry: StorageEntry, + ) -> Self { + self.entries.insert(at.hash, entry); + self + } + } + + impl Storage for DummyStorage { + fn read_id(&self, at: NumberFor) -> ClientResult> { + Ok(self.ids.get(&at).cloned()) + } + + fn read_header(&self, at: &Block::Hash) -> ClientResult> { + Ok(self.headers.get(&at).cloned()) + } + + fn read_meta(&self) -> ClientResult> { + Ok(self.meta.clone()) + } + + fn read_entry( + &self, + at: &ComplexBlockId, + ) -> ClientResult>> { + Ok(self.entries.get(&at.hash).cloned()) + } + } + + pub struct DummyTransaction { + updated_meta: Option>, + inserted_entries: HashSet, + removed_entries: HashSet, + } + + impl DummyTransaction { + pub fn new() -> Self { + DummyTransaction { + updated_meta: None, + inserted_entries: HashSet::new(), + removed_entries: HashSet::new(), + } + } + + pub fn inserted_entries(&self) -> &HashSet { + &self.inserted_entries + } + + pub fn removed_entries(&self) -> &HashSet { + &self.removed_entries + } + + pub fn updated_meta(&self) -> &Option> { + &self.updated_meta + } + } + + impl StorageTransaction for DummyTransaction { + fn insert_storage_entry( + &mut self, + at: &ComplexBlockId, + _entry: &StorageEntry, + ) { + self.inserted_entries.insert(at.hash); + } + + fn remove_storage_entry(&mut self, at: &ComplexBlockId) { + self.removed_entries.insert(at.hash); + } + + fn update_meta( + &mut 
self, + best_finalized_entry: Option<&Entry>, + unfinalized: &[Fork], + operation: &CommitOperation, + ) { + self.updated_meta = Some( + meta::decode(&meta::encode(best_finalized_entry, unfinalized, operation)).unwrap(), + ); + } + } } diff --git a/core/client/db/src/cache/mod.rs b/core/client/db/src/cache/mod.rs index b5dd45f11d..e860a70e43 100644 --- a/core/client/db/src/cache/mod.rs +++ b/core/client/db/src/cache/mod.rs @@ -16,18 +16,18 @@ //! DB-backed cache of blockchain data. -use std::{sync::Arc, collections::HashMap}; use parking_lot::RwLock; +use std::{collections::HashMap, sync::Arc}; -use kvdb::{KeyValueDB, DBTransaction}; +use kvdb::{DBTransaction, KeyValueDB}; +use crate::utils::{self, COLUMN_META}; use client::blockchain::Cache as BlockchainCache; use client::error::Result as ClientResult; -use parity_codec::{Encode, Decode}; -use runtime_primitives::generic::BlockId; -use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, NumberFor, As}; use consensus_common::well_known_cache_keys::Id as CacheKeyId; -use crate::utils::{self, COLUMN_META}; +use parity_codec::{Decode, Encode}; +use runtime_primitives::generic::BlockId; +use runtime_primitives::traits::{As, Block as BlockT, Header as HeaderT, NumberFor}; use self::list_cache::ListCache; @@ -41,21 +41,21 @@ const PRUNE_DEPTH: u64 = 1024; /// Block identifier that holds both hash and number. #[derive(Clone, Debug, Encode, Decode, PartialEq)] pub struct ComplexBlockId { - hash: Block::Hash, - number: NumberFor, + hash: Block::Hash, + number: NumberFor, } impl ComplexBlockId { - /// Create new complex block id. - pub fn new(hash: Block::Hash, number: NumberFor) -> Self { - ComplexBlockId { hash, number } - } + /// Create new complex block id. + pub fn new(hash: Block::Hash, number: NumberFor) -> Self { + ComplexBlockId { hash, number } + } } impl ::std::cmp::PartialOrd for ComplexBlockId { - fn partial_cmp(&self, other: &ComplexBlockId) -> Option<::std::cmp::Ordering> { - self.number.partial_cmp(&other.number) - } + fn partial_cmp(&self, other: &ComplexBlockId) -> Option<::std::cmp::Ordering> { + self.number.partial_cmp(&other.number) + } } /// All cache items must implement this trait. @@ -65,220 +65,238 @@ impl CacheItemT for T where T: Clone + Decode + Encode + PartialEq {} /// Database-backed blockchain data cache. pub struct DbCache { - cache_at: HashMap, self::list_storage::DbStorage>>, - db: Arc, - key_lookup_column: Option, - header_column: Option, - authorities_column: Option, - best_finalized_block: ComplexBlockId, + cache_at: HashMap, self::list_storage::DbStorage>>, + db: Arc, + key_lookup_column: Option, + header_column: Option, + authorities_column: Option, + best_finalized_block: ComplexBlockId, } impl DbCache { - /// Create new cache. - pub fn new( - db: Arc, - key_lookup_column: Option, - header_column: Option, - authorities_column: Option, - best_finalized_block: ComplexBlockId, - ) -> Self { - Self { - cache_at: HashMap::new(), - db, - key_lookup_column, - header_column, - authorities_column, - best_finalized_block, - } - } - - /// Begin cache transaction. - pub fn transaction<'a>(&'a mut self, tx: &'a mut DBTransaction) -> DbCacheTransaction<'a, Block> { - DbCacheTransaction { - cache: self, - tx, - cache_at_op: HashMap::new(), - best_finalized_block: None, - } - } - - /// Run post-commit cache operations. 
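A side note on the `ComplexBlockId` ordering shown above: `partial_cmp` consults only the block number, so two distinct blocks at the same height compare as `Equal` even though the derived `PartialEq` (which also checks the hash) says they differ. A minimal sketch, assuming a test `Block` type with `H256` hashes and `u64` numbers as elsewhere in this crate:

    use std::cmp::Ordering;

    let a = ComplexBlockId::<Block>::new(H256::from_low_u64_be(1), 1);
    let b = ComplexBlockId::<Block>::new(H256::from_low_u64_be(2), 1);
    assert_ne!(a, b);                                     // hashes differ
    assert_eq!(a.partial_cmp(&b), Some(Ordering::Equal)); // numbers match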
- pub fn commit(&mut self, ops: DbCacheTransactionOps) { - for (name, op) in ops.cache_at_op.into_iter() { - self.get_cache(name).on_transaction_commit(op); - } - if let Some(best_finalized_block) = ops.best_finalized_block { - self.best_finalized_block = best_finalized_block; - } - } - - /// Creates `ListCache` with the given name or returns a reference to the existing. - fn get_cache(&mut self, name: CacheKeyId) -> &mut ListCache, self::list_storage::DbStorage> { - get_cache_helper( - &mut self.cache_at, - name, - &self.db, - self.key_lookup_column, - self.header_column, - self.authorities_column, - &self.best_finalized_block - ) - } + /// Create new cache. + pub fn new( + db: Arc, + key_lookup_column: Option, + header_column: Option, + authorities_column: Option, + best_finalized_block: ComplexBlockId, + ) -> Self { + Self { + cache_at: HashMap::new(), + db, + key_lookup_column, + header_column, + authorities_column, + best_finalized_block, + } + } + + /// Begin cache transaction. + pub fn transaction<'a>( + &'a mut self, + tx: &'a mut DBTransaction, + ) -> DbCacheTransaction<'a, Block> { + DbCacheTransaction { + cache: self, + tx, + cache_at_op: HashMap::new(), + best_finalized_block: None, + } + } + + /// Run post-commit cache operations. + pub fn commit(&mut self, ops: DbCacheTransactionOps) { + for (name, op) in ops.cache_at_op.into_iter() { + self.get_cache(name).on_transaction_commit(op); + } + if let Some(best_finalized_block) = ops.best_finalized_block { + self.best_finalized_block = best_finalized_block; + } + } + + /// Creates `ListCache` with the given name or returns a reference to the existing. + fn get_cache( + &mut self, + name: CacheKeyId, + ) -> &mut ListCache, self::list_storage::DbStorage> { + get_cache_helper( + &mut self.cache_at, + name, + &self.db, + self.key_lookup_column, + self.header_column, + self.authorities_column, + &self.best_finalized_block, + ) + } } // This helper is needed because otherwise the borrow checker will require to // clone all parameters outside of the closure. fn get_cache_helper<'a, Block: BlockT>( - cache_at: &'a mut HashMap, self::list_storage::DbStorage>>, - name: CacheKeyId, - db: &Arc, - key_lookup: Option, - header: Option, - cache: Option, - best_finalized_block: &ComplexBlockId, + cache_at: &'a mut HashMap, self::list_storage::DbStorage>>, + name: CacheKeyId, + db: &Arc, + key_lookup: Option, + header: Option, + cache: Option, + best_finalized_block: &ComplexBlockId, ) -> &'a mut ListCache, self::list_storage::DbStorage> { - cache_at.entry(name).or_insert_with(|| { - ListCache::new( - self::list_storage::DbStorage::new(name.to_vec(), db.clone(), - self::list_storage::DbColumns { - meta: COLUMN_META, - key_lookup, - header, - cache, - }, - ), - As::sa(PRUNE_DEPTH), - best_finalized_block.clone(), - ) - }) + cache_at.entry(name).or_insert_with(|| { + ListCache::new( + self::list_storage::DbStorage::new( + name.to_vec(), + db.clone(), + self::list_storage::DbColumns { + meta: COLUMN_META, + key_lookup, + header, + cache, + }, + ), + As::sa(PRUNE_DEPTH), + best_finalized_block.clone(), + ) + }) } /// Cache operations that are to be committed after database transaction is committed. pub struct DbCacheTransactionOps { - cache_at_op: HashMap>>, - best_finalized_block: Option>, + cache_at_op: HashMap>>, + best_finalized_block: Option>, } /// Database-backed blockchain data cache transaction valid for single block import. 
pub struct DbCacheTransaction<'a, Block: BlockT> { - cache: &'a mut DbCache, - tx: &'a mut DBTransaction, - cache_at_op: HashMap>>, - best_finalized_block: Option>, + cache: &'a mut DbCache, + tx: &'a mut DBTransaction, + cache_at_op: HashMap>>, + best_finalized_block: Option>, } impl<'a, Block: BlockT> DbCacheTransaction<'a, Block> { - /// Convert transaction into post-commit operations set. - pub fn into_ops(self) -> DbCacheTransactionOps { - DbCacheTransactionOps { - cache_at_op: self.cache_at_op, - best_finalized_block: self.best_finalized_block, - } - } - - /// When new block is inserted into database. - pub fn on_block_insert( - mut self, - parent: ComplexBlockId, - block: ComplexBlockId, - data_at: HashMap>, - is_final: bool, - ) -> ClientResult { - assert!(self.cache_at_op.is_empty()); - - // prepare list of caches that are not update - // (we might still need to do some cache maintenance in this case) - let missed_caches = self.cache.cache_at.keys() - .filter(|cache| !data_at.contains_key(cache.clone())) - .cloned() - .collect::>(); - - let mut insert_op = |name: CacheKeyId, value: Option>| -> Result<(), client::error::Error> { - let cache = self.cache.get_cache(name); - let op = cache.on_block_insert( - &mut self::list_storage::DbStorageTransaction::new( - cache.storage(), - &mut self.tx, - ), - parent.clone(), - block.clone(), - value.or(cache.value_at_block(&parent)?), - is_final, - )?; - if let Some(op) = op { - self.cache_at_op.insert(name, op); - } - Ok(()) - }; - - data_at.into_iter().try_for_each(|(name, data)| insert_op(name, Some(data)))?; - missed_caches.into_iter().try_for_each(|name| insert_op(name, None))?; - - if is_final { - self.best_finalized_block = Some(block); - } - - Ok(self) - } - - /// When previously inserted block is finalized. - pub fn on_block_finalize( - mut self, - parent: ComplexBlockId, - block: ComplexBlockId - ) -> ClientResult { - assert!(self.cache_at_op.is_empty()); - - for (name, cache_at) in self.cache.cache_at.iter() { - let op = cache_at.on_block_finalize( - &mut self::list_storage::DbStorageTransaction::new( - cache_at.storage(), - &mut self.tx - ), - parent.clone(), - block.clone(), - )?; - - if let Some(op) = op { - self.cache_at_op.insert(name.to_owned(), op); - } - } - - self.best_finalized_block = Some(block); - - Ok(self) - } + /// Convert transaction into post-commit operations set. + pub fn into_ops(self) -> DbCacheTransactionOps { + DbCacheTransactionOps { + cache_at_op: self.cache_at_op, + best_finalized_block: self.best_finalized_block, + } + } + + /// When new block is inserted into database. 
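The intended calling pattern pairs this method with `into_ops` and `DbCache::commit`: build the database transaction first, write it, and only then apply the buffered operations to the in-memory cache. A hedged sketch of that flow, assuming `cache`, `db`, `parent`, `block` and `data_at` are in scope:

    let mut tx = DBTransaction::new();
    let ops = cache
        .transaction(&mut tx)
        .on_block_insert(parent, block, data_at, false /* is_final */)?
        .into_ops();
    db.write(tx).expect("database write failed"); // persist block and cache entries together
    cache.commit(ops);                            // only now update the in-memory forks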
+ pub fn on_block_insert( + mut self, + parent: ComplexBlockId, + block: ComplexBlockId, + data_at: HashMap>, + is_final: bool, + ) -> ClientResult { + assert!(self.cache_at_op.is_empty()); + + // prepare list of caches that are not updated + // (we might still need to do some cache maintenance in this case) + let missed_caches = self + .cache + .cache_at + .keys() + .filter(|cache| !data_at.contains_key(cache.clone())) + .cloned() + .collect::>(); + + let mut insert_op = |name: CacheKeyId, + value: Option>| + -> Result<(), client::error::Error> { + let cache = self.cache.get_cache(name); + let op = cache.on_block_insert( + &mut self::list_storage::DbStorageTransaction::new(cache.storage(), &mut self.tx), + parent.clone(), + block.clone(), + value.or(cache.value_at_block(&parent)?), + is_final, + )?; + if let Some(op) = op { + self.cache_at_op.insert(name, op); + } + Ok(()) + }; + + data_at + .into_iter() + .try_for_each(|(name, data)| insert_op(name, Some(data)))?; + missed_caches + .into_iter() + .try_for_each(|name| insert_op(name, None))?; + + if is_final { + self.best_finalized_block = Some(block); + } + + Ok(self) + } + + /// When previously inserted block is finalized. + pub fn on_block_finalize( + mut self, + parent: ComplexBlockId, + block: ComplexBlockId, + ) -> ClientResult { + assert!(self.cache_at_op.is_empty()); + + for (name, cache_at) in self.cache.cache_at.iter() { + let op = cache_at.on_block_finalize( + &mut self::list_storage::DbStorageTransaction::new( + cache_at.storage(), + &mut self.tx, + ), + parent.clone(), + block.clone(), + )?; + + if let Some(op) = op { + self.cache_at_op.insert(name.to_owned(), op); + } + } + + self.best_finalized_block = Some(block); + + Ok(self) + } } /// Synchronous implementation of database-backed blockchain data cache. pub struct DbCacheSync(pub RwLock>); impl BlockchainCache for DbCacheSync { - fn get_at(&self, key: &CacheKeyId, at: &BlockId) -> Option> { - let cache = self.0.read(); - let storage = cache.cache_at.get(key)?.storage(); - let db = storage.db(); - let columns = storage.columns(); - let at = match *at { - BlockId::Hash(hash) => { - let header = utils::read_header::( - &**db, - columns.key_lookup, - columns.header, - BlockId::Hash(hash.clone())).ok()??; - ComplexBlockId::new(hash, *header.number()) - }, - BlockId::Number(number) => { - let hash = utils::read_header::( - &**db, - columns.key_lookup, - columns.header, - BlockId::Number(number.clone())).ok()??.hash(); - ComplexBlockId::new(hash, number) - }, - }; - - cache.cache_at.get(key)?.value_at_block(&at).ok()? - } + fn get_at(&self, key: &CacheKeyId, at: &BlockId) -> Option> { + let cache = self.0.read(); + let storage = cache.cache_at.get(key)?.storage(); + let db = storage.db(); + let columns = storage.columns(); + let at = match *at { + BlockId::Hash(hash) => { + let header = utils::read_header::( + &**db, + columns.key_lookup, + columns.header, + BlockId::Hash(hash.clone()), + ) + .ok()??; + ComplexBlockId::new(hash, *header.number()) + } + BlockId::Number(number) => { + let hash = utils::read_header::( + &**db, + columns.key_lookup, + columns.header, + BlockId::Number(number.clone()), + ) + .ok()?? + .hash(); + ComplexBlockId::new(hash, number) + } + }; + + cache.cache_at.get(key)?.value_at_block(&at).ok()?
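    // `get_at` first normalises the caller's `BlockId` (hash or number) into
    // a `ComplexBlockId` by reading the header, then delegates to the per-key
    // `ListCache`; any lookup failure deliberately collapses to `None` via
    // `ok()?` rather than surfacing an error to the caller.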
+ } } - diff --git a/core/client/db/src/lib.rs b/core/client/db/src/lib.rs index 7657bfd396..634d48d728 100644 --- a/core/client/db/src/lib.rs +++ b/core/client/db/src/lib.rs @@ -30,35 +30,39 @@ mod cache; mod storage_cache; mod utils; -use std::sync::Arc; -use std::path::PathBuf; -use std::io; use std::collections::HashMap; +use std::io; +use std::path::PathBuf; +use std::sync::Arc; +use crate::storage_cache::{new_shared_cache, CachingState, SharedCache}; +use crate::utils::{ + block_id_to_lookup_key, db_err, meta_keys, open_database, read_db, read_meta, Meta, +}; use client::backend::NewBlockState; use client::blockchain::HeaderBackend; +use client::children; +use client::leaves::{FinalizationDisplaced, LeafSet}; use client::ExecutionStrategies; -use parity_codec::{Decode, Encode}; +use consensus_common::well_known_cache_keys; +use executor::RuntimeInfo; use hash_db::Hasher; -use kvdb::{KeyValueDB, DBTransaction}; -use trie::{MemoryDB, PrefixedMemoryDB, prefixed_key}; +use kvdb::{DBTransaction, KeyValueDB}; +use log::{debug, trace, warn}; +use parity_codec::{Decode, Encode}; use parking_lot::RwLock; -use primitives::{H256, Blake2Hasher, ChangesTrieConfiguration, convert_hash}; use primitives::storage::well_known_keys; -use runtime_primitives::{generic::BlockId, Justification, StorageOverlay, ChildrenStorageOverlay}; -use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, As, NumberFor, Zero, Digest, DigestItem}; +use primitives::{convert_hash, Blake2Hasher, ChangesTrieConfiguration, H256}; +use runtime_primitives::traits::{ + As, Block as BlockT, Digest, DigestItem, Header as HeaderT, NumberFor, Zero, +}; use runtime_primitives::BuildStorage; +use runtime_primitives::{generic::BlockId, ChildrenStorageOverlay, Justification, StorageOverlay}; +pub use state_db::PruningMode; +use state_db::StateDb; use state_machine::backend::Backend as StateBackend; -use executor::RuntimeInfo; use state_machine::{CodeExecutor, DBValue}; -use crate::utils::{Meta, db_err, meta_keys, open_database, read_db, block_id_to_lookup_key, read_meta}; -use client::leaves::{LeafSet, FinalizationDisplaced}; -use client::children; -use state_db::StateDb; -use consensus_common::well_known_cache_keys; -use crate::storage_cache::{CachingState, SharedCache, new_shared_cache}; -use log::{trace, debug, warn}; -pub use state_db::PruningMode; +use trie::{prefixed_key, MemoryDB, PrefixedMemoryDB}; #[cfg(feature = "test-helpers")] use client::in_mem::Backend as InMemoryBackend; @@ -68,1880 +72,2618 @@ const MIN_BLOCKS_TO_KEEP_CHANGES_TRIES_FOR: u64 = 32768; const STATE_CACHE_SIZE_BYTES: usize = 16 * 1024 * 1024; /// DB-backed patricia trie state, transaction type is an overlay of changes to commit. -pub type DbState = state_machine::TrieBackend>, Blake2Hasher>; +pub type DbState = + state_machine::TrieBackend>, Blake2Hasher>; /// Database settings. pub struct DatabaseSettings { - /// Cache size in bytes. If `None` default is used. - pub cache_size: Option, - /// Path to the database. - pub path: PathBuf, - /// Pruning mode. - pub pruning: PruningMode, + /// Cache size in bytes. If `None` default is used. + pub cache_size: Option, + /// Path to the database. + pub path: PathBuf, + /// Pruning mode. + pub pruning: PruningMode, } /// Create an instance of db-backed client. 
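A hedged sketch of how a node might call this constructor; `executor` and `genesis_storage` are placeholders assumed in scope, the path is illustrative, and both `Default` calls assume those impls exist as in this era of the crate:

    let settings = DatabaseSettings {
        cache_size: None,                // fall back to the default cache size
        path: "/tmp/substrate-db".into(),
        pruning: PruningMode::default(), // or pick ArchiveAll/Constrained explicitly
    };
    // `Block` and `RA` are not inferable from the arguments alone, so they
    // must be pinned by a type annotation on the binding.
    let client = new_client(settings, executor, genesis_storage, ExecutionStrategies::default())
        .expect("client construction failed");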
pub fn new_client( - settings: DatabaseSettings, - executor: E, - genesis_storage: S, - execution_strategies: ExecutionStrategies, -) -> Result, client::LocalCallExecutor, E>, Block, RA>, client::error::Error> - where - Block: BlockT, - E: CodeExecutor + RuntimeInfo, - S: BuildStorage, + settings: DatabaseSettings, + executor: E, + genesis_storage: S, + execution_strategies: ExecutionStrategies, +) -> Result< + client::Client, client::LocalCallExecutor, E>, Block, RA>, + client::error::Error, +> +where + Block: BlockT, + E: CodeExecutor + RuntimeInfo, + S: BuildStorage, { - let backend = Arc::new(Backend::new(settings, CANONICALIZATION_DELAY)?); - let executor = client::LocalCallExecutor::new(backend.clone(), executor); - Ok(client::Client::new(backend, executor, genesis_storage, execution_strategies)?) + let backend = Arc::new(Backend::new(settings, CANONICALIZATION_DELAY)?); + let executor = client::LocalCallExecutor::new(backend.clone(), executor); + Ok(client::Client::new( + backend, + executor, + genesis_storage, + execution_strategies, + )?) } mod columns { - pub const META: Option = crate::utils::COLUMN_META; - pub const STATE: Option = Some(1); - pub const STATE_META: Option = Some(2); - /// maps hashes to lookup keys and numbers to canon hashes. - pub const KEY_LOOKUP: Option = Some(3); - pub const HEADER: Option = Some(4); - pub const BODY: Option = Some(5); - pub const JUSTIFICATION: Option = Some(6); - pub const CHANGES_TRIE: Option = Some(7); - pub const AUX: Option = Some(8); + pub const META: Option = crate::utils::COLUMN_META; + pub const STATE: Option = Some(1); + pub const STATE_META: Option = Some(2); + /// maps hashes to lookup keys and numbers to canon hashes. + pub const KEY_LOOKUP: Option = Some(3); + pub const HEADER: Option = Some(4); + pub const BODY: Option = Some(5); + pub const JUSTIFICATION: Option = Some(6); + pub const CHANGES_TRIE: Option = Some(7); + pub const AUX: Option = Some(8); } struct PendingBlock { - header: Block::Header, - justification: Option, - body: Option>, - leaf_state: NewBlockState, + header: Block::Header, + justification: Option, + body: Option>, + leaf_state: NewBlockState, } // wrapper that implements trait required for state_db struct StateMetaDb<'a>(&'a KeyValueDB); impl<'a> state_db::MetaDb for StateMetaDb<'a> { - type Error = io::Error; + type Error = io::Error; - fn get_meta(&self, key: &[u8]) -> Result>, Self::Error> { - self.0.get(columns::STATE_META, key).map(|r| r.map(|v| v.to_vec())) - } + fn get_meta(&self, key: &[u8]) -> Result>, Self::Error> { + self.0 + .get(columns::STATE_META, key) + .map(|r| r.map(|v| v.to_vec())) + } } /// Block database pub struct BlockchainDb { - db: Arc, - meta: Arc, Block::Hash>>>, - leaves: RwLock>>, + db: Arc, + meta: Arc, Block::Hash>>>, + leaves: RwLock>>, } impl BlockchainDb { - fn new(db: Arc) -> Result { - let meta = read_meta::(&*db, columns::META, columns::HEADER)?; - let leaves = LeafSet::read_from_db(&*db, columns::META, meta_keys::LEAF_PREFIX)?; - Ok(BlockchainDb { - db, - leaves: RwLock::new(leaves), - meta: Arc::new(RwLock::new(meta)), - }) - } - - fn update_meta( - &self, - hash: Block::Hash, - number: ::Number, - is_best: bool, - is_finalized: bool - ) { - let mut meta = self.meta.write(); - if number.is_zero() { - meta.genesis_hash = hash; - meta.finalized_hash = hash; - } - - if is_best { - meta.best_number = number; - meta.best_hash = hash; - } - - if is_finalized { - meta.finalized_number = number; - meta.finalized_hash = hash; - } - } + fn new(db: Arc) -> Result { + let meta 
= read_meta::(&*db, columns::META, columns::HEADER)?; + let leaves = LeafSet::read_from_db(&*db, columns::META, meta_keys::LEAF_PREFIX)?; + Ok(BlockchainDb { + db, + leaves: RwLock::new(leaves), + meta: Arc::new(RwLock::new(meta)), + }) + } + + fn update_meta( + &self, + hash: Block::Hash, + number: ::Number, + is_best: bool, + is_finalized: bool, + ) { + let mut meta = self.meta.write(); + if number.is_zero() { + meta.genesis_hash = hash; + meta.finalized_hash = hash; + } + + if is_best { + meta.best_number = number; + meta.best_hash = hash; + } + + if is_finalized { + meta.finalized_number = number; + meta.finalized_hash = hash; + } + } } impl client::blockchain::HeaderBackend for BlockchainDb { - fn header(&self, id: BlockId) -> Result, client::error::Error> { - utils::read_header(&*self.db, columns::KEY_LOOKUP, columns::HEADER, id) - } - - fn info(&self) -> Result, client::error::Error> { - let meta = self.meta.read(); - Ok(client::blockchain::Info { - best_hash: meta.best_hash, - best_number: meta.best_number, - genesis_hash: meta.genesis_hash, - finalized_hash: meta.finalized_hash, - finalized_number: meta.finalized_number, - }) - } - - fn status(&self, id: BlockId) -> Result { - let exists = match id { - BlockId::Hash(_) => read_db( - &*self.db, - columns::KEY_LOOKUP, - columns::HEADER, - id - )?.is_some(), - BlockId::Number(n) => n <= self.meta.read().best_number, - }; - match exists { - true => Ok(client::blockchain::BlockStatus::InChain), - false => Ok(client::blockchain::BlockStatus::Unknown), - } - } - - fn number(&self, hash: Block::Hash) -> Result>, client::error::Error> { - if let Some(lookup_key) = block_id_to_lookup_key::(&*self.db, columns::KEY_LOOKUP, BlockId::Hash(hash))? { - let number = utils::lookup_key_to_number(&lookup_key)?; - Ok(Some(number)) - } else { - Ok(None) - } - } - - fn hash(&self, number: NumberFor) -> Result, client::error::Error> { - self.header(BlockId::Number(number)).and_then(|maybe_header| match maybe_header { - Some(header) => Ok(Some(header.hash().clone())), - None => Ok(None), - }) - } + fn header(&self, id: BlockId) -> Result, client::error::Error> { + utils::read_header(&*self.db, columns::KEY_LOOKUP, columns::HEADER, id) + } + + fn info(&self) -> Result, client::error::Error> { + let meta = self.meta.read(); + Ok(client::blockchain::Info { + best_hash: meta.best_hash, + best_number: meta.best_number, + genesis_hash: meta.genesis_hash, + finalized_hash: meta.finalized_hash, + finalized_number: meta.finalized_number, + }) + } + + fn status( + &self, + id: BlockId, + ) -> Result { + let exists = match id { + BlockId::Hash(_) => { + read_db(&*self.db, columns::KEY_LOOKUP, columns::HEADER, id)?.is_some() + } + BlockId::Number(n) => n <= self.meta.read().best_number, + }; + match exists { + true => Ok(client::blockchain::BlockStatus::InChain), + false => Ok(client::blockchain::BlockStatus::Unknown), + } + } + + fn number(&self, hash: Block::Hash) -> Result>, client::error::Error> { + if let Some(lookup_key) = + block_id_to_lookup_key::(&*self.db, columns::KEY_LOOKUP, BlockId::Hash(hash))? 
+ { + let number = utils::lookup_key_to_number(&lookup_key)?; + Ok(Some(number)) + } else { + Ok(None) + } + } + + fn hash(&self, number: NumberFor) -> Result, client::error::Error> { + self.header(BlockId::Number(number)) + .and_then(|maybe_header| match maybe_header { + Some(header) => Ok(Some(header.hash().clone())), + None => Ok(None), + }) + } } impl client::blockchain::Backend for BlockchainDb { - fn body(&self, id: BlockId) -> Result>, client::error::Error> { - match read_db(&*self.db, columns::KEY_LOOKUP, columns::BODY, id)? { - Some(body) => match Decode::decode(&mut &body[..]) { - Some(body) => Ok(Some(body)), - None => return Err(client::error::ErrorKind::Backend("Error decoding body".into()).into()), - } - None => Ok(None), - } - } - - fn justification(&self, id: BlockId) -> Result, client::error::Error> { - match read_db(&*self.db, columns::KEY_LOOKUP, columns::JUSTIFICATION, id)? { - Some(justification) => match Decode::decode(&mut &justification[..]) { - Some(justification) => Ok(Some(justification)), - None => return Err(client::error::ErrorKind::Backend("Error decoding justification".into()).into()), - } - None => Ok(None), - } - } - - fn last_finalized(&self) -> Result { - Ok(self.meta.read().finalized_hash.clone()) - } - - fn cache(&self) -> Option>> { - None - } - - fn leaves(&self) -> Result, client::error::Error> { - Ok(self.leaves.read().hashes()) - } - - fn children(&self, parent_hash: Block::Hash) -> Result, client::error::Error> { - children::read_children(&*self.db, columns::META, meta_keys::CHILDREN_PREFIX, parent_hash) - } + fn body( + &self, + id: BlockId, + ) -> Result>, client::error::Error> { + match read_db(&*self.db, columns::KEY_LOOKUP, columns::BODY, id)? { + Some(body) => match Decode::decode(&mut &body[..]) { + Some(body) => Ok(Some(body)), + None => { + return Err( + client::error::ErrorKind::Backend("Error decoding body".into()).into(), + ); + } + }, + None => Ok(None), + } + } + + fn justification( + &self, + id: BlockId, + ) -> Result, client::error::Error> { + match read_db(&*self.db, columns::KEY_LOOKUP, columns::JUSTIFICATION, id)? 
{ + Some(justification) => match Decode::decode(&mut &justification[..]) { + Some(justification) => Ok(Some(justification)), + None => { + return Err(client::error::ErrorKind::Backend( + "Error decoding justification".into(), + ) + .into()); + } + }, + None => Ok(None), + } + } + + fn last_finalized(&self) -> Result { + Ok(self.meta.read().finalized_hash.clone()) + } + + fn cache(&self) -> Option>> { + None + } + + fn leaves(&self) -> Result, client::error::Error> { + Ok(self.leaves.read().hashes()) + } + + fn children(&self, parent_hash: Block::Hash) -> Result, client::error::Error> { + children::read_children( + &*self.db, + columns::META, + meta_keys::CHILDREN_PREFIX, + parent_hash, + ) + } } impl client::blockchain::ProvideCache for BlockchainDb { - fn cache(&self) -> Option>> { - None - } + fn cache(&self) -> Option>> { + None + } } /// Database transaction pub struct BlockImportOperation { - old_state: CachingState, - db_updates: PrefixedMemoryDB, - storage_updates: Vec<(Vec, Option>)>, - changes_trie_updates: MemoryDB, - pending_block: Option>, - aux_ops: Vec<(Vec, Option>)>, - finalized_blocks: Vec<(BlockId, Option)>, - set_head: Option>, + old_state: CachingState, + db_updates: PrefixedMemoryDB, + storage_updates: Vec<(Vec, Option>)>, + changes_trie_updates: MemoryDB, + pending_block: Option>, + aux_ops: Vec<(Vec, Option>)>, + finalized_blocks: Vec<(BlockId, Option)>, + set_head: Option>, } impl BlockImportOperation { - fn apply_aux(&mut self, transaction: &mut DBTransaction) { - for (key, maybe_val) in self.aux_ops.drain(..) { - match maybe_val { - Some(val) => transaction.put_vec(columns::AUX, &key, val), - None => transaction.delete(columns::AUX, &key), - } - } - } + fn apply_aux(&mut self, transaction: &mut DBTransaction) { + for (key, maybe_val) in self.aux_ops.drain(..) { + match maybe_val { + Some(val) => transaction.put_vec(columns::AUX, &key, val), + None => transaction.delete(columns::AUX, &key), + } + } + } } impl client::backend::BlockImportOperation -for BlockImportOperation -where Block: BlockT, + for BlockImportOperation +where + Block: BlockT, { - type State = CachingState; - - fn state(&self) -> Result, client::error::Error> { - Ok(Some(&self.old_state)) - } - - fn set_block_data( - &mut self, - header: Block::Header, - body: Option>, - justification: Option, - leaf_state: NewBlockState, - ) -> Result<(), client::error::Error> { - assert!(self.pending_block.is_none(), "Only one block per operation is allowed"); - self.pending_block = Some(PendingBlock { - header, - body, - justification, - leaf_state, - }); - Ok(()) - } - - fn update_cache(&mut self, _cache: HashMap>) { - // Currently cache isn't implemented on full nodes. 
- } - - fn update_db_storage(&mut self, update: PrefixedMemoryDB) -> Result<(), client::error::Error> { - self.db_updates = update; - Ok(()) - } - - fn reset_storage(&mut self, mut top: StorageOverlay, children: ChildrenStorageOverlay) -> Result { - - if top.iter().any(|(k, _)| well_known_keys::is_child_storage_key(k)) { - return Err(client::error::ErrorKind::GenesisInvalid.into()); - } - - let mut transaction: PrefixedMemoryDB = Default::default(); - - for (child_key, child_map) in children { - if !well_known_keys::is_child_storage_key(&child_key) { - return Err(client::error::ErrorKind::GenesisInvalid.into()); - } - - let (root, is_default, update) = self.old_state.child_storage_root(&child_key, child_map.into_iter().map(|(k, v)| (k, Some(v)))); - transaction.consolidate(update); - - if !is_default { - top.insert(child_key, root); - } - } - - let (root, update) = self.old_state.storage_root(top.into_iter().map(|(k, v)| (k, Some(v)))); - transaction.consolidate(update); - - self.db_updates = transaction; - Ok(root) - } - - fn update_changes_trie(&mut self, update: MemoryDB) -> Result<(), client::error::Error> { - self.changes_trie_updates = update; - Ok(()) - } - - fn insert_aux(&mut self, ops: I) -> Result<(), client::error::Error> - where I: IntoIterator, Option>)> - { - self.aux_ops.append(&mut ops.into_iter().collect()); - Ok(()) - } - - fn update_storage(&mut self, update: Vec<(Vec, Option>)>) -> Result<(), client::error::Error> { - self.storage_updates = update; - Ok(()) - } - - fn mark_finalized(&mut self, block: BlockId, justification: Option) -> Result<(), client::error::Error> { - self.finalized_blocks.push((block, justification)); - Ok(()) - } - - fn mark_head(&mut self, block: BlockId) -> Result<(), client::error::Error> { - assert!(self.set_head.is_none(), "Only one set head per operation is allowed"); - self.set_head = Some(block); - Ok(()) - } + type State = CachingState; + + fn state(&self) -> Result, client::error::Error> { + Ok(Some(&self.old_state)) + } + + fn set_block_data( + &mut self, + header: Block::Header, + body: Option>, + justification: Option, + leaf_state: NewBlockState, + ) -> Result<(), client::error::Error> { + assert!( + self.pending_block.is_none(), + "Only one block per operation is allowed" + ); + self.pending_block = Some(PendingBlock { + header, + body, + justification, + leaf_state, + }); + Ok(()) + } + + fn update_cache(&mut self, _cache: HashMap>) { + // Currently cache isn't implemented on full nodes. 
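+        // (Consistently, the `cache` accessors on `BlockchainDb` above also
+        // return `None`, so any values passed in here are simply dropped.)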
+ } + + fn update_db_storage( + &mut self, + update: PrefixedMemoryDB, + ) -> Result<(), client::error::Error> { + self.db_updates = update; + Ok(()) + } + + fn reset_storage( + &mut self, + mut top: StorageOverlay, + children: ChildrenStorageOverlay, + ) -> Result { + if top + .iter() + .any(|(k, _)| well_known_keys::is_child_storage_key(k)) + { + return Err(client::error::ErrorKind::GenesisInvalid.into()); + } + + let mut transaction: PrefixedMemoryDB = Default::default(); + + for (child_key, child_map) in children { + if !well_known_keys::is_child_storage_key(&child_key) { + return Err(client::error::ErrorKind::GenesisInvalid.into()); + } + + let (root, is_default, update) = self + .old_state + .child_storage_root(&child_key, child_map.into_iter().map(|(k, v)| (k, Some(v)))); + transaction.consolidate(update); + + if !is_default { + top.insert(child_key, root); + } + } + + let (root, update) = self + .old_state + .storage_root(top.into_iter().map(|(k, v)| (k, Some(v)))); + transaction.consolidate(update); + + self.db_updates = transaction; + Ok(root) + } + + fn update_changes_trie( + &mut self, + update: MemoryDB, + ) -> Result<(), client::error::Error> { + self.changes_trie_updates = update; + Ok(()) + } + + fn insert_aux(&mut self, ops: I) -> Result<(), client::error::Error> + where + I: IntoIterator, Option>)>, + { + self.aux_ops.append(&mut ops.into_iter().collect()); + Ok(()) + } + + fn update_storage( + &mut self, + update: Vec<(Vec, Option>)>, + ) -> Result<(), client::error::Error> { + self.storage_updates = update; + Ok(()) + } + + fn mark_finalized( + &mut self, + block: BlockId, + justification: Option, + ) -> Result<(), client::error::Error> { + self.finalized_blocks.push((block, justification)); + Ok(()) + } + + fn mark_head(&mut self, block: BlockId) -> Result<(), client::error::Error> { + assert!( + self.set_head.is_none(), + "Only one set head per operation is allowed" + ); + self.set_head = Some(block); + Ok(()) + } } struct StorageDb { - pub db: Arc, - pub state_db: StateDb>, + pub db: Arc, + pub state_db: StateDb>, } impl state_machine::Storage for StorageDb { - fn get(&self, key: &H256, prefix: &[u8]) -> Result, String> { - let key = prefixed_key::(key, prefix); - self.state_db.get(&key, self).map(|r| r.map(|v| DBValue::from_slice(&v))) - .map_err(|e| format!("Database backend error: {:?}", e)) - } + fn get(&self, key: &H256, prefix: &[u8]) -> Result, String> { + let key = prefixed_key::(key, prefix); + self.state_db + .get(&key, self) + .map(|r| r.map(|v| DBValue::from_slice(&v))) + .map_err(|e| format!("Database backend error: {:?}", e)) + } } impl state_db::NodeDb for StorageDb { - type Error = io::Error; - type Key = [u8]; - - fn get(&self, key: &[u8]) -> Result>, Self::Error> { - self.db.get(columns::STATE, key).map(|r| r.map(|v| v.to_vec())) - } + type Error = io::Error; + type Key = [u8]; + + fn get(&self, key: &[u8]) -> Result>, Self::Error> { + self.db + .get(columns::STATE, key) + .map(|r| r.map(|v| v.to_vec())) + } } struct DbGenesisStorage(pub H256); impl DbGenesisStorage { - pub fn new() -> Self { - let mut root = H256::default(); - let mut mdb = MemoryDB::::default(); - state_machine::TrieDBMut::::new(&mut mdb, &mut root); - DbGenesisStorage(root) - } + pub fn new() -> Self { + let mut root = H256::default(); + let mut mdb = MemoryDB::::default(); + state_machine::TrieDBMut::::new(&mut mdb, &mut root); + DbGenesisStorage(root) + } } impl state_machine::Storage for DbGenesisStorage { - fn get(&self, _key: &H256, _prefix: &[u8]) -> Result, String> { - 
Ok(None) - } + fn get(&self, _key: &H256, _prefix: &[u8]) -> Result, String> { + Ok(None) + } } pub struct DbChangesTrieStorage { - db: Arc, - meta: Arc, Block::Hash>>>, - min_blocks_to_keep: Option, - _phantom: ::std::marker::PhantomData, + db: Arc, + meta: Arc, Block::Hash>>>, + min_blocks_to_keep: Option, + _phantom: ::std::marker::PhantomData, } impl DbChangesTrieStorage { - /// Commit new changes trie. - pub fn commit(&self, tx: &mut DBTransaction, mut changes_trie: MemoryDB) { - for (key, (val, _)) in changes_trie.drain() { - tx.put(columns::CHANGES_TRIE, &key[..], &val); - } - } - - /// Prune obsolete changes tries. - pub fn prune(&self, config: Option, tx: &mut DBTransaction, block_hash: Block::Hash, block_num: NumberFor) { - // never prune on archive nodes - let min_blocks_to_keep = match self.min_blocks_to_keep { - Some(min_blocks_to_keep) => min_blocks_to_keep, - None => return, - }; - - // read configuration from the database. it is OK to do it here (without checking tx for - // modifications), since config can't change - let config = match config { - Some(config) => config, - None => return, - }; - - state_machine::prune_changes_tries( - &config, - &*self, - min_blocks_to_keep, - &state_machine::ChangesTrieAnchorBlockId { - hash: convert_hash(&block_hash), - number: block_num.as_(), - }, - |node| tx.delete(columns::CHANGES_TRIE, node.as_ref())); - } + /// Commit new changes trie. + pub fn commit(&self, tx: &mut DBTransaction, mut changes_trie: MemoryDB) { + for (key, (val, _)) in changes_trie.drain() { + tx.put(columns::CHANGES_TRIE, &key[..], &val); + } + } + + /// Prune obsolete changes tries. + pub fn prune( + &self, + config: Option, + tx: &mut DBTransaction, + block_hash: Block::Hash, + block_num: NumberFor, + ) { + // never prune on archive nodes + let min_blocks_to_keep = match self.min_blocks_to_keep { + Some(min_blocks_to_keep) => min_blocks_to_keep, + None => return, + }; + + // read configuration from the database. 
it is OK to do it here (without checking tx for + // modifications), since config can't change + let config = match config { + Some(config) => config, + None => return, + }; + + state_machine::prune_changes_tries( + &config, + &*self, + min_blocks_to_keep, + &state_machine::ChangesTrieAnchorBlockId { + hash: convert_hash(&block_hash), + number: block_num.as_(), + }, + |node| tx.delete(columns::CHANGES_TRIE, node.as_ref()), + ); + } } -impl client::backend::PrunableStateChangesTrieStorage for DbChangesTrieStorage { - fn oldest_changes_trie_block( - &self, - config: &ChangesTrieConfiguration, - best_finalized_block: u64 - ) -> u64 { - match self.min_blocks_to_keep { - Some(min_blocks_to_keep) => state_machine::oldest_non_pruned_changes_trie( - config, - min_blocks_to_keep, - best_finalized_block, - ), - None => 1, - } - } +impl client::backend::PrunableStateChangesTrieStorage + for DbChangesTrieStorage +{ + fn oldest_changes_trie_block( + &self, + config: &ChangesTrieConfiguration, + best_finalized_block: u64, + ) -> u64 { + match self.min_blocks_to_keep { + Some(min_blocks_to_keep) => state_machine::oldest_non_pruned_changes_trie( + config, + min_blocks_to_keep, + best_finalized_block, + ), + None => 1, + } + } } -impl state_machine::ChangesTrieRootsStorage for DbChangesTrieStorage { - fn root(&self, anchor: &state_machine::ChangesTrieAnchorBlockId, block: u64) -> Result, String> { - // check API requirement: we can't get NEXT block(s) based on anchor - if block > anchor.number { - return Err(format!("Can't get changes trie root at {} using anchor at {}", block, anchor.number)); - } - - // we need to get hash of the block to resolve changes trie root - let block_id = if block <= self.meta.read().finalized_number.as_() { - // if block is finalized, we could just read canonical hash - BlockId::Number(As::sa(block)) - } else { - // the block is not finalized - let mut current_num = anchor.number; - let mut current_hash: Block::Hash = convert_hash(&anchor.hash); - let maybe_anchor_header: Block::Header = utils::require_header::( - &*self.db, columns::KEY_LOOKUP, columns::HEADER, BlockId::Number(As::sa(current_num)) - ).map_err(|e| e.to_string())?; - if maybe_anchor_header.hash() == current_hash { - // if anchor is canonicalized, then the block is also canonicalized - BlockId::Number(As::sa(block)) - } else { - // else (block is not finalized + anchor is not canonicalized): - // => we should find the required block hash by traversing - // back from the anchor to the block with given number - while current_num != block { - let current_header: Block::Header = utils::require_header::( - &*self.db, columns::KEY_LOOKUP, columns::HEADER, BlockId::Hash(current_hash) - ).map_err(|e| e.to_string())?; - - current_hash = *current_header.parent_hash(); - current_num = current_num - 1; - } - - BlockId::Hash(current_hash) - } - }; - - Ok(utils::require_header::(&*self.db, columns::KEY_LOOKUP, columns::HEADER, block_id) - .map_err(|e| e.to_string())? 
- .digest().log(DigestItem::as_changes_trie_root) - .map(|root| H256::from_slice(root.as_ref()))) - } +impl state_machine::ChangesTrieRootsStorage + for DbChangesTrieStorage +{ + fn root( + &self, + anchor: &state_machine::ChangesTrieAnchorBlockId, + block: u64, + ) -> Result, String> { + // check API requirement: we can't get NEXT block(s) based on anchor + if block > anchor.number { + return Err(format!( + "Can't get changes trie root at {} using anchor at {}", + block, anchor.number + )); + } + + // we need to get hash of the block to resolve changes trie root + let block_id = if block <= self.meta.read().finalized_number.as_() { + // if block is finalized, we could just read canonical hash + BlockId::Number(As::sa(block)) + } else { + // the block is not finalized + let mut current_num = anchor.number; + let mut current_hash: Block::Hash = convert_hash(&anchor.hash); + let maybe_anchor_header: Block::Header = utils::require_header::( + &*self.db, + columns::KEY_LOOKUP, + columns::HEADER, + BlockId::Number(As::sa(current_num)), + ) + .map_err(|e| e.to_string())?; + if maybe_anchor_header.hash() == current_hash { + // if anchor is canonicalized, then the block is also canonicalized + BlockId::Number(As::sa(block)) + } else { + // else (block is not finalized + anchor is not canonicalized): + // => we should find the required block hash by traversing + // back from the anchor to the block with given number + while current_num != block { + let current_header: Block::Header = utils::require_header::( + &*self.db, + columns::KEY_LOOKUP, + columns::HEADER, + BlockId::Hash(current_hash), + ) + .map_err(|e| e.to_string())?; + + current_hash = *current_header.parent_hash(); + current_num = current_num - 1; + } + + BlockId::Hash(current_hash) + } + }; + + Ok(utils::require_header::( + &*self.db, + columns::KEY_LOOKUP, + columns::HEADER, + block_id, + ) + .map_err(|e| e.to_string())? + .digest() + .log(DigestItem::as_changes_trie_root) + .map(|root| H256::from_slice(root.as_ref()))) + } } -impl state_machine::ChangesTrieStorage for DbChangesTrieStorage { - fn get(&self, key: &H256, _prefix: &[u8]) -> Result, String> { - self.db.get(columns::CHANGES_TRIE, &key[..]) - .map_err(|err| format!("{}", err)) - } +impl state_machine::ChangesTrieStorage + for DbChangesTrieStorage +{ + fn get(&self, key: &H256, _prefix: &[u8]) -> Result, String> { + self.db + .get(columns::CHANGES_TRIE, &key[..]) + .map_err(|err| format!("{}", err)) + } } /// Disk backend. Keeps data in a key-value store. In archive mode, trie nodes are kept from all blocks. /// Otherwise, trie nodes are kept only from some recent blocks. pub struct Backend { - storage: Arc>, - changes_tries_storage: DbChangesTrieStorage, - blockchain: BlockchainDb, - canonicalization_delay: u64, - shared_cache: SharedCache, + storage: Arc>, + changes_tries_storage: DbChangesTrieStorage, + blockchain: BlockchainDb, + canonicalization_delay: u64, + shared_cache: SharedCache, } -impl> Backend { - /// Create a new instance of database backend. - /// - /// The pruning window is how old a block must be before the state is pruned. 
- pub fn new(config: DatabaseSettings, canonicalization_delay: u64) -> Result { - let db = open_database(&config, columns::META, "full")?; - - Backend::from_kvdb(db as Arc<_>, config.pruning, canonicalization_delay) - } - - #[cfg(any(test, feature = "test-helpers"))] - pub fn new_test(keep_blocks: u32, canonicalization_delay: u64) -> Self { - use utils::NUM_COLUMNS; - - let db = Arc::new(::kvdb_memorydb::create(NUM_COLUMNS)); - - Backend::from_kvdb( - db as Arc<_>, - PruningMode::keep_blocks(keep_blocks), - canonicalization_delay, - ).expect("failed to create test-db") - } - - fn from_kvdb(db: Arc, pruning: PruningMode, canonicalization_delay: u64) -> Result { - let is_archive_pruning = pruning.is_archive(); - let blockchain = BlockchainDb::new(db.clone())?; - let meta = blockchain.meta.clone(); - let map_e = |e: state_db::Error| ::client::error::Error::from(format!("State database error: {:?}", e)); - let state_db: StateDb<_, _> = StateDb::new(pruning, &StateMetaDb(&*db)).map_err(map_e)?; - let storage_db = StorageDb { - db: db.clone(), - state_db, - }; - let changes_tries_storage = DbChangesTrieStorage { - db, - meta, - min_blocks_to_keep: if is_archive_pruning { None } else { Some(MIN_BLOCKS_TO_KEEP_CHANGES_TRIES_FOR) }, - _phantom: Default::default(), - }; - - Ok(Backend { - storage: Arc::new(storage_db), - changes_tries_storage, - blockchain, - canonicalization_delay, - shared_cache: new_shared_cache(STATE_CACHE_SIZE_BYTES), - }) - } - - /// Returns in-memory blockchain that contains the same set of blocks that the self. - #[cfg(feature = "test-helpers")] - pub fn as_in_memory(&self) -> InMemoryBackend { - use client::backend::{Backend as ClientBackend, BlockImportOperation}; - use client::blockchain::Backend as BlockchainBackend; - - let inmem = InMemoryBackend::::new(); - - // get all headers hashes && sort them by number (could be duplicate) - let mut headers: Vec<(NumberFor, Block::Hash, Block::Header)> = Vec::new(); - for (_, header) in self.blockchain.db.iter(columns::HEADER) { - let header = Block::Header::decode(&mut &header[..]).unwrap(); - let hash = header.hash(); - let number = *header.number(); - let pos = headers.binary_search_by(|item| item.0.cmp(&number)); - match pos { - Ok(pos) => headers.insert(pos, (number, hash, header)), - Err(pos) => headers.insert(pos, (number, hash, header)), - } - } - - // insert all other headers + bodies + justifications - let info = self.blockchain.info().unwrap(); - for (number, hash, header) in headers { - let id = BlockId::Hash(hash); - let justification = self.blockchain.justification(id).unwrap(); - let body = self.blockchain.body(id).unwrap(); - let state = self.state_at(id).unwrap().pairs(); - - let new_block_state = if number.is_zero() { - NewBlockState::Final - } else if hash == info.best_hash { - NewBlockState::Best - } else { - NewBlockState::Normal - }; - let mut op = inmem.begin_operation().unwrap(); - op.set_block_data(header, body, justification, new_block_state).unwrap(); - op.update_db_storage(state.into_iter().map(|(k, v)| (None, k, Some(v))).collect()).unwrap(); - inmem.commit_operation(op).unwrap(); - } - - // and now finalize the best block we have - inmem.finalize_block(BlockId::Hash(info.finalized_hash), None).unwrap(); - - inmem - } - - /// Handle setting head within a transaction. `route_to` should be the last - /// block that existed in the database. `best_to` should be the best block - /// to be set. 
- /// - /// In the case where the new best block is a block to be imported, `route_to` - /// should be the parent of `best_to`. In the case where we set an existing block - /// to be best, `route_to` should equal to `best_to`. - fn set_head_with_transaction(&self, transaction: &mut DBTransaction, route_to: Block::Hash, best_to: (NumberFor, Block::Hash)) -> Result<(Vec, Vec), client::error::Error> { - let mut enacted = Vec::default(); - let mut retracted = Vec::default(); - - let meta = self.blockchain.meta.read(); - - // cannot find tree route with empty DB. - if meta.best_hash != Default::default() { - let tree_route = ::client::blockchain::tree_route( - &self.blockchain, - BlockId::Hash(meta.best_hash), - BlockId::Hash(route_to), - )?; - - // uncanonicalize: check safety violations and ensure the numbers no longer - // point to these block hashes in the key mapping. - for r in tree_route.retracted() { - if r.hash == meta.finalized_hash { - warn!( - "Potential safety failure: reverting finalized block {:?}", - (&r.number, &r.hash) - ); - - return Err(::client::error::ErrorKind::NotInFinalizedChain.into()); - } - - retracted.push(r.hash.clone()); - utils::remove_number_to_key_mapping( - transaction, - columns::KEY_LOOKUP, - r.number - ); - } - - // canonicalize: set the number lookup to map to this block's hash. - for e in tree_route.enacted() { - enacted.push(e.hash.clone()); - utils::insert_number_to_key_mapping( - transaction, - columns::KEY_LOOKUP, - e.number, - e.hash - ); - } - } - - let lookup_key = utils::number_and_hash_to_lookup_key(best_to.0, &best_to.1); - transaction.put(columns::META, meta_keys::BEST_BLOCK, &lookup_key); - utils::insert_number_to_key_mapping( - transaction, - columns::KEY_LOOKUP, - best_to.0, - best_to.1, - ); - - Ok((enacted, retracted)) - } - - fn ensure_sequential_finalization( - &self, - header: &Block::Header, - last_finalized: Option, - ) -> Result<(), client::error::Error> { - let last_finalized = last_finalized.unwrap_or_else(|| self.blockchain.meta.read().finalized_hash); - if *header.parent_hash() != last_finalized { - return Err(::client::error::ErrorKind::NonSequentialFinalization( - format!("Last finalized {:?} not parent of {:?}", last_finalized, header.hash()), - ).into()); - } - Ok(()) - } - - fn finalize_block_with_transaction( - &self, - transaction: &mut DBTransaction, - hash: &Block::Hash, - header: &Block::Header, - last_finalized: Option, - justification: Option, - finalization_displaced: &mut Option>>, - ) -> Result<(Block::Hash, ::Number, bool, bool), client::error::Error> { - // TODO: ensure best chain contains this block. - let number = *header.number(); - self.ensure_sequential_finalization(header, last_finalized)?; - self.note_finalized( - transaction, - header, - *hash, - finalization_displaced, - )?; - - if let Some(justification) = justification { - transaction.put( - columns::JUSTIFICATION, - &utils::number_and_hash_to_lookup_key(number, hash), - &justification.encode(), - ); - } - Ok((*hash, number, false, true)) - } - - // performs forced canonicaliziation with a delay after importning a non-finalized block. 
- fn force_delayed_canonicalize( - &self, - transaction: &mut DBTransaction, - hash: Block::Hash, - number: NumberFor, - ) - -> Result<(), client::error::Error> - { - let number_u64 = number.as_(); - if number_u64 > self.canonicalization_delay { - let new_canonical = number_u64 - self.canonicalization_delay; - - if new_canonical <= self.storage.state_db.best_canonical().unwrap_or(0) { - return Ok(()) - } - - let hash = if new_canonical == number_u64 { - hash - } else { - ::client::blockchain::HeaderBackend::hash(&self.blockchain, As::sa(new_canonical))? - .expect("existence of block with number `new_canonical` \ - implies existence of blocks with all numbers before it; qed") - }; - - trace!(target: "db", "Canonicalize block #{} ({:?})", new_canonical, hash); - let commit = self.storage.state_db.canonicalize_block(&hash) - .map_err(|e: state_db::Error| client::error::Error::from(format!("State database error: {:?}", e)))?; - apply_state_commit(transaction, commit); - }; - - Ok(()) - } - - fn try_commit_operation(&self, mut operation: BlockImportOperation) - -> Result<(), client::error::Error> - { - let mut transaction = DBTransaction::new(); - let mut finalization_displaced_leaves = None; - - operation.apply_aux(&mut transaction); - - let mut meta_updates = Vec::new(); - let mut last_finalized_hash = self.blockchain.meta.read().finalized_hash; - - if !operation.finalized_blocks.is_empty() { - for (block, justification) in operation.finalized_blocks { - let block_hash = self.blockchain.expect_block_hash_from_id(&block)?; - let block_header = self.blockchain.expect_header(BlockId::Hash(block_hash))?; - - meta_updates.push(self.finalize_block_with_transaction( - &mut transaction, - &block_hash, - &block_header, - Some(last_finalized_hash), - justification, - &mut finalization_displaced_leaves, - )?); - last_finalized_hash = block_hash; - } - } - - let imported = if let Some(pending_block) = operation.pending_block { - let hash = pending_block.header.hash(); - let parent_hash = *pending_block.header.parent_hash(); - let number = pending_block.header.number().clone(); - - // blocks are keyed by number + hash. - let lookup_key = utils::number_and_hash_to_lookup_key(number, hash); - - let (enacted, retracted) = if pending_block.leaf_state.is_best() { - self.set_head_with_transaction(&mut transaction, parent_hash, (number, hash))? 
- } else { - (Default::default(), Default::default()) - }; - - utils::insert_hash_to_key_mapping( - &mut transaction, - columns::KEY_LOOKUP, - number, - hash, - ); - - transaction.put(columns::HEADER, &lookup_key, &pending_block.header.encode()); - if let Some(body) = pending_block.body { - transaction.put(columns::BODY, &lookup_key, &body.encode()); - } - if let Some(justification) = pending_block.justification { - transaction.put(columns::JUSTIFICATION, &lookup_key, &justification.encode()); - } - - if number.is_zero() { - transaction.put(columns::META, meta_keys::FINALIZED_BLOCK, &lookup_key); - transaction.put(columns::META, meta_keys::GENESIS_HASH, hash.as_ref()); - } - - let mut changeset: state_db::ChangeSet> = state_db::ChangeSet::default(); - for (key, (val, rc)) in operation.db_updates.drain() { - if rc > 0 { - changeset.inserted.push((key, val.to_vec())); - } else if rc < 0 { - changeset.deleted.push(key); - } - } - let number_u64 = number.as_(); - let commit = self.storage.state_db.insert_block(&hash, number_u64, &pending_block.header.parent_hash(), changeset) - .map_err(|e: state_db::Error| client::error::Error::from(format!("State database error: {:?}", e)))?; - apply_state_commit(&mut transaction, commit); - - // Check if need to finalize. Genesis is always finalized instantly. - let finalized = number_u64 == 0 || pending_block.leaf_state.is_final(); - - let header = &pending_block.header; - let is_best = pending_block.leaf_state.is_best(); - let changes_trie_updates = operation.changes_trie_updates; - - self.changes_tries_storage.commit(&mut transaction, changes_trie_updates); - - if finalized { - // TODO: ensure best chain contains this block. - self.ensure_sequential_finalization(header, Some(last_finalized_hash))?; - self.note_finalized( - &mut transaction, - header, - hash, - &mut finalization_displaced_leaves, - )?; - } else { - // canonicalize blocks which are old enough, regardless of finality. - self.force_delayed_canonicalize(&mut transaction, hash, *header.number())? - } - - debug!(target: "db", "DB Commit {:?} ({}), best = {}", hash, number, is_best); - - let displaced_leaf = { - let mut leaves = self.blockchain.leaves.write(); - let displaced_leaf = leaves.import(hash, number, parent_hash); - leaves.prepare_transaction(&mut transaction, columns::META, meta_keys::LEAF_PREFIX); - - displaced_leaf - }; - - let mut children = children::read_children(&*self.storage.db, columns::META, meta_keys::CHILDREN_PREFIX, parent_hash)?; - children.push(hash); - children::write_children(&mut transaction, columns::META, meta_keys::CHILDREN_PREFIX, parent_hash, children); - - meta_updates.push((hash, number, pending_block.leaf_state.is_best(), finalized)); - - Some((number, hash, enacted, retracted, displaced_leaf, is_best)) - } else { - None - }; - - if let Some(set_head) = operation.set_head { - if let Some(header) = ::client::blockchain::HeaderBackend::header(&self.blockchain, set_head)? 
{ - let number = header.number(); - let hash = header.hash(); - - self.set_head_with_transaction( - &mut transaction, - hash.clone(), - (number.clone(), hash.clone()) - )?; - } else { - return Err(client::error::ErrorKind::UnknownBlock(format!("Cannot set head {:?}", set_head)).into()) - } - } - - let write_result = self.storage.db.write(transaction).map_err(db_err); - - if let Some((number, hash, enacted, retracted, displaced_leaf, is_best)) = imported { - if let Err(e) = write_result { - let mut leaves = self.blockchain.leaves.write(); - let mut undo = leaves.undo(); - if let Some(displaced_leaf) = displaced_leaf { - undo.undo_import(displaced_leaf); - } - - if let Some(finalization_displaced) = finalization_displaced_leaves { - undo.undo_finalization(finalization_displaced); - } - - return Err(e) - } - - operation.old_state.sync_cache( - &enacted, - &retracted, - operation.storage_updates, - Some(hash), - Some(number), - || is_best, - ); - } - - for (hash, number, is_best, is_finalized) in meta_updates { - self.blockchain.update_meta(hash, number, is_best, is_finalized); - } - - Ok(()) - } - - - // write stuff to a transaction after a new block is finalized. - // this canonicalizes finalized blocks. Fails if called with a block which - // was not a child of the last finalized block. - fn note_finalized( - &self, - transaction: &mut DBTransaction, - f_header: &Block::Header, - f_hash: Block::Hash, - displaced: &mut Option>> - ) -> Result<(), client::error::Error> where - Block: BlockT, - { - let f_num = f_header.number().clone(); - - if self.storage.state_db.best_canonical().map(|c| f_num.as_() > c).unwrap_or(true) { - let parent_hash = f_header.parent_hash().clone(); - - let lookup_key = utils::number_and_hash_to_lookup_key(f_num, f_hash.clone()); - transaction.put(columns::META, meta_keys::FINALIZED_BLOCK, &lookup_key); - - let commit = self.storage.state_db.canonicalize_block(&f_hash) - .map_err(|e: state_db::Error| client::error::Error::from(format!("State database error: {:?}", e)))?; - apply_state_commit(transaction, commit); - - // read config from genesis, since it is readonly atm - use client::backend::Backend; - let changes_trie_config: Option = self.state_at(BlockId::Hash(parent_hash))? - .storage(well_known_keys::CHANGES_TRIE_CONFIG)? - .and_then(|v| Decode::decode(&mut &*v)); - self.changes_tries_storage.prune(changes_trie_config, transaction, f_hash, f_num); - } - - let new_displaced = self.blockchain.leaves.write().finalize_height(f_num); - match displaced { - x @ &mut None => *x = Some(new_displaced), - &mut Some(ref mut displaced) => displaced.merge(new_displaced), - } - - Ok(()) - } +impl> Backend { + /// Create a new instance of database backend. + /// + /// The pruning window is how old a block must be before the state is pruned. 
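+    ///
+    /// For example, a backend opened with `PruningMode::keep_blocks(256)` keeps
+    /// state for roughly the last 256 blocks only, while the archive modes keep
+    /// the state of all canonical blocks.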
+    pub fn new(
+        config: DatabaseSettings,
+        canonicalization_delay: u64,
+    ) -> Result {
+        let db = open_database(&config, columns::META, "full")?;
+
+        Backend::from_kvdb(db as Arc<_>, config.pruning, canonicalization_delay)
+    }
+
+    #[cfg(any(test, feature = "test-helpers"))]
+    pub fn new_test(keep_blocks: u32, canonicalization_delay: u64) -> Self {
+        use utils::NUM_COLUMNS;
+
+        let db = Arc::new(::kvdb_memorydb::create(NUM_COLUMNS));
+
+        Backend::from_kvdb(
+            db as Arc<_>,
+            PruningMode::keep_blocks(keep_blocks),
+            canonicalization_delay,
+        )
+        .expect("failed to create test-db")
+    }
+
+    fn from_kvdb(
+        db: Arc,
+        pruning: PruningMode,
+        canonicalization_delay: u64,
+    ) -> Result {
+        let is_archive_pruning = pruning.is_archive();
+        let blockchain = BlockchainDb::new(db.clone())?;
+        let meta = blockchain.meta.clone();
+        let map_e = |e: state_db::Error| {
+            ::client::error::Error::from(format!("State database error: {:?}", e))
+        };
+        let state_db: StateDb<_, _> = StateDb::new(pruning, &StateMetaDb(&*db)).map_err(map_e)?;
+        let storage_db = StorageDb {
+            db: db.clone(),
+            state_db,
+        };
+        let changes_tries_storage = DbChangesTrieStorage {
+            db,
+            meta,
+            min_blocks_to_keep: if is_archive_pruning {
+                None
+            } else {
+                Some(MIN_BLOCKS_TO_KEEP_CHANGES_TRIES_FOR)
+            },
+            _phantom: Default::default(),
+        };
+
+        Ok(Backend {
+            storage: Arc::new(storage_db),
+            changes_tries_storage,
+            blockchain,
+            canonicalization_delay,
+            shared_cache: new_shared_cache(STATE_CACHE_SIZE_BYTES),
+        })
+    }
+
+    /// Returns an in-memory blockchain that contains the same set of blocks as `self`.
+    #[cfg(feature = "test-helpers")]
+    pub fn as_in_memory(&self) -> InMemoryBackend {
+        use client::backend::{Backend as ClientBackend, BlockImportOperation};
+        use client::blockchain::Backend as BlockchainBackend;
+
+        let inmem = InMemoryBackend::::new();
+
+        // get all header hashes and sort them by number (duplicates are possible)
+        let mut headers: Vec<(NumberFor, Block::Hash, Block::Header)> = Vec::new();
+        for (_, header) in self.blockchain.db.iter(columns::HEADER) {
+            let header = Block::Header::decode(&mut &header[..]).unwrap();
+            let hash = header.hash();
+            let number = *header.number();
+            let pos = headers.binary_search_by(|item| item.0.cmp(&number));
+            match pos {
+                Ok(pos) => headers.insert(pos, (number, hash, header)),
+                Err(pos) => headers.insert(pos, (number, hash, header)),
+            }
+        }
+
+        // insert all other headers + bodies + justifications
+        let info = self.blockchain.info().unwrap();
+        for (number, hash, header) in headers {
+            let id = BlockId::Hash(hash);
+            let justification = self.blockchain.justification(id).unwrap();
+            let body = self.blockchain.body(id).unwrap();
+            let state = self.state_at(id).unwrap().pairs();
+
+            let new_block_state = if number.is_zero() {
+                NewBlockState::Final
+            } else if hash == info.best_hash {
+                NewBlockState::Best
+            } else {
+                NewBlockState::Normal
+            };
+            let mut op = inmem.begin_operation().unwrap();
+            op.set_block_data(header, body, justification, new_block_state)
+                .unwrap();
+            op.update_db_storage(state.into_iter().map(|(k, v)| (None, k, Some(v))).collect())
+                .unwrap();
+            inmem.commit_operation(op).unwrap();
+        }
+
+        // and now finalize the best block we have
+        inmem
+            .finalize_block(BlockId::Hash(info.finalized_hash), None)
+            .unwrap();
+
+        inmem
+    }
+
+    /// Handle setting head within a transaction. `route_to` should be the last
+    /// block that existed in the database. `best_to` should be the best block
+    /// to be set.
+    ///
+    /// In the case where the new best block is a block to be imported, `route_to`
+    /// should be the parent of `best_to`. In the case where we set an existing block
+    /// to be best, `route_to` should equal to `best_to`.
+    fn set_head_with_transaction(
+        &self,
+        transaction: &mut DBTransaction,
+        route_to: Block::Hash,
+        best_to: (NumberFor, Block::Hash),
+    ) -> Result<(Vec, Vec), client::error::Error> {
+        let mut enacted = Vec::default();
+        let mut retracted = Vec::default();
+
+        let meta = self.blockchain.meta.read();
+
+        // cannot find tree route with empty DB.
+        if meta.best_hash != Default::default() {
+            let tree_route = ::client::blockchain::tree_route(
+                &self.blockchain,
+                BlockId::Hash(meta.best_hash),
+                BlockId::Hash(route_to),
+            )?;
+
+            // uncanonicalize: check safety violations and ensure the numbers no longer
+            // point to these block hashes in the key mapping.
+            for r in tree_route.retracted() {
+                if r.hash == meta.finalized_hash {
+                    warn!(
+                        "Potential safety failure: reverting finalized block {:?}",
+                        (&r.number, &r.hash)
+                    );
+
+                    return Err(::client::error::ErrorKind::NotInFinalizedChain.into());
+                }
+
+                retracted.push(r.hash.clone());
+                utils::remove_number_to_key_mapping(transaction, columns::KEY_LOOKUP, r.number);
+            }
+
+            // canonicalize: set the number lookup to map to this block's hash.
+            for e in tree_route.enacted() {
+                enacted.push(e.hash.clone());
+                utils::insert_number_to_key_mapping(
+                    transaction,
+                    columns::KEY_LOOKUP,
+                    e.number,
+                    e.hash,
+                );
+            }
+        }
+
+        let lookup_key = utils::number_and_hash_to_lookup_key(best_to.0, &best_to.1);
+        transaction.put(columns::META, meta_keys::BEST_BLOCK, &lookup_key);
+        utils::insert_number_to_key_mapping(transaction, columns::KEY_LOOKUP, best_to.0, best_to.1);
+
+        Ok((enacted, retracted))
+    }
+
+    fn ensure_sequential_finalization(
+        &self,
+        header: &Block::Header,
+        last_finalized: Option,
+    ) -> Result<(), client::error::Error> {
+        let last_finalized =
+            last_finalized.unwrap_or_else(|| self.blockchain.meta.read().finalized_hash);
+        if *header.parent_hash() != last_finalized {
+            return Err(
+                ::client::error::ErrorKind::NonSequentialFinalization(format!(
+                    "Last finalized {:?} not parent of {:?}",
+                    last_finalized,
+                    header.hash()
+                ))
+                .into(),
+            );
+        }
+        Ok(())
+    }
+
+    fn finalize_block_with_transaction(
+        &self,
+        transaction: &mut DBTransaction,
+        hash: &Block::Hash,
+        header: &Block::Header,
+        last_finalized: Option,
+        justification: Option,
+        finalization_displaced: &mut Option>>,
+    ) -> Result<(Block::Hash, ::Number, bool, bool), client::error::Error>
+    {
+        // TODO: ensure best chain contains this block.
+        let number = *header.number();
+        self.ensure_sequential_finalization(header, last_finalized)?;
+        self.note_finalized(transaction, header, *hash, finalization_displaced)?;
+
+        if let Some(justification) = justification {
+            transaction.put(
+                columns::JUSTIFICATION,
+                &utils::number_and_hash_to_lookup_key(number, hash),
+                &justification.encode(),
+            );
+        }
+        Ok((*hash, number, false, true))
+    }
+
+    // performs forced canonicalization with a delay after importing a non-finalized block.
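+    // E.g. with a canonicalization delay of 4096 blocks (an illustrative value),
+    // importing non-finalized block #10_000 canonicalizes block #5_904, unless
+    // the state database has already been canonicalized past that point.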
+ fn force_delayed_canonicalize( + &self, + transaction: &mut DBTransaction, + hash: Block::Hash, + number: NumberFor, + ) -> Result<(), client::error::Error> { + let number_u64 = number.as_(); + if number_u64 > self.canonicalization_delay { + let new_canonical = number_u64 - self.canonicalization_delay; + + if new_canonical <= self.storage.state_db.best_canonical().unwrap_or(0) { + return Ok(()); + } + + let hash = if new_canonical == number_u64 { + hash + } else { + ::client::blockchain::HeaderBackend::hash(&self.blockchain, As::sa(new_canonical))? + .expect( + "existence of block with number `new_canonical` \ + implies existence of blocks with all numbers before it; qed", + ) + }; + + trace!(target: "db", "Canonicalize block #{} ({:?})", new_canonical, hash); + let commit = self.storage.state_db.canonicalize_block(&hash).map_err( + |e: state_db::Error| { + client::error::Error::from(format!("State database error: {:?}", e)) + }, + )?; + apply_state_commit(transaction, commit); + }; + + Ok(()) + } + + fn try_commit_operation( + &self, + mut operation: BlockImportOperation, + ) -> Result<(), client::error::Error> { + let mut transaction = DBTransaction::new(); + let mut finalization_displaced_leaves = None; + + operation.apply_aux(&mut transaction); + + let mut meta_updates = Vec::new(); + let mut last_finalized_hash = self.blockchain.meta.read().finalized_hash; + + if !operation.finalized_blocks.is_empty() { + for (block, justification) in operation.finalized_blocks { + let block_hash = self.blockchain.expect_block_hash_from_id(&block)?; + let block_header = self.blockchain.expect_header(BlockId::Hash(block_hash))?; + + meta_updates.push(self.finalize_block_with_transaction( + &mut transaction, + &block_hash, + &block_header, + Some(last_finalized_hash), + justification, + &mut finalization_displaced_leaves, + )?); + last_finalized_hash = block_hash; + } + } + + let imported = if let Some(pending_block) = operation.pending_block { + let hash = pending_block.header.hash(); + let parent_hash = *pending_block.header.parent_hash(); + let number = pending_block.header.number().clone(); + + // blocks are keyed by number + hash. + let lookup_key = utils::number_and_hash_to_lookup_key(number, hash); + + let (enacted, retracted) = if pending_block.leaf_state.is_best() { + self.set_head_with_transaction(&mut transaction, parent_hash, (number, hash))? 
+ } else { + (Default::default(), Default::default()) + }; + + utils::insert_hash_to_key_mapping(&mut transaction, columns::KEY_LOOKUP, number, hash); + + transaction.put(columns::HEADER, &lookup_key, &pending_block.header.encode()); + if let Some(body) = pending_block.body { + transaction.put(columns::BODY, &lookup_key, &body.encode()); + } + if let Some(justification) = pending_block.justification { + transaction.put(columns::JUSTIFICATION, &lookup_key, &justification.encode()); + } + + if number.is_zero() { + transaction.put(columns::META, meta_keys::FINALIZED_BLOCK, &lookup_key); + transaction.put(columns::META, meta_keys::GENESIS_HASH, hash.as_ref()); + } + + let mut changeset: state_db::ChangeSet> = state_db::ChangeSet::default(); + for (key, (val, rc)) in operation.db_updates.drain() { + if rc > 0 { + changeset.inserted.push((key, val.to_vec())); + } else if rc < 0 { + changeset.deleted.push(key); + } + } + let number_u64 = number.as_(); + let commit = self + .storage + .state_db + .insert_block( + &hash, + number_u64, + &pending_block.header.parent_hash(), + changeset, + ) + .map_err(|e: state_db::Error| { + client::error::Error::from(format!("State database error: {:?}", e)) + })?; + apply_state_commit(&mut transaction, commit); + + // Check if need to finalize. Genesis is always finalized instantly. + let finalized = number_u64 == 0 || pending_block.leaf_state.is_final(); + + let header = &pending_block.header; + let is_best = pending_block.leaf_state.is_best(); + let changes_trie_updates = operation.changes_trie_updates; + + self.changes_tries_storage + .commit(&mut transaction, changes_trie_updates); + + if finalized { + // TODO: ensure best chain contains this block. + self.ensure_sequential_finalization(header, Some(last_finalized_hash))?; + self.note_finalized( + &mut transaction, + header, + hash, + &mut finalization_displaced_leaves, + )?; + } else { + // canonicalize blocks which are old enough, regardless of finality. + self.force_delayed_canonicalize(&mut transaction, hash, *header.number())? + } + + debug!(target: "db", "DB Commit {:?} ({}), best = {}", hash, number, is_best); + + let displaced_leaf = { + let mut leaves = self.blockchain.leaves.write(); + let displaced_leaf = leaves.import(hash, number, parent_hash); + leaves.prepare_transaction(&mut transaction, columns::META, meta_keys::LEAF_PREFIX); + + displaced_leaf + }; + + let mut children = children::read_children( + &*self.storage.db, + columns::META, + meta_keys::CHILDREN_PREFIX, + parent_hash, + )?; + children.push(hash); + children::write_children( + &mut transaction, + columns::META, + meta_keys::CHILDREN_PREFIX, + parent_hash, + children, + ); + + meta_updates.push((hash, number, pending_block.leaf_state.is_best(), finalized)); + + Some((number, hash, enacted, retracted, displaced_leaf, is_best)) + } else { + None + }; + + if let Some(set_head) = operation.set_head { + if let Some(header) = + ::client::blockchain::HeaderBackend::header(&self.blockchain, set_head)? 
+ { + let number = header.number(); + let hash = header.hash(); + + self.set_head_with_transaction( + &mut transaction, + hash.clone(), + (number.clone(), hash.clone()), + )?; + } else { + return Err(client::error::ErrorKind::UnknownBlock(format!( + "Cannot set head {:?}", + set_head + )) + .into()); + } + } + + let write_result = self.storage.db.write(transaction).map_err(db_err); + + if let Some((number, hash, enacted, retracted, displaced_leaf, is_best)) = imported { + if let Err(e) = write_result { + let mut leaves = self.blockchain.leaves.write(); + let mut undo = leaves.undo(); + if let Some(displaced_leaf) = displaced_leaf { + undo.undo_import(displaced_leaf); + } + + if let Some(finalization_displaced) = finalization_displaced_leaves { + undo.undo_finalization(finalization_displaced); + } + + return Err(e); + } + + operation.old_state.sync_cache( + &enacted, + &retracted, + operation.storage_updates, + Some(hash), + Some(number), + || is_best, + ); + } + + for (hash, number, is_best, is_finalized) in meta_updates { + self.blockchain + .update_meta(hash, number, is_best, is_finalized); + } + + Ok(()) + } + + // write stuff to a transaction after a new block is finalized. + // this canonicalizes finalized blocks. Fails if called with a block which + // was not a child of the last finalized block. + fn note_finalized( + &self, + transaction: &mut DBTransaction, + f_header: &Block::Header, + f_hash: Block::Hash, + displaced: &mut Option>>, + ) -> Result<(), client::error::Error> + where + Block: BlockT, + { + let f_num = f_header.number().clone(); + + if self + .storage + .state_db + .best_canonical() + .map(|c| f_num.as_() > c) + .unwrap_or(true) + { + let parent_hash = f_header.parent_hash().clone(); + + let lookup_key = utils::number_and_hash_to_lookup_key(f_num, f_hash.clone()); + transaction.put(columns::META, meta_keys::FINALIZED_BLOCK, &lookup_key); + + let commit = self.storage.state_db.canonicalize_block(&f_hash).map_err( + |e: state_db::Error| { + client::error::Error::from(format!("State database error: {:?}", e)) + }, + )?; + apply_state_commit(transaction, commit); + + // read config from genesis, since it is readonly atm + use client::backend::Backend; + let changes_trie_config: Option = self + .state_at(BlockId::Hash(parent_hash))? + .storage(well_known_keys::CHANGES_TRIE_CONFIG)? 
+ .and_then(|v| Decode::decode(&mut &*v)); + self.changes_tries_storage + .prune(changes_trie_config, transaction, f_hash, f_num); + } + + let new_displaced = self.blockchain.leaves.write().finalize_height(f_num); + match displaced { + x @ &mut None => *x = Some(new_displaced), + &mut Some(ref mut displaced) => displaced.merge(new_displaced), + } + + Ok(()) + } } fn apply_state_commit(transaction: &mut DBTransaction, commit: state_db::CommitSet>) { - for (key, val) in commit.data.inserted.into_iter() { - transaction.put(columns::STATE, &key[..], &val); - } - for key in commit.data.deleted.into_iter() { - transaction.delete(columns::STATE, &key[..]); - } - for (key, val) in commit.meta.inserted.into_iter() { - transaction.put(columns::STATE_META, &key[..], &val); - } - for key in commit.meta.deleted.into_iter() { - transaction.delete(columns::STATE_META, &key[..]); - } + for (key, val) in commit.data.inserted.into_iter() { + transaction.put(columns::STATE, &key[..], &val); + } + for key in commit.data.deleted.into_iter() { + transaction.delete(columns::STATE, &key[..]); + } + for (key, val) in commit.meta.inserted.into_iter() { + transaction.put(columns::STATE_META, &key[..], &val); + } + for key in commit.meta.deleted.into_iter() { + transaction.delete(columns::STATE_META, &key[..]); + } } -impl client::backend::AuxStore for Backend where Block: BlockT { - fn insert_aux< - 'a, - 'b: 'a, - 'c: 'a, - I: IntoIterator, - D: IntoIterator, - >(&self, insert: I, delete: D) -> client::error::Result<()> { - let mut transaction = DBTransaction::new(); - for (k, v) in insert { - transaction.put(columns::AUX, k, v); - } - for k in delete { - transaction.delete(columns::AUX, k); - } - self.storage.db.write(transaction).map_err(db_err)?; - Ok(()) - } - - fn get_aux(&self, key: &[u8]) -> Result>, client::error::Error> { - Ok(self.storage.db.get(columns::AUX, key).map(|r| r.map(|v| v.to_vec())).map_err(db_err)?) - } +impl client::backend::AuxStore for Backend +where + Block: BlockT, +{ + fn insert_aux< + 'a, + 'b: 'a, + 'c: 'a, + I: IntoIterator, + D: IntoIterator, + >( + &self, + insert: I, + delete: D, + ) -> client::error::Result<()> { + let mut transaction = DBTransaction::new(); + for (k, v) in insert { + transaction.put(columns::AUX, k, v); + } + for k in delete { + transaction.delete(columns::AUX, k); + } + self.storage.db.write(transaction).map_err(db_err)?; + Ok(()) + } + + fn get_aux(&self, key: &[u8]) -> Result>, client::error::Error> { + Ok(self + .storage + .db + .get(columns::AUX, key) + .map(|r| r.map(|v| v.to_vec())) + .map_err(db_err)?) 
+ } } -impl client::backend::Backend for Backend where Block: BlockT { - type BlockImportOperation = BlockImportOperation; - type Blockchain = BlockchainDb; - type State = CachingState; - type ChangesTrieStorage = DbChangesTrieStorage; - - fn begin_operation(&self) -> Result { - let old_state = self.state_at(BlockId::Hash(Default::default()))?; - Ok(BlockImportOperation { - pending_block: None, - old_state, - db_updates: PrefixedMemoryDB::default(), - storage_updates: Default::default(), - changes_trie_updates: MemoryDB::default(), - aux_ops: Vec::new(), - finalized_blocks: Vec::new(), - set_head: None, - }) - } - - fn begin_state_operation(&self, operation: &mut Self::BlockImportOperation, block: BlockId) -> Result<(), client::error::Error> { - operation.old_state = self.state_at(block)?; - Ok(()) - } - - fn commit_operation(&self, operation: Self::BlockImportOperation) - -> Result<(), client::error::Error> - { - match self.try_commit_operation(operation) { - Ok(_) => { - self.storage.state_db.apply_pending(); - Ok(()) - }, - e @ Err(_) => { - self.storage.state_db.revert_pending(); - e - } - } - } - - fn finalize_block(&self, block: BlockId, justification: Option) - -> Result<(), client::error::Error> - { - let mut transaction = DBTransaction::new(); - let hash = self.blockchain.expect_block_hash_from_id(&block)?; - let header = self.blockchain.expect_header(block)?; - let mut displaced = None; - let commit = |displaced| { - let (hash, number, is_best, is_finalized) = self.finalize_block_with_transaction( - &mut transaction, - &hash, - &header, - None, - justification, - displaced, - )?; - self.storage.db.write(transaction).map_err(db_err)?; - self.blockchain.update_meta(hash, number, is_best, is_finalized); - Ok(()) - }; - match commit(&mut displaced) { - Ok(()) => self.storage.state_db.apply_pending(), - e @ Err(_) => { - self.storage.state_db.revert_pending(); - if let Some(displaced) = displaced { - self.blockchain.leaves.write().undo().undo_finalization(displaced); - } - return e; - } - } - Ok(()) - } - - fn changes_trie_storage(&self) -> Option<&Self::ChangesTrieStorage> { - Some(&self.changes_tries_storage) - } - - fn revert(&self, n: NumberFor) -> Result, client::error::Error> { - use client::blockchain::HeaderBackend; - - let mut best = self.blockchain.info()?.best_number; - let finalized = self.blockchain.info()?.finalized_number; - let revertible = best - finalized; - let n = if revertible < n { revertible } else { n }; - - for c in 0 .. n.as_() { - if best == As::sa(0) { - return Ok(As::sa(c)) - } - let mut transaction = DBTransaction::new(); - match self.storage.state_db.revert_one() { - Some(commit) => { - apply_state_commit(&mut transaction, commit); - let removed = self.blockchain.header(BlockId::Number(best))?.ok_or_else( - || client::error::ErrorKind::UnknownBlock( - format!("Error reverting to {}. Block hash not found.", best)))?; - - best -= As::sa(1); // prev block - let hash = self.blockchain.hash(best)?.ok_or_else( - || client::error::ErrorKind::UnknownBlock( - format!("Error reverting to {}. 
Block hash not found.", best)))?; - let key = utils::number_and_hash_to_lookup_key(best.clone(), &hash); - transaction.put(columns::META, meta_keys::BEST_BLOCK, &key); - transaction.delete(columns::KEY_LOOKUP, removed.hash().as_ref()); - children::remove_children(&mut transaction, columns::META, meta_keys::CHILDREN_PREFIX, hash); - self.storage.db.write(transaction).map_err(db_err)?; - self.blockchain.update_meta(hash, best, true, false); - self.blockchain.leaves.write().revert(removed.hash().clone(), removed.number().clone(), removed.parent_hash().clone()); - } - None => return Ok(As::sa(c)) - } - } - Ok(n) - } - - fn blockchain(&self) -> &BlockchainDb { - &self.blockchain - } - - fn state_at(&self, block: BlockId) -> Result { - use client::blockchain::HeaderBackend as BcHeaderBackend; - - // special case for genesis initialization - match block { - BlockId::Hash(h) if h == Default::default() => { - let genesis_storage = DbGenesisStorage::new(); - let root = genesis_storage.0.clone(); - let state = DbState::new(Arc::new(genesis_storage), root); - return Ok(CachingState::new(state, self.shared_cache.clone(), None)); - }, - _ => {} - } - - match self.blockchain.header(block) { - Ok(Some(ref hdr)) => { - let hash = hdr.hash(); - if !self.storage.state_db.is_pruned(&hash, hdr.number().as_()) { - let root = H256::from_slice(hdr.state_root().as_ref()); - let state = DbState::new(self.storage.clone(), root); - Ok(CachingState::new(state, self.shared_cache.clone(), Some(hash))) - } else { - Err(client::error::ErrorKind::UnknownBlock(format!("State already discarded for {:?}", block)).into()) - } - }, - Ok(None) => Err(client::error::ErrorKind::UnknownBlock(format!("Unknown state for block {:?}", block)).into()), - Err(e) => Err(e), - } - } - - fn have_state_at(&self, hash: &Block::Hash, number: NumberFor) -> bool { - !self.storage.state_db.is_pruned(hash, number.as_()) - } - - fn destroy_state(&self, mut state: Self::State) -> Result<(), client::error::Error> { - if let Some(hash) = state.parent_hash.clone() { - let is_best = || self.blockchain.meta.read().best_hash == hash; - state.sync_cache(&[], &[], vec![], None, None, is_best); - } - Ok(()) - } +impl client::backend::Backend for Backend +where + Block: BlockT, +{ + type BlockImportOperation = BlockImportOperation; + type Blockchain = BlockchainDb; + type State = CachingState; + type ChangesTrieStorage = DbChangesTrieStorage; + + fn begin_operation(&self) -> Result { + let old_state = self.state_at(BlockId::Hash(Default::default()))?; + Ok(BlockImportOperation { + pending_block: None, + old_state, + db_updates: PrefixedMemoryDB::default(), + storage_updates: Default::default(), + changes_trie_updates: MemoryDB::default(), + aux_ops: Vec::new(), + finalized_blocks: Vec::new(), + set_head: None, + }) + } + + fn begin_state_operation( + &self, + operation: &mut Self::BlockImportOperation, + block: BlockId, + ) -> Result<(), client::error::Error> { + operation.old_state = self.state_at(block)?; + Ok(()) + } + + fn commit_operation( + &self, + operation: Self::BlockImportOperation, + ) -> Result<(), client::error::Error> { + match self.try_commit_operation(operation) { + Ok(_) => { + self.storage.state_db.apply_pending(); + Ok(()) + } + e @ Err(_) => { + self.storage.state_db.revert_pending(); + e + } + } + } + + fn finalize_block( + &self, + block: BlockId, + justification: Option, + ) -> Result<(), client::error::Error> { + let mut transaction = DBTransaction::new(); + let hash = self.blockchain.expect_block_hash_from_id(&block)?; + let header = 
self.blockchain.expect_header(block)?; + let mut displaced = None; + let commit = |displaced| { + let (hash, number, is_best, is_finalized) = self.finalize_block_with_transaction( + &mut transaction, + &hash, + &header, + None, + justification, + displaced, + )?; + self.storage.db.write(transaction).map_err(db_err)?; + self.blockchain + .update_meta(hash, number, is_best, is_finalized); + Ok(()) + }; + match commit(&mut displaced) { + Ok(()) => self.storage.state_db.apply_pending(), + e @ Err(_) => { + self.storage.state_db.revert_pending(); + if let Some(displaced) = displaced { + self.blockchain + .leaves + .write() + .undo() + .undo_finalization(displaced); + } + return e; + } + } + Ok(()) + } + + fn changes_trie_storage(&self) -> Option<&Self::ChangesTrieStorage> { + Some(&self.changes_tries_storage) + } + + fn revert(&self, n: NumberFor) -> Result, client::error::Error> { + use client::blockchain::HeaderBackend; + + let mut best = self.blockchain.info()?.best_number; + let finalized = self.blockchain.info()?.finalized_number; + let revertible = best - finalized; + let n = if revertible < n { revertible } else { n }; + + for c in 0..n.as_() { + if best == As::sa(0) { + return Ok(As::sa(c)); + } + let mut transaction = DBTransaction::new(); + match self.storage.state_db.revert_one() { + Some(commit) => { + apply_state_commit(&mut transaction, commit); + let removed = + self.blockchain + .header(BlockId::Number(best))? + .ok_or_else(|| { + client::error::ErrorKind::UnknownBlock(format!( + "Error reverting to {}. Block hash not found.", + best + )) + })?; + + best -= As::sa(1); // prev block + let hash = self.blockchain.hash(best)?.ok_or_else(|| { + client::error::ErrorKind::UnknownBlock(format!( + "Error reverting to {}. Block hash not found.", + best + )) + })?; + let key = utils::number_and_hash_to_lookup_key(best.clone(), &hash); + transaction.put(columns::META, meta_keys::BEST_BLOCK, &key); + transaction.delete(columns::KEY_LOOKUP, removed.hash().as_ref()); + children::remove_children( + &mut transaction, + columns::META, + meta_keys::CHILDREN_PREFIX, + hash, + ); + self.storage.db.write(transaction).map_err(db_err)?; + self.blockchain.update_meta(hash, best, true, false); + self.blockchain.leaves.write().revert( + removed.hash().clone(), + removed.number().clone(), + removed.parent_hash().clone(), + ); + } + None => return Ok(As::sa(c)), + } + } + Ok(n) + } + + fn blockchain(&self) -> &BlockchainDb { + &self.blockchain + } + + fn state_at(&self, block: BlockId) -> Result { + use client::blockchain::HeaderBackend as BcHeaderBackend; + + // special case for genesis initialization + match block { + BlockId::Hash(h) if h == Default::default() => { + let genesis_storage = DbGenesisStorage::new(); + let root = genesis_storage.0.clone(); + let state = DbState::new(Arc::new(genesis_storage), root); + return Ok(CachingState::new(state, self.shared_cache.clone(), None)); + } + _ => {} + } + + match self.blockchain.header(block) { + Ok(Some(ref hdr)) => { + let hash = hdr.hash(); + if !self.storage.state_db.is_pruned(&hash, hdr.number().as_()) { + let root = H256::from_slice(hdr.state_root().as_ref()); + let state = DbState::new(self.storage.clone(), root); + Ok(CachingState::new( + state, + self.shared_cache.clone(), + Some(hash), + )) + } else { + Err(client::error::ErrorKind::UnknownBlock(format!( + "State already discarded for {:?}", + block + )) + .into()) + } + } + Ok(None) => Err(client::error::ErrorKind::UnknownBlock(format!( + "Unknown state for block {:?}", + block + )) + .into()), + 
Err(e) => Err(e), + } + } + + fn have_state_at(&self, hash: &Block::Hash, number: NumberFor) -> bool { + !self.storage.state_db.is_pruned(hash, number.as_()) + } + + fn destroy_state(&self, mut state: Self::State) -> Result<(), client::error::Error> { + if let Some(hash) = state.parent_hash.clone() { + let is_best = || self.blockchain.meta.read().best_hash == hash; + state.sync_cache(&[], &[], vec![], None, None, is_best); + } + Ok(()) + } } -impl client::backend::LocalBackend for Backend -where Block: BlockT {} +impl client::backend::LocalBackend for Backend where + Block: BlockT +{ +} #[cfg(test)] mod tests { - use hash_db::HashDB; - use super::*; - use crate::columns; - use client::backend::Backend as BTrait; - use client::blockchain::Backend as BLBTrait; - use client::backend::BlockImportOperation as Op; - use runtime_primitives::testing::{Header, Block as RawBlock, ExtrinsicWrapper}; - use runtime_primitives::traits::{Hash, BlakeTwo256}; - use state_machine::{TrieMut, TrieDBMut, ChangesTrieRootsStorage, ChangesTrieStorage}; - use test_client; - - type Block = RawBlock>; - - fn prepare_changes(changes: Vec<(Vec, Vec)>) -> (H256, MemoryDB) { - let mut changes_root = H256::default(); - let mut changes_trie_update = MemoryDB::::default(); - { - let mut trie = TrieDBMut::::new( - &mut changes_trie_update, - &mut changes_root - ); - for (key, value) in changes { - trie.insert(&key, &value).unwrap(); - } - } - - (changes_root, changes_trie_update) - } - - fn insert_header( - backend: &Backend, - number: u64, - parent_hash: H256, - changes: Vec<(Vec, Vec)>, - extrinsics_root: H256, - ) -> H256 { - use runtime_primitives::generic::DigestItem; - use runtime_primitives::testing::Digest; - - let (changes_root, changes_trie_update) = prepare_changes(changes); - let digest = Digest { - logs: vec![ - DigestItem::ChangesTrieRoot(changes_root), - ], - }; - let header = Header { - number, - parent_hash, - state_root: BlakeTwo256::trie_root::<_, &[u8], &[u8]>(Vec::new()), - digest, - extrinsics_root, - }; - let header_hash = header.hash(); - - let block_id = if number == 0 { - BlockId::Hash(Default::default()) - } else { - BlockId::Number(number - 1) - }; - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, block_id).unwrap(); - op.set_block_data(header, None, None, NewBlockState::Best).unwrap(); - op.update_changes_trie(changes_trie_update).unwrap(); - backend.commit_operation(op).unwrap(); - - header_hash - } - - #[test] - fn block_hash_inserted_correctly() { - let backing = { - let db = Backend::::new_test(1, 0); - for i in 0..10 { - assert!(db.blockchain().hash(i).unwrap().is_none()); - - { - let id = if i == 0 { - BlockId::Hash(Default::default()) - } else { - BlockId::Number(i - 1) - }; - - let mut op = db.begin_operation().unwrap(); - db.begin_state_operation(&mut op, id).unwrap(); - let header = Header { - number: i, - parent_hash: if i == 0 { - Default::default() - } else { - db.blockchain.hash(i - 1).unwrap().unwrap() - }, - state_root: Default::default(), - digest: Default::default(), - extrinsics_root: Default::default(), - }; - - op.set_block_data( - header, - Some(vec![]), - None, - NewBlockState::Best, - ).unwrap(); - db.commit_operation(op).unwrap(); - } - - assert!(db.blockchain().hash(i).unwrap().is_some()) - } - db.storage.db.clone() - }; - - let backend = Backend::::from_kvdb(backing, PruningMode::keep_blocks(1), 0).unwrap(); - assert_eq!(backend.blockchain().info().unwrap().best_number, 9); - for i in 0..10 { - 
assert!(backend.blockchain().hash(i).unwrap().is_some()) - } - } - - #[test] - fn set_state_data() { - let db = Backend::::new_test(2, 0); - let hash = { - let mut op = db.begin_operation().unwrap(); - db.begin_state_operation(&mut op, BlockId::Hash(Default::default())).unwrap(); - let mut header = Header { - number: 0, - parent_hash: Default::default(), - state_root: Default::default(), - digest: Default::default(), - extrinsics_root: Default::default(), - }; - - let storage = vec![ - (vec![1, 3, 5], vec![2, 4, 6]), - (vec![1, 2, 3], vec![9, 9, 9]), - ]; - - header.state_root = op.old_state.storage_root(storage - .iter() - .cloned() - .map(|(x, y)| (x, Some(y))) - ).0.into(); - let hash = header.hash(); - - op.reset_storage(storage.iter().cloned().collect(), Default::default()).unwrap(); - op.set_block_data( - header.clone(), - Some(vec![]), - None, - NewBlockState::Best, - ).unwrap(); - - db.commit_operation(op).unwrap(); - - let state = db.state_at(BlockId::Number(0)).unwrap(); - - assert_eq!(state.storage(&[1, 3, 5]).unwrap(), Some(vec![2, 4, 6])); - assert_eq!(state.storage(&[1, 2, 3]).unwrap(), Some(vec![9, 9, 9])); - assert_eq!(state.storage(&[5, 5, 5]).unwrap(), None); - - hash - }; - - { - let mut op = db.begin_operation().unwrap(); - db.begin_state_operation(&mut op, BlockId::Number(0)).unwrap(); - let mut header = Header { - number: 1, - parent_hash: hash, - state_root: Default::default(), - digest: Default::default(), - extrinsics_root: Default::default(), - }; - - let storage = vec![ - (vec![1, 3, 5], None), - (vec![5, 5, 5], Some(vec![4, 5, 6])), - ]; - - let (root, overlay) = op.old_state.storage_root(storage.iter().cloned()); - op.update_db_storage(overlay).unwrap(); - header.state_root = root.into(); - - op.set_block_data( - header, - Some(vec![]), - None, - NewBlockState::Best, - ).unwrap(); - - db.commit_operation(op).unwrap(); - - let state = db.state_at(BlockId::Number(1)).unwrap(); - - assert_eq!(state.storage(&[1, 3, 5]).unwrap(), None); - assert_eq!(state.storage(&[1, 2, 3]).unwrap(), Some(vec![9, 9, 9])); - assert_eq!(state.storage(&[5, 5, 5]).unwrap(), Some(vec![4, 5, 6])); - } - } - - #[test] - fn delete_only_when_negative_rc() { - let _ = ::env_logger::try_init(); - let key; - let backend = Backend::::new_test(1, 0); - - let hash = { - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, BlockId::Hash(Default::default())).unwrap(); - let mut header = Header { - number: 0, - parent_hash: Default::default(), - state_root: Default::default(), - digest: Default::default(), - extrinsics_root: Default::default(), - }; - - let storage: Vec<(_, _)> = vec![]; - - header.state_root = op.old_state.storage_root(storage - .iter() - .cloned() - .map(|(x, y)| (x, Some(y))) - ).0.into(); - let hash = header.hash(); - - op.reset_storage(storage.iter().cloned().collect(), Default::default()).unwrap(); - - key = op.db_updates.insert(&[], b"hello"); - op.set_block_data( - header, - Some(vec![]), - None, - NewBlockState::Best, - ).unwrap(); - - backend.commit_operation(op).unwrap(); - - assert_eq!(backend.storage.db.get(columns::STATE, key.as_bytes()).unwrap().unwrap(), &b"hello"[..]); - hash - }; - - let hash = { - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, BlockId::Number(0)).unwrap(); - let mut header = Header { - number: 1, - parent_hash: hash, - state_root: Default::default(), - digest: Default::default(), - extrinsics_root: Default::default(), - }; - - let storage: Vec<(_, _)> = vec![]; - - 
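The `delete_only_when_negative_rc` test being reformatted around this point leans on the state database's reference counting; a condensed timeline of what it asserts, under its `new_test(1, 0)` pruning window (names as in the test itself):

    // block 0: key inserted once                -> value readable from columns::STATE
    // block 1: inserted again, removed once     -> value still present
    // block 2: removed once more                -> still present until the
    //          one-block pruning window has passed
    // block 3: empty block committed            -> value finally deleted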
header.state_root = op.old_state.storage_root(storage - .iter() - .cloned() - .map(|(x, y)| (x, Some(y))) - ).0.into(); - let hash = header.hash(); - - op.db_updates.insert(&[], b"hello"); - op.db_updates.remove(&key, &[]); - op.set_block_data( - header, - Some(vec![]), - None, - NewBlockState::Best, - ).unwrap(); - - backend.commit_operation(op).unwrap(); - - assert_eq!(backend.storage.db.get(columns::STATE, key.as_bytes()).unwrap().unwrap(), &b"hello"[..]); - hash - }; - - let hash = { - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, BlockId::Number(1)).unwrap(); - let mut header = Header { - number: 2, - parent_hash: hash, - state_root: Default::default(), - digest: Default::default(), - extrinsics_root: Default::default(), - }; - - let storage: Vec<(_, _)> = vec![]; - - header.state_root = op.old_state.storage_root(storage - .iter() - .cloned() - .map(|(x, y)| (x, Some(y))) - ).0.into(); - let hash = header.hash(); - - op.db_updates.remove(&key, &[]); - op.set_block_data( - header, - Some(vec![]), - None, - NewBlockState::Best, - ).unwrap(); - - backend.commit_operation(op).unwrap(); - - assert!(backend.storage.db.get(columns::STATE, key.as_bytes()).unwrap().is_some()); - hash - }; - - { - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, BlockId::Number(2)).unwrap(); - let mut header = Header { - number: 3, - parent_hash: hash, - state_root: Default::default(), - digest: Default::default(), - extrinsics_root: Default::default(), - }; - - let storage: Vec<(_, _)> = vec![]; - - header.state_root = op.old_state.storage_root(storage - .iter() - .cloned() - .map(|(x, y)| (x, Some(y))) - ).0.into(); - - op.set_block_data( - header, - Some(vec![]), - None, - NewBlockState::Best, - ).unwrap(); - - backend.commit_operation(op).unwrap(); - - assert!(backend.storage.db.get(columns::STATE, key.as_bytes()).unwrap().is_none()); - } - - backend.finalize_block(BlockId::Number(1), None).unwrap(); - backend.finalize_block(BlockId::Number(2), None).unwrap(); - backend.finalize_block(BlockId::Number(3), None).unwrap(); - assert!(backend.storage.db.get(columns::STATE, key.as_bytes()).unwrap().is_none()); - } - - #[test] - fn changes_trie_storage_works() { - let backend = Backend::::new_test(1000, 100); - backend.changes_tries_storage.meta.write().finalized_number = 1000; - - - let check_changes = |backend: &Backend, block: u64, changes: Vec<(Vec, Vec)>| { - let (changes_root, mut changes_trie_update) = prepare_changes(changes); - let anchor = state_machine::ChangesTrieAnchorBlockId { - hash: backend.blockchain().header(BlockId::Number(block)).unwrap().unwrap().hash(), - number: block - }; - assert_eq!(backend.changes_tries_storage.root(&anchor, block), Ok(Some(changes_root))); - - for (key, (val, _)) in changes_trie_update.drain() { - assert_eq!(backend.changes_trie_storage().unwrap().get(&key, &[]), Ok(Some(val))); - } - }; - - let changes0 = vec![(b"key_at_0".to_vec(), b"val_at_0".to_vec())]; - let changes1 = vec![ - (b"key_at_1".to_vec(), b"val_at_1".to_vec()), - (b"another_key_at_1".to_vec(), b"another_val_at_1".to_vec()), - ]; - let changes2 = vec![(b"key_at_2".to_vec(), b"val_at_2".to_vec())]; - - let block0 = insert_header(&backend, 0, Default::default(), changes0.clone(), Default::default()); - let block1 = insert_header(&backend, 1, block0, changes1.clone(), Default::default()); - let _ = insert_header(&backend, 2, block1, changes2.clone(), Default::default()); - - // check that the storage contains tries for all 
blocks - check_changes(&backend, 0, changes0); - check_changes(&backend, 1, changes1); - check_changes(&backend, 2, changes2); - } - - #[test] - fn changes_trie_storage_works_with_forks() { - let backend = Backend::::new_test(1000, 100); - - let changes0 = vec![(b"k0".to_vec(), b"v0".to_vec())]; - let changes1 = vec![(b"k1".to_vec(), b"v1".to_vec())]; - let changes2 = vec![(b"k2".to_vec(), b"v2".to_vec())]; - let block0 = insert_header(&backend, 0, Default::default(), changes0.clone(), Default::default()); - let block1 = insert_header(&backend, 1, block0, changes1.clone(), Default::default()); - let block2 = insert_header(&backend, 2, block1, changes2.clone(), Default::default()); - - let changes2_1_0 = vec![(b"k3".to_vec(), b"v3".to_vec())]; - let changes2_1_1 = vec![(b"k4".to_vec(), b"v4".to_vec())]; - let block2_1_0 = insert_header(&backend, 3, block2, changes2_1_0.clone(), Default::default()); - let block2_1_1 = insert_header(&backend, 4, block2_1_0, changes2_1_1.clone(), Default::default()); - - let changes2_2_0 = vec![(b"k5".to_vec(), b"v5".to_vec())]; - let changes2_2_1 = vec![(b"k6".to_vec(), b"v6".to_vec())]; - let block2_2_0 = insert_header(&backend, 3, block2, changes2_2_0.clone(), Default::default()); - let block2_2_1 = insert_header(&backend, 4, block2_2_0, changes2_2_1.clone(), Default::default()); - - // finalize block1 - backend.changes_tries_storage.meta.write().finalized_number = 1; - - // branch1: when asking for finalized block hash - let (changes1_root, _) = prepare_changes(changes1); - let anchor = state_machine::ChangesTrieAnchorBlockId { hash: block2_1_1, number: 4 }; - assert_eq!(backend.changes_tries_storage.root(&anchor, 1), Ok(Some(changes1_root))); - - // branch2: when asking for finalized block hash - let anchor = state_machine::ChangesTrieAnchorBlockId { hash: block2_2_1, number: 4 }; - assert_eq!(backend.changes_tries_storage.root(&anchor, 1), Ok(Some(changes1_root))); - - // branch1: when asking for non-finalized block hash (search by traversal) - let (changes2_1_0_root, _) = prepare_changes(changes2_1_0); - let anchor = state_machine::ChangesTrieAnchorBlockId { hash: block2_1_1, number: 4 }; - assert_eq!(backend.changes_tries_storage.root(&anchor, 3), Ok(Some(changes2_1_0_root))); - - // branch2: when asking for non-finalized block hash (search using canonicalized hint) - let (changes2_2_0_root, _) = prepare_changes(changes2_2_0); - let anchor = state_machine::ChangesTrieAnchorBlockId { hash: block2_2_1, number: 4 }; - assert_eq!(backend.changes_tries_storage.root(&anchor, 3), Ok(Some(changes2_2_0_root))); - - // finalize first block of branch2 (block2_2_0) - backend.changes_tries_storage.meta.write().finalized_number = 3; - - // branch2: when asking for finalized block of this branch - assert_eq!(backend.changes_tries_storage.root(&anchor, 3), Ok(Some(changes2_2_0_root))); - - // branch1: when asking for finalized block of other branch - // => result is incorrect (returned for the block of branch1), but this is expected, - // because the other fork is abandoned (forked before finalized header) - let anchor = state_machine::ChangesTrieAnchorBlockId { hash: block2_1_1, number: 4 }; - assert_eq!(backend.changes_tries_storage.root(&anchor, 3), Ok(Some(changes2_2_0_root))); - } - - #[test] - fn changes_tries_with_digest_are_pruned_on_finalization() { - let mut backend = Backend::::new_test(1000, 100); - backend.changes_tries_storage.min_blocks_to_keep = Some(8); - let config = ChangesTrieConfiguration { - digest_interval: 2, - digest_levels: 2, - }; - - // 
insert some blocks - let block0 = insert_header(&backend, 0, Default::default(), vec![(b"key_at_0".to_vec(), b"val_at_0".to_vec())], Default::default()); - let block1 = insert_header(&backend, 1, block0, vec![(b"key_at_1".to_vec(), b"val_at_1".to_vec())], Default::default()); - let block2 = insert_header(&backend, 2, block1, vec![(b"key_at_2".to_vec(), b"val_at_2".to_vec())], Default::default()); - let block3 = insert_header(&backend, 3, block2, vec![(b"key_at_3".to_vec(), b"val_at_3".to_vec())], Default::default()); - let block4 = insert_header(&backend, 4, block3, vec![(b"key_at_4".to_vec(), b"val_at_4".to_vec())], Default::default()); - let block5 = insert_header(&backend, 5, block4, vec![(b"key_at_5".to_vec(), b"val_at_5".to_vec())], Default::default()); - let block6 = insert_header(&backend, 6, block5, vec![(b"key_at_6".to_vec(), b"val_at_6".to_vec())], Default::default()); - let block7 = insert_header(&backend, 7, block6, vec![(b"key_at_7".to_vec(), b"val_at_7".to_vec())], Default::default()); - let block8 = insert_header(&backend, 8, block7, vec![(b"key_at_8".to_vec(), b"val_at_8".to_vec())], Default::default()); - let block9 = insert_header(&backend, 9, block8, vec![(b"key_at_9".to_vec(), b"val_at_9".to_vec())], Default::default()); - let block10 = insert_header(&backend, 10, block9, vec![(b"key_at_10".to_vec(), b"val_at_10".to_vec())], Default::default()); - let block11 = insert_header(&backend, 11, block10, vec![(b"key_at_11".to_vec(), b"val_at_11".to_vec())], Default::default()); - let block12 = insert_header(&backend, 12, block11, vec![(b"key_at_12".to_vec(), b"val_at_12".to_vec())], Default::default()); - let block13 = insert_header(&backend, 13, block12, vec![(b"key_at_13".to_vec(), b"val_at_13".to_vec())], Default::default()); - backend.changes_tries_storage.meta.write().finalized_number = 13; - - // check that roots of all tries are in the columns::CHANGES_TRIE - let anchor = state_machine::ChangesTrieAnchorBlockId { hash: block13, number: 13 }; - fn read_changes_trie_root(backend: &Backend, num: u64) -> H256 { - backend.blockchain().header(BlockId::Number(num)).unwrap().unwrap().digest().logs().iter() - .find(|i| i.as_changes_trie_root().is_some()).unwrap().as_changes_trie_root().unwrap().clone() - } - let root1 = read_changes_trie_root(&backend, 1); assert_eq!(backend.changes_tries_storage.root(&anchor, 1).unwrap(), Some(root1)); - let root2 = read_changes_trie_root(&backend, 2); assert_eq!(backend.changes_tries_storage.root(&anchor, 2).unwrap(), Some(root2)); - let root3 = read_changes_trie_root(&backend, 3); assert_eq!(backend.changes_tries_storage.root(&anchor, 3).unwrap(), Some(root3)); - let root4 = read_changes_trie_root(&backend, 4); assert_eq!(backend.changes_tries_storage.root(&anchor, 4).unwrap(), Some(root4)); - let root5 = read_changes_trie_root(&backend, 5); assert_eq!(backend.changes_tries_storage.root(&anchor, 5).unwrap(), Some(root5)); - let root6 = read_changes_trie_root(&backend, 6); assert_eq!(backend.changes_tries_storage.root(&anchor, 6).unwrap(), Some(root6)); - let root7 = read_changes_trie_root(&backend, 7); assert_eq!(backend.changes_tries_storage.root(&anchor, 7).unwrap(), Some(root7)); - let root8 = read_changes_trie_root(&backend, 8); assert_eq!(backend.changes_tries_storage.root(&anchor, 8).unwrap(), Some(root8)); - let root9 = read_changes_trie_root(&backend, 9); assert_eq!(backend.changes_tries_storage.root(&anchor, 9).unwrap(), Some(root9)); - let root10 = read_changes_trie_root(&backend, 10); 
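The prune boundaries exercised just below follow from the configuration at the top of this test; a rough worked example, assuming only the test's own numbers (digest_interval = 2, digest_levels = 2, min_blocks_to_keep = Some(8)):

    // The widest digest spans digest_interval ^ digest_levels = 2^2 = 4 blocks.
    // Simulated finalization of #12 keeps the last 8 blocks, so everything at
    // or below #12 - 8 = #4 is prunable; #1..#4 is one full digest range, and
    // those four tries are dropped while #5..#8 survive.
    // Finalizing #16 releases the next range (#16 - 8 = #8), pruning #5..#8.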
assert_eq!(backend.changes_tries_storage.root(&anchor, 10).unwrap(), Some(root10)); - let root11 = read_changes_trie_root(&backend, 11); assert_eq!(backend.changes_tries_storage.root(&anchor, 11).unwrap(), Some(root11)); - let root12 = read_changes_trie_root(&backend, 12); assert_eq!(backend.changes_tries_storage.root(&anchor, 12).unwrap(), Some(root12)); - - // now simulate finalization of block#12, causing prune of tries at #1..#4 - let mut tx = DBTransaction::new(); - backend.changes_tries_storage.prune(Some(config.clone()), &mut tx, Default::default(), 12); - backend.storage.db.write(tx).unwrap(); - assert!(backend.changes_tries_storage.get(&root1, &[]).unwrap().is_none()); - assert!(backend.changes_tries_storage.get(&root2, &[]).unwrap().is_none()); - assert!(backend.changes_tries_storage.get(&root3, &[]).unwrap().is_none()); - assert!(backend.changes_tries_storage.get(&root4, &[]).unwrap().is_none()); - assert!(backend.changes_tries_storage.get(&root5, &[]).unwrap().is_some()); - assert!(backend.changes_tries_storage.get(&root6, &[]).unwrap().is_some()); - assert!(backend.changes_tries_storage.get(&root7, &[]).unwrap().is_some()); - assert!(backend.changes_tries_storage.get(&root8, &[]).unwrap().is_some()); - - // now simulate finalization of block#16, causing prune of tries at #5..#8 - let mut tx = DBTransaction::new(); - backend.changes_tries_storage.prune(Some(config.clone()), &mut tx, Default::default(), 16); - backend.storage.db.write(tx).unwrap(); - assert!(backend.changes_tries_storage.get(&root5, &[]).unwrap().is_none()); - assert!(backend.changes_tries_storage.get(&root6, &[]).unwrap().is_none()); - assert!(backend.changes_tries_storage.get(&root7, &[]).unwrap().is_none()); - assert!(backend.changes_tries_storage.get(&root8, &[]).unwrap().is_none()); - - // now "change" pruning mode to archive && simulate finalization of block#20 - // => no changes tries are pruned, because we never prune in archive mode - backend.changes_tries_storage.min_blocks_to_keep = None; - let mut tx = DBTransaction::new(); - backend.changes_tries_storage.prune(Some(config), &mut tx, Default::default(), 20); - backend.storage.db.write(tx).unwrap(); - assert!(backend.changes_tries_storage.get(&root9, &[]).unwrap().is_some()); - assert!(backend.changes_tries_storage.get(&root10, &[]).unwrap().is_some()); - assert!(backend.changes_tries_storage.get(&root11, &[]).unwrap().is_some()); - assert!(backend.changes_tries_storage.get(&root12, &[]).unwrap().is_some()); - } - - #[test] - fn changes_tries_without_digest_are_pruned_on_finalization() { - let mut backend = Backend::::new_test(1000, 100); - backend.changes_tries_storage.min_blocks_to_keep = Some(4); - let config = ChangesTrieConfiguration { - digest_interval: 0, - digest_levels: 0, - }; - - // insert some blocks - let block0 = insert_header(&backend, 0, Default::default(), vec![(b"key_at_0".to_vec(), b"val_at_0".to_vec())], Default::default()); - let block1 = insert_header(&backend, 1, block0, vec![(b"key_at_1".to_vec(), b"val_at_1".to_vec())], Default::default()); - let block2 = insert_header(&backend, 2, block1, vec![(b"key_at_2".to_vec(), b"val_at_2".to_vec())], Default::default()); - let block3 = insert_header(&backend, 3, block2, vec![(b"key_at_3".to_vec(), b"val_at_3".to_vec())], Default::default()); - let block4 = insert_header(&backend, 4, block3, vec![(b"key_at_4".to_vec(), b"val_at_4".to_vec())], Default::default()); - let block5 = insert_header(&backend, 5, block4, vec![(b"key_at_5".to_vec(), b"val_at_5".to_vec())], Default::default()); - 
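In the no-digest variant that follows, the pruning window degenerates to one trie per finalized block; with this test's min_blocks_to_keep = Some(4):

    // finalize #5 -> only #5 - 4 = #1 falls outside the window, trie #1 pruned
    // finalize #6 -> #6 - 4 = #2 falls outside, trie #2 pruned, #3 onward kept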
let block6 = insert_header(&backend, 6, block5, vec![(b"key_at_6".to_vec(), b"val_at_6".to_vec())], Default::default()); - - // check that roots of all tries are in the columns::CHANGES_TRIE - let anchor = state_machine::ChangesTrieAnchorBlockId { hash: block6, number: 6 }; - fn read_changes_trie_root(backend: &Backend, num: u64) -> H256 { - backend.blockchain().header(BlockId::Number(num)).unwrap().unwrap().digest().logs().iter() - .find(|i| i.as_changes_trie_root().is_some()).unwrap().as_changes_trie_root().unwrap().clone() - } - - let root1 = read_changes_trie_root(&backend, 1); assert_eq!(backend.changes_tries_storage.root(&anchor, 1).unwrap(), Some(root1)); - let root2 = read_changes_trie_root(&backend, 2); assert_eq!(backend.changes_tries_storage.root(&anchor, 2).unwrap(), Some(root2)); - let root3 = read_changes_trie_root(&backend, 3); assert_eq!(backend.changes_tries_storage.root(&anchor, 3).unwrap(), Some(root3)); - let root4 = read_changes_trie_root(&backend, 4); assert_eq!(backend.changes_tries_storage.root(&anchor, 4).unwrap(), Some(root4)); - let root5 = read_changes_trie_root(&backend, 5); assert_eq!(backend.changes_tries_storage.root(&anchor, 5).unwrap(), Some(root5)); - let root6 = read_changes_trie_root(&backend, 6); assert_eq!(backend.changes_tries_storage.root(&anchor, 6).unwrap(), Some(root6)); - - // now simulate finalization of block#5, causing prune of trie at #1 - let mut tx = DBTransaction::new(); - backend.changes_tries_storage.prune(Some(config.clone()), &mut tx, block5, 5); - backend.storage.db.write(tx).unwrap(); - assert!(backend.changes_tries_storage.get(&root1, &[]).unwrap().is_none()); - assert!(backend.changes_tries_storage.get(&root2, &[]).unwrap().is_some()); - - // now simulate finalization of block#6, causing prune of tries at #2 - let mut tx = DBTransaction::new(); - backend.changes_tries_storage.prune(Some(config.clone()), &mut tx, block6, 6); - backend.storage.db.write(tx).unwrap(); - assert!(backend.changes_tries_storage.get(&root2, &[]).unwrap().is_none()); - assert!(backend.changes_tries_storage.get(&root3, &[]).unwrap().is_some()); - } - - #[test] - fn tree_route_works() { - let backend = Backend::::new_test(1000, 100); - let block0 = insert_header(&backend, 0, Default::default(), Vec::new(), Default::default()); - - // fork from genesis: 3 prong. - let a1 = insert_header(&backend, 1, block0, Vec::new(), Default::default()); - let a2 = insert_header(&backend, 2, a1, Vec::new(), Default::default()); - let a3 = insert_header(&backend, 3, a2, Vec::new(), Default::default()); - - // fork from genesis: 2 prong. 
- let b1 = insert_header(&backend, 1, block0, Vec::new(), H256::from([1; 32])); - let b2 = insert_header(&backend, 2, b1, Vec::new(), Default::default()); - - { - let tree_route = ::client::blockchain::tree_route( - backend.blockchain(), - BlockId::Hash(a3), - BlockId::Hash(b2) - ).unwrap(); - - assert_eq!(tree_route.common_block().hash, block0); - assert_eq!(tree_route.retracted().iter().map(|r| r.hash).collect::>(), vec![a3, a2, a1]); - assert_eq!(tree_route.enacted().iter().map(|r| r.hash).collect::>(), vec![b1, b2]); - } - - { - let tree_route = ::client::blockchain::tree_route( - backend.blockchain(), - BlockId::Hash(a1), - BlockId::Hash(a3), - ).unwrap(); - - assert_eq!(tree_route.common_block().hash, a1); - assert!(tree_route.retracted().is_empty()); - assert_eq!(tree_route.enacted().iter().map(|r| r.hash).collect::>(), vec![a2, a3]); - } - - { - let tree_route = ::client::blockchain::tree_route( - backend.blockchain(), - BlockId::Hash(a3), - BlockId::Hash(a1), - ).unwrap(); - - assert_eq!(tree_route.common_block().hash, a1); - assert_eq!(tree_route.retracted().iter().map(|r| r.hash).collect::>(), vec![a3, a2]); - assert!(tree_route.enacted().is_empty()); - } - - { - let tree_route = ::client::blockchain::tree_route( - backend.blockchain(), - BlockId::Hash(a2), - BlockId::Hash(a2), - ).unwrap(); - - assert_eq!(tree_route.common_block().hash, a2); - assert!(tree_route.retracted().is_empty()); - assert!(tree_route.enacted().is_empty()); - } - } - - #[test] - fn tree_route_child() { - let backend = Backend::::new_test(1000, 100); - - let block0 = insert_header(&backend, 0, Default::default(), Vec::new(), Default::default()); - let block1 = insert_header(&backend, 1, block0, Vec::new(), Default::default()); - - { - let tree_route = ::client::blockchain::tree_route( - backend.blockchain(), - BlockId::Hash(block0), - BlockId::Hash(block1), - ).unwrap(); - - assert_eq!(tree_route.common_block().hash, block0); - assert!(tree_route.retracted().is_empty()); - assert_eq!(tree_route.enacted().iter().map(|r| r.hash).collect::>(), vec![block1]); - } - } - - #[test] - fn test_leaves_with_complex_block_tree() { - let backend: Arc> = Arc::new(Backend::new_test(20, 20)); - test_client::trait_tests::test_leaves_for_backend(backend); - } - - #[test] - fn test_children_with_complex_block_tree() { - let backend: Arc> = Arc::new(Backend::new_test(20, 20)); - test_client::trait_tests::test_children_for_backend(backend); - } - - #[test] - fn test_blockchain_query_by_number_gets_canonical() { - let backend: Arc> = Arc::new(Backend::new_test(20, 20)); - test_client::trait_tests::test_blockchain_query_by_number_gets_canonical(backend); - } - - #[test] - fn test_leaves_pruned_on_finality() { - let backend: Backend = Backend::new_test(10, 10); - let block0 = insert_header(&backend, 0, Default::default(), Default::default(), Default::default()); - - let block1_a = insert_header(&backend, 1, block0, Default::default(), Default::default()); - let block1_b = insert_header(&backend, 1, block0, Default::default(), [1; 32].into()); - let block1_c = insert_header(&backend, 1, block0, Default::default(), [2; 32].into()); - - assert_eq!(backend.blockchain().leaves().unwrap(), vec![block1_a, block1_b, block1_c]); - - let block2_a = insert_header(&backend, 2, block1_a, Default::default(), Default::default()); - let block2_b = insert_header(&backend, 2, block1_b, Default::default(), Default::default()); - let block2_c = insert_header(&backend, 2, block1_b, Default::default(), [1; 32].into()); - - 
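The assertions that follow check leaf pruning on finality; with the fork layout just built (three height-1 leaves, two of which were extended to height 2):

    // before finalization: leaves are block2_a, block2_b, block2_c (height 2)
    //                      plus block1_c (height 1)
    // finalizing block1_a and then block2_a displaces block1_c, which sits
    // below the finalized height; the other height-2 leaves remain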
assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2_a, block2_b, block2_c, block1_c]); - - backend.finalize_block(BlockId::hash(block1_a), None).unwrap(); - backend.finalize_block(BlockId::hash(block2_a), None).unwrap(); - - // leaves at same height stay. Leaves at lower heights pruned. - assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2_a, block2_b, block2_c]); - } - - #[test] - fn test_aux() { - let backend: Backend = Backend::new_test(0, 0); - assert!(backend.get_aux(b"test").unwrap().is_none()); - backend.insert_aux(&[(&b"test"[..], &b"hello"[..])], &[]).unwrap(); - assert_eq!(b"hello", &backend.get_aux(b"test").unwrap().unwrap()[..]); - backend.insert_aux(&[], &[&b"test"[..]]).unwrap(); - assert!(backend.get_aux(b"test").unwrap().is_none()); - } - - #[test] - fn test_finalize_block_with_justification() { - use client::blockchain::{Backend as BlockChainBackend}; - - let backend = Backend::::new_test(10, 10); - - let block0 = insert_header(&backend, 0, Default::default(), Default::default(), Default::default()); - let _ = insert_header(&backend, 1, block0, Default::default(), Default::default()); - - let justification = Some(vec![1, 2, 3]); - backend.finalize_block(BlockId::Number(1), justification.clone()).unwrap(); - - assert_eq!( - backend.blockchain().justification(BlockId::Number(1)).unwrap(), - justification, - ); - } - - #[test] - fn test_finalize_multiple_blocks_in_single_op() { - let backend = Backend::::new_test(10, 10); - - let block0 = insert_header(&backend, 0, Default::default(), Default::default(), Default::default()); - let block1 = insert_header(&backend, 1, block0, Default::default(), Default::default()); - let block2 = insert_header(&backend, 2, block1, Default::default(), Default::default()); - { - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, BlockId::Hash(block0)).unwrap(); - op.mark_finalized(BlockId::Hash(block1), None).unwrap(); - op.mark_finalized(BlockId::Hash(block2), None).unwrap(); - backend.commit_operation(op).unwrap(); - } - } - - #[test] - fn test_finalize_non_sequential() { - let backend = Backend::::new_test(10, 10); - - let block0 = insert_header(&backend, 0, Default::default(), Default::default(), Default::default()); - let block1 = insert_header(&backend, 1, block0, Default::default(), Default::default()); - let block2 = insert_header(&backend, 2, block1, Default::default(), Default::default()); - { - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, BlockId::Hash(block0)).unwrap(); - op.mark_finalized(BlockId::Hash(block2), None).unwrap(); - backend.commit_operation(op).unwrap_err(); - } - } + use super::*; + use crate::columns; + use client::backend::Backend as BTrait; + use client::backend::BlockImportOperation as Op; + use client::blockchain::Backend as BLBTrait; + use hash_db::HashDB; + use runtime_primitives::testing::{Block as RawBlock, ExtrinsicWrapper, Header}; + use runtime_primitives::traits::{BlakeTwo256, Hash}; + use state_machine::{ChangesTrieRootsStorage, ChangesTrieStorage, TrieDBMut, TrieMut}; + use test_client; + + type Block = RawBlock>; + + fn prepare_changes(changes: Vec<(Vec, Vec)>) -> (H256, MemoryDB) { + let mut changes_root = H256::default(); + let mut changes_trie_update = MemoryDB::::default(); + { + let mut trie = + TrieDBMut::::new(&mut changes_trie_update, &mut changes_root); + for (key, value) in changes { + trie.insert(&key, &value).unwrap(); + } + } + + (changes_root, changes_trie_update) + } + + fn 
insert_header( + backend: &Backend, + number: u64, + parent_hash: H256, + changes: Vec<(Vec, Vec)>, + extrinsics_root: H256, + ) -> H256 { + use runtime_primitives::generic::DigestItem; + use runtime_primitives::testing::Digest; + + let (changes_root, changes_trie_update) = prepare_changes(changes); + let digest = Digest { + logs: vec![DigestItem::ChangesTrieRoot(changes_root)], + }; + let header = Header { + number, + parent_hash, + state_root: BlakeTwo256::trie_root::<_, &[u8], &[u8]>(Vec::new()), + digest, + extrinsics_root, + }; + let header_hash = header.hash(); + + let block_id = if number == 0 { + BlockId::Hash(Default::default()) + } else { + BlockId::Number(number - 1) + }; + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, block_id).unwrap(); + op.set_block_data(header, None, None, NewBlockState::Best) + .unwrap(); + op.update_changes_trie(changes_trie_update).unwrap(); + backend.commit_operation(op).unwrap(); + + header_hash + } + + #[test] + fn block_hash_inserted_correctly() { + let backing = { + let db = Backend::::new_test(1, 0); + for i in 0..10 { + assert!(db.blockchain().hash(i).unwrap().is_none()); + + { + let id = if i == 0 { + BlockId::Hash(Default::default()) + } else { + BlockId::Number(i - 1) + }; + + let mut op = db.begin_operation().unwrap(); + db.begin_state_operation(&mut op, id).unwrap(); + let header = Header { + number: i, + parent_hash: if i == 0 { + Default::default() + } else { + db.blockchain.hash(i - 1).unwrap().unwrap() + }, + state_root: Default::default(), + digest: Default::default(), + extrinsics_root: Default::default(), + }; + + op.set_block_data(header, Some(vec![]), None, NewBlockState::Best) + .unwrap(); + db.commit_operation(op).unwrap(); + } + + assert!(db.blockchain().hash(i).unwrap().is_some()) + } + db.storage.db.clone() + }; + + let backend = Backend::::from_kvdb(backing, PruningMode::keep_blocks(1), 0).unwrap(); + assert_eq!(backend.blockchain().info().unwrap().best_number, 9); + for i in 0..10 { + assert!(backend.blockchain().hash(i).unwrap().is_some()) + } + } + + #[test] + fn set_state_data() { + let db = Backend::::new_test(2, 0); + let hash = { + let mut op = db.begin_operation().unwrap(); + db.begin_state_operation(&mut op, BlockId::Hash(Default::default())) + .unwrap(); + let mut header = Header { + number: 0, + parent_hash: Default::default(), + state_root: Default::default(), + digest: Default::default(), + extrinsics_root: Default::default(), + }; + + let storage = vec![ + (vec![1, 3, 5], vec![2, 4, 6]), + (vec![1, 2, 3], vec![9, 9, 9]), + ]; + + header.state_root = op + .old_state + .storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y)))) + .0 + .into(); + let hash = header.hash(); + + op.reset_storage(storage.iter().cloned().collect(), Default::default()) + .unwrap(); + op.set_block_data(header.clone(), Some(vec![]), None, NewBlockState::Best) + .unwrap(); + + db.commit_operation(op).unwrap(); + + let state = db.state_at(BlockId::Number(0)).unwrap(); + + assert_eq!(state.storage(&[1, 3, 5]).unwrap(), Some(vec![2, 4, 6])); + assert_eq!(state.storage(&[1, 2, 3]).unwrap(), Some(vec![9, 9, 9])); + assert_eq!(state.storage(&[5, 5, 5]).unwrap(), None); + + hash + }; + + { + let mut op = db.begin_operation().unwrap(); + db.begin_state_operation(&mut op, BlockId::Number(0)) + .unwrap(); + let mut header = Header { + number: 1, + parent_hash: hash, + state_root: Default::default(), + digest: Default::default(), + extrinsics_root: Default::default(), + }; + + let storage = vec![(vec![1, 
3, 5], None), (vec![5, 5, 5], Some(vec![4, 5, 6]))]; + + let (root, overlay) = op.old_state.storage_root(storage.iter().cloned()); + op.update_db_storage(overlay).unwrap(); + header.state_root = root.into(); + + op.set_block_data(header, Some(vec![]), None, NewBlockState::Best) + .unwrap(); + + db.commit_operation(op).unwrap(); + + let state = db.state_at(BlockId::Number(1)).unwrap(); + + assert_eq!(state.storage(&[1, 3, 5]).unwrap(), None); + assert_eq!(state.storage(&[1, 2, 3]).unwrap(), Some(vec![9, 9, 9])); + assert_eq!(state.storage(&[5, 5, 5]).unwrap(), Some(vec![4, 5, 6])); + } + } + + #[test] + fn delete_only_when_negative_rc() { + let _ = ::env_logger::try_init(); + let key; + let backend = Backend::::new_test(1, 0); + + let hash = { + let mut op = backend.begin_operation().unwrap(); + backend + .begin_state_operation(&mut op, BlockId::Hash(Default::default())) + .unwrap(); + let mut header = Header { + number: 0, + parent_hash: Default::default(), + state_root: Default::default(), + digest: Default::default(), + extrinsics_root: Default::default(), + }; + + let storage: Vec<(_, _)> = vec![]; + + header.state_root = op + .old_state + .storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y)))) + .0 + .into(); + let hash = header.hash(); + + op.reset_storage(storage.iter().cloned().collect(), Default::default()) + .unwrap(); + + key = op.db_updates.insert(&[], b"hello"); + op.set_block_data(header, Some(vec![]), None, NewBlockState::Best) + .unwrap(); + + backend.commit_operation(op).unwrap(); + + assert_eq!( + backend + .storage + .db + .get(columns::STATE, key.as_bytes()) + .unwrap() + .unwrap(), + &b"hello"[..] + ); + hash + }; + + let hash = { + let mut op = backend.begin_operation().unwrap(); + backend + .begin_state_operation(&mut op, BlockId::Number(0)) + .unwrap(); + let mut header = Header { + number: 1, + parent_hash: hash, + state_root: Default::default(), + digest: Default::default(), + extrinsics_root: Default::default(), + }; + + let storage: Vec<(_, _)> = vec![]; + + header.state_root = op + .old_state + .storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y)))) + .0 + .into(); + let hash = header.hash(); + + op.db_updates.insert(&[], b"hello"); + op.db_updates.remove(&key, &[]); + op.set_block_data(header, Some(vec![]), None, NewBlockState::Best) + .unwrap(); + + backend.commit_operation(op).unwrap(); + + assert_eq!( + backend + .storage + .db + .get(columns::STATE, key.as_bytes()) + .unwrap() + .unwrap(), + &b"hello"[..] 
+ ); + hash + }; + + let hash = { + let mut op = backend.begin_operation().unwrap(); + backend + .begin_state_operation(&mut op, BlockId::Number(1)) + .unwrap(); + let mut header = Header { + number: 2, + parent_hash: hash, + state_root: Default::default(), + digest: Default::default(), + extrinsics_root: Default::default(), + }; + + let storage: Vec<(_, _)> = vec![]; + + header.state_root = op + .old_state + .storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y)))) + .0 + .into(); + let hash = header.hash(); + + op.db_updates.remove(&key, &[]); + op.set_block_data(header, Some(vec![]), None, NewBlockState::Best) + .unwrap(); + + backend.commit_operation(op).unwrap(); + + assert!(backend + .storage + .db + .get(columns::STATE, key.as_bytes()) + .unwrap() + .is_some()); + hash + }; + + { + let mut op = backend.begin_operation().unwrap(); + backend + .begin_state_operation(&mut op, BlockId::Number(2)) + .unwrap(); + let mut header = Header { + number: 3, + parent_hash: hash, + state_root: Default::default(), + digest: Default::default(), + extrinsics_root: Default::default(), + }; + + let storage: Vec<(_, _)> = vec![]; + + header.state_root = op + .old_state + .storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y)))) + .0 + .into(); + + op.set_block_data(header, Some(vec![]), None, NewBlockState::Best) + .unwrap(); + + backend.commit_operation(op).unwrap(); + + assert!(backend + .storage + .db + .get(columns::STATE, key.as_bytes()) + .unwrap() + .is_none()); + } + + backend.finalize_block(BlockId::Number(1), None).unwrap(); + backend.finalize_block(BlockId::Number(2), None).unwrap(); + backend.finalize_block(BlockId::Number(3), None).unwrap(); + assert!(backend + .storage + .db + .get(columns::STATE, key.as_bytes()) + .unwrap() + .is_none()); + } + + #[test] + fn changes_trie_storage_works() { + let backend = Backend::::new_test(1000, 100); + backend.changes_tries_storage.meta.write().finalized_number = 1000; + + let check_changes = + |backend: &Backend, block: u64, changes: Vec<(Vec, Vec)>| { + let (changes_root, mut changes_trie_update) = prepare_changes(changes); + let anchor = state_machine::ChangesTrieAnchorBlockId { + hash: backend + .blockchain() + .header(BlockId::Number(block)) + .unwrap() + .unwrap() + .hash(), + number: block, + }; + assert_eq!( + backend.changes_tries_storage.root(&anchor, block), + Ok(Some(changes_root)) + ); + + for (key, (val, _)) in changes_trie_update.drain() { + assert_eq!( + backend.changes_trie_storage().unwrap().get(&key, &[]), + Ok(Some(val)) + ); + } + }; + + let changes0 = vec![(b"key_at_0".to_vec(), b"val_at_0".to_vec())]; + let changes1 = vec![ + (b"key_at_1".to_vec(), b"val_at_1".to_vec()), + (b"another_key_at_1".to_vec(), b"another_val_at_1".to_vec()), + ]; + let changes2 = vec![(b"key_at_2".to_vec(), b"val_at_2".to_vec())]; + + let block0 = insert_header( + &backend, + 0, + Default::default(), + changes0.clone(), + Default::default(), + ); + let block1 = insert_header(&backend, 1, block0, changes1.clone(), Default::default()); + let _ = insert_header(&backend, 2, block1, changes2.clone(), Default::default()); + + // check that the storage contains tries for all blocks + check_changes(&backend, 0, changes0); + check_changes(&backend, 1, changes1); + check_changes(&backend, 2, changes2); + } + + #[test] + fn changes_trie_storage_works_with_forks() { + let backend = Backend::::new_test(1000, 100); + + let changes0 = vec![(b"k0".to_vec(), b"v0".to_vec())]; + let changes1 = vec![(b"k1".to_vec(), b"v1".to_vec())]; + let changes2 = 
vec![(b"k2".to_vec(), b"v2".to_vec())]; + let block0 = insert_header( + &backend, + 0, + Default::default(), + changes0.clone(), + Default::default(), + ); + let block1 = insert_header(&backend, 1, block0, changes1.clone(), Default::default()); + let block2 = insert_header(&backend, 2, block1, changes2.clone(), Default::default()); + + let changes2_1_0 = vec![(b"k3".to_vec(), b"v3".to_vec())]; + let changes2_1_1 = vec![(b"k4".to_vec(), b"v4".to_vec())]; + let block2_1_0 = insert_header( + &backend, + 3, + block2, + changes2_1_0.clone(), + Default::default(), + ); + let block2_1_1 = insert_header( + &backend, + 4, + block2_1_0, + changes2_1_1.clone(), + Default::default(), + ); + + let changes2_2_0 = vec![(b"k5".to_vec(), b"v5".to_vec())]; + let changes2_2_1 = vec![(b"k6".to_vec(), b"v6".to_vec())]; + let block2_2_0 = insert_header( + &backend, + 3, + block2, + changes2_2_0.clone(), + Default::default(), + ); + let block2_2_1 = insert_header( + &backend, + 4, + block2_2_0, + changes2_2_1.clone(), + Default::default(), + ); + + // finalize block1 + backend.changes_tries_storage.meta.write().finalized_number = 1; + + // branch1: when asking for finalized block hash + let (changes1_root, _) = prepare_changes(changes1); + let anchor = state_machine::ChangesTrieAnchorBlockId { + hash: block2_1_1, + number: 4, + }; + assert_eq!( + backend.changes_tries_storage.root(&anchor, 1), + Ok(Some(changes1_root)) + ); + + // branch2: when asking for finalized block hash + let anchor = state_machine::ChangesTrieAnchorBlockId { + hash: block2_2_1, + number: 4, + }; + assert_eq!( + backend.changes_tries_storage.root(&anchor, 1), + Ok(Some(changes1_root)) + ); + + // branch1: when asking for non-finalized block hash (search by traversal) + let (changes2_1_0_root, _) = prepare_changes(changes2_1_0); + let anchor = state_machine::ChangesTrieAnchorBlockId { + hash: block2_1_1, + number: 4, + }; + assert_eq!( + backend.changes_tries_storage.root(&anchor, 3), + Ok(Some(changes2_1_0_root)) + ); + + // branch2: when asking for non-finalized block hash (search using canonicalized hint) + let (changes2_2_0_root, _) = prepare_changes(changes2_2_0); + let anchor = state_machine::ChangesTrieAnchorBlockId { + hash: block2_2_1, + number: 4, + }; + assert_eq!( + backend.changes_tries_storage.root(&anchor, 3), + Ok(Some(changes2_2_0_root)) + ); + + // finalize first block of branch2 (block2_2_0) + backend.changes_tries_storage.meta.write().finalized_number = 3; + + // branch2: when asking for finalized block of this branch + assert_eq!( + backend.changes_tries_storage.root(&anchor, 3), + Ok(Some(changes2_2_0_root)) + ); + + // branch1: when asking for finalized block of other branch + // => result is incorrect (returned for the block of branch1), but this is expected, + // because the other fork is abandoned (forked before finalized header) + let anchor = state_machine::ChangesTrieAnchorBlockId { + hash: block2_1_1, + number: 4, + }; + assert_eq!( + backend.changes_tries_storage.root(&anchor, 3), + Ok(Some(changes2_2_0_root)) + ); + } + + #[test] + fn changes_tries_with_digest_are_pruned_on_finalization() { + let mut backend = Backend::::new_test(1000, 100); + backend.changes_tries_storage.min_blocks_to_keep = Some(8); + let config = ChangesTrieConfiguration { + digest_interval: 2, + digest_levels: 2, + }; + + // insert some blocks + let block0 = insert_header( + &backend, + 0, + Default::default(), + vec![(b"key_at_0".to_vec(), b"val_at_0".to_vec())], + Default::default(), + ); + let block1 = insert_header( + &backend, + 1, 
+ block0, + vec![(b"key_at_1".to_vec(), b"val_at_1".to_vec())], + Default::default(), + ); + let block2 = insert_header( + &backend, + 2, + block1, + vec![(b"key_at_2".to_vec(), b"val_at_2".to_vec())], + Default::default(), + ); + let block3 = insert_header( + &backend, + 3, + block2, + vec![(b"key_at_3".to_vec(), b"val_at_3".to_vec())], + Default::default(), + ); + let block4 = insert_header( + &backend, + 4, + block3, + vec![(b"key_at_4".to_vec(), b"val_at_4".to_vec())], + Default::default(), + ); + let block5 = insert_header( + &backend, + 5, + block4, + vec![(b"key_at_5".to_vec(), b"val_at_5".to_vec())], + Default::default(), + ); + let block6 = insert_header( + &backend, + 6, + block5, + vec![(b"key_at_6".to_vec(), b"val_at_6".to_vec())], + Default::default(), + ); + let block7 = insert_header( + &backend, + 7, + block6, + vec![(b"key_at_7".to_vec(), b"val_at_7".to_vec())], + Default::default(), + ); + let block8 = insert_header( + &backend, + 8, + block7, + vec![(b"key_at_8".to_vec(), b"val_at_8".to_vec())], + Default::default(), + ); + let block9 = insert_header( + &backend, + 9, + block8, + vec![(b"key_at_9".to_vec(), b"val_at_9".to_vec())], + Default::default(), + ); + let block10 = insert_header( + &backend, + 10, + block9, + vec![(b"key_at_10".to_vec(), b"val_at_10".to_vec())], + Default::default(), + ); + let block11 = insert_header( + &backend, + 11, + block10, + vec![(b"key_at_11".to_vec(), b"val_at_11".to_vec())], + Default::default(), + ); + let block12 = insert_header( + &backend, + 12, + block11, + vec![(b"key_at_12".to_vec(), b"val_at_12".to_vec())], + Default::default(), + ); + let block13 = insert_header( + &backend, + 13, + block12, + vec![(b"key_at_13".to_vec(), b"val_at_13".to_vec())], + Default::default(), + ); + backend.changes_tries_storage.meta.write().finalized_number = 13; + + // check that roots of all tries are in the columns::CHANGES_TRIE + let anchor = state_machine::ChangesTrieAnchorBlockId { + hash: block13, + number: 13, + }; + fn read_changes_trie_root(backend: &Backend, num: u64) -> H256 { + backend + .blockchain() + .header(BlockId::Number(num)) + .unwrap() + .unwrap() + .digest() + .logs() + .iter() + .find(|i| i.as_changes_trie_root().is_some()) + .unwrap() + .as_changes_trie_root() + .unwrap() + .clone() + } + let root1 = read_changes_trie_root(&backend, 1); + assert_eq!( + backend.changes_tries_storage.root(&anchor, 1).unwrap(), + Some(root1) + ); + let root2 = read_changes_trie_root(&backend, 2); + assert_eq!( + backend.changes_tries_storage.root(&anchor, 2).unwrap(), + Some(root2) + ); + let root3 = read_changes_trie_root(&backend, 3); + assert_eq!( + backend.changes_tries_storage.root(&anchor, 3).unwrap(), + Some(root3) + ); + let root4 = read_changes_trie_root(&backend, 4); + assert_eq!( + backend.changes_tries_storage.root(&anchor, 4).unwrap(), + Some(root4) + ); + let root5 = read_changes_trie_root(&backend, 5); + assert_eq!( + backend.changes_tries_storage.root(&anchor, 5).unwrap(), + Some(root5) + ); + let root6 = read_changes_trie_root(&backend, 6); + assert_eq!( + backend.changes_tries_storage.root(&anchor, 6).unwrap(), + Some(root6) + ); + let root7 = read_changes_trie_root(&backend, 7); + assert_eq!( + backend.changes_tries_storage.root(&anchor, 7).unwrap(), + Some(root7) + ); + let root8 = read_changes_trie_root(&backend, 8); + assert_eq!( + backend.changes_tries_storage.root(&anchor, 8).unwrap(), + Some(root8) + ); + let root9 = read_changes_trie_root(&backend, 9); + assert_eq!( + backend.changes_tries_storage.root(&anchor, 
9).unwrap(), + Some(root9) + ); + let root10 = read_changes_trie_root(&backend, 10); + assert_eq!( + backend.changes_tries_storage.root(&anchor, 10).unwrap(), + Some(root10) + ); + let root11 = read_changes_trie_root(&backend, 11); + assert_eq!( + backend.changes_tries_storage.root(&anchor, 11).unwrap(), + Some(root11) + ); + let root12 = read_changes_trie_root(&backend, 12); + assert_eq!( + backend.changes_tries_storage.root(&anchor, 12).unwrap(), + Some(root12) + ); + + // now simulate finalization of block#12, causing prune of tries at #1..#4 + let mut tx = DBTransaction::new(); + backend + .changes_tries_storage + .prune(Some(config.clone()), &mut tx, Default::default(), 12); + backend.storage.db.write(tx).unwrap(); + assert!(backend + .changes_tries_storage + .get(&root1, &[]) + .unwrap() + .is_none()); + assert!(backend + .changes_tries_storage + .get(&root2, &[]) + .unwrap() + .is_none()); + assert!(backend + .changes_tries_storage + .get(&root3, &[]) + .unwrap() + .is_none()); + assert!(backend + .changes_tries_storage + .get(&root4, &[]) + .unwrap() + .is_none()); + assert!(backend + .changes_tries_storage + .get(&root5, &[]) + .unwrap() + .is_some()); + assert!(backend + .changes_tries_storage + .get(&root6, &[]) + .unwrap() + .is_some()); + assert!(backend + .changes_tries_storage + .get(&root7, &[]) + .unwrap() + .is_some()); + assert!(backend + .changes_tries_storage + .get(&root8, &[]) + .unwrap() + .is_some()); + + // now simulate finalization of block#16, causing prune of tries at #5..#8 + let mut tx = DBTransaction::new(); + backend + .changes_tries_storage + .prune(Some(config.clone()), &mut tx, Default::default(), 16); + backend.storage.db.write(tx).unwrap(); + assert!(backend + .changes_tries_storage + .get(&root5, &[]) + .unwrap() + .is_none()); + assert!(backend + .changes_tries_storage + .get(&root6, &[]) + .unwrap() + .is_none()); + assert!(backend + .changes_tries_storage + .get(&root7, &[]) + .unwrap() + .is_none()); + assert!(backend + .changes_tries_storage + .get(&root8, &[]) + .unwrap() + .is_none()); + + // now "change" pruning mode to archive && simulate finalization of block#20 + // => no changes tries are pruned, because we never prune in archive mode + backend.changes_tries_storage.min_blocks_to_keep = None; + let mut tx = DBTransaction::new(); + backend + .changes_tries_storage + .prune(Some(config), &mut tx, Default::default(), 20); + backend.storage.db.write(tx).unwrap(); + assert!(backend + .changes_tries_storage + .get(&root9, &[]) + .unwrap() + .is_some()); + assert!(backend + .changes_tries_storage + .get(&root10, &[]) + .unwrap() + .is_some()); + assert!(backend + .changes_tries_storage + .get(&root11, &[]) + .unwrap() + .is_some()); + assert!(backend + .changes_tries_storage + .get(&root12, &[]) + .unwrap() + .is_some()); + } + + #[test] + fn changes_tries_without_digest_are_pruned_on_finalization() { + let mut backend = Backend::::new_test(1000, 100); + backend.changes_tries_storage.min_blocks_to_keep = Some(4); + let config = ChangesTrieConfiguration { + digest_interval: 0, + digest_levels: 0, + }; + + // insert some blocks + let block0 = insert_header( + &backend, + 0, + Default::default(), + vec![(b"key_at_0".to_vec(), b"val_at_0".to_vec())], + Default::default(), + ); + let block1 = insert_header( + &backend, + 1, + block0, + vec![(b"key_at_1".to_vec(), b"val_at_1".to_vec())], + Default::default(), + ); + let block2 = insert_header( + &backend, + 2, + block1, + vec![(b"key_at_2".to_vec(), b"val_at_2".to_vec())], + Default::default(), + ); + 
let block3 = insert_header( + &backend, + 3, + block2, + vec![(b"key_at_3".to_vec(), b"val_at_3".to_vec())], + Default::default(), + ); + let block4 = insert_header( + &backend, + 4, + block3, + vec![(b"key_at_4".to_vec(), b"val_at_4".to_vec())], + Default::default(), + ); + let block5 = insert_header( + &backend, + 5, + block4, + vec![(b"key_at_5".to_vec(), b"val_at_5".to_vec())], + Default::default(), + ); + let block6 = insert_header( + &backend, + 6, + block5, + vec![(b"key_at_6".to_vec(), b"val_at_6".to_vec())], + Default::default(), + ); + + // check that roots of all tries are in the columns::CHANGES_TRIE + let anchor = state_machine::ChangesTrieAnchorBlockId { + hash: block6, + number: 6, + }; + fn read_changes_trie_root(backend: &Backend, num: u64) -> H256 { + backend + .blockchain() + .header(BlockId::Number(num)) + .unwrap() + .unwrap() + .digest() + .logs() + .iter() + .find(|i| i.as_changes_trie_root().is_some()) + .unwrap() + .as_changes_trie_root() + .unwrap() + .clone() + } + + let root1 = read_changes_trie_root(&backend, 1); + assert_eq!( + backend.changes_tries_storage.root(&anchor, 1).unwrap(), + Some(root1) + ); + let root2 = read_changes_trie_root(&backend, 2); + assert_eq!( + backend.changes_tries_storage.root(&anchor, 2).unwrap(), + Some(root2) + ); + let root3 = read_changes_trie_root(&backend, 3); + assert_eq!( + backend.changes_tries_storage.root(&anchor, 3).unwrap(), + Some(root3) + ); + let root4 = read_changes_trie_root(&backend, 4); + assert_eq!( + backend.changes_tries_storage.root(&anchor, 4).unwrap(), + Some(root4) + ); + let root5 = read_changes_trie_root(&backend, 5); + assert_eq!( + backend.changes_tries_storage.root(&anchor, 5).unwrap(), + Some(root5) + ); + let root6 = read_changes_trie_root(&backend, 6); + assert_eq!( + backend.changes_tries_storage.root(&anchor, 6).unwrap(), + Some(root6) + ); + + // now simulate finalization of block#5, causing prune of trie at #1 + let mut tx = DBTransaction::new(); + backend + .changes_tries_storage + .prune(Some(config.clone()), &mut tx, block5, 5); + backend.storage.db.write(tx).unwrap(); + assert!(backend + .changes_tries_storage + .get(&root1, &[]) + .unwrap() + .is_none()); + assert!(backend + .changes_tries_storage + .get(&root2, &[]) + .unwrap() + .is_some()); + + // now simulate finalization of block#6, causing prune of tries at #2 + let mut tx = DBTransaction::new(); + backend + .changes_tries_storage + .prune(Some(config.clone()), &mut tx, block6, 6); + backend.storage.db.write(tx).unwrap(); + assert!(backend + .changes_tries_storage + .get(&root2, &[]) + .unwrap() + .is_none()); + assert!(backend + .changes_tries_storage + .get(&root3, &[]) + .unwrap() + .is_some()); + } + + #[test] + fn tree_route_works() { + let backend = Backend::::new_test(1000, 100); + let block0 = insert_header( + &backend, + 0, + Default::default(), + Vec::new(), + Default::default(), + ); + + // fork from genesis: 3 prong. + let a1 = insert_header(&backend, 1, block0, Vec::new(), Default::default()); + let a2 = insert_header(&backend, 2, a1, Vec::new(), Default::default()); + let a3 = insert_header(&backend, 3, a2, Vec::new(), Default::default()); + + // fork from genesis: 2 prong. 
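// The four route checks below pin down the tree_route contract: retracted
// blocks are reported newest-first, enacted blocks oldest-first, meeting at
// the common ancestor. As a compact, self-contained illustration of that
// contract (a hypothetical toy walk over a parent map, not this crate's API):
//
//	use std::collections::HashMap;
//
//	fn toy_tree_route(
//		parent: &HashMap<u32, u32>, // child -> parent
//		height: &HashMap<u32, u32>, // block -> height
//		mut from: u32,
//		mut to: u32,
//	) -> (Vec<u32>, u32, Vec<u32>) {
//		let (mut retracted, mut enacted) = (Vec::new(), Vec::new());
//		while height[&from] > height[&to] { retracted.push(from); from = parent[&from]; }
//		while height[&to] > height[&from] { enacted.push(to); to = parent[&to]; }
//		while from != to {
//			retracted.push(from); from = parent[&from];
//			enacted.push(to); to = parent[&to];
//		}
//		enacted.reverse(); // enacted comes out oldest-first
//		(retracted, from, enacted) // retracted stays newest-first
//	}
//
// For the fork built below, toy_tree_route(.., a3, b2) yields
// ([a3, a2, a1], block0, [b1, b2]), matching the assertions.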
+		let b1 = insert_header(&backend, 1, block0, Vec::new(), H256::from([1; 32]));
+		let b2 = insert_header(&backend, 2, b1, Vec::new(), Default::default());
+
+		{
+			let tree_route = ::client::blockchain::tree_route(
+				backend.blockchain(),
+				BlockId::Hash(a3),
+				BlockId::Hash(b2),
+			)
+			.unwrap();
+
+			assert_eq!(tree_route.common_block().hash, block0);
+			assert_eq!(
+				tree_route
+					.retracted()
+					.iter()
+					.map(|r| r.hash)
+					.collect::<Vec<_>>(),
+				vec![a3, a2, a1]
+			);
+			assert_eq!(
+				tree_route
+					.enacted()
+					.iter()
+					.map(|r| r.hash)
+					.collect::<Vec<_>>(),
+				vec![b1, b2]
+			);
+		}
+
+		{
+			let tree_route = ::client::blockchain::tree_route(
+				backend.blockchain(),
+				BlockId::Hash(a1),
+				BlockId::Hash(a3),
+			)
+			.unwrap();
+
+			assert_eq!(tree_route.common_block().hash, a1);
+			assert!(tree_route.retracted().is_empty());
+			assert_eq!(
+				tree_route
+					.enacted()
+					.iter()
+					.map(|r| r.hash)
+					.collect::<Vec<_>>(),
+				vec![a2, a3]
+			);
+		}
+
+		{
+			let tree_route = ::client::blockchain::tree_route(
+				backend.blockchain(),
+				BlockId::Hash(a3),
+				BlockId::Hash(a1),
+			)
+			.unwrap();
+
+			assert_eq!(tree_route.common_block().hash, a1);
+			assert_eq!(
+				tree_route
+					.retracted()
+					.iter()
+					.map(|r| r.hash)
+					.collect::<Vec<_>>(),
+				vec![a3, a2]
+			);
+			assert!(tree_route.enacted().is_empty());
+		}
+
+		{
+			let tree_route = ::client::blockchain::tree_route(
+				backend.blockchain(),
+				BlockId::Hash(a2),
+				BlockId::Hash(a2),
+			)
+			.unwrap();
+
+			assert_eq!(tree_route.common_block().hash, a2);
+			assert!(tree_route.retracted().is_empty());
+			assert!(tree_route.enacted().is_empty());
+		}
+	}
+
+	#[test]
+	fn tree_route_child() {
+		let backend = Backend::<Block>::new_test(1000, 100);
+
+		let block0 = insert_header(
+			&backend,
+			0,
+			Default::default(),
+			Vec::new(),
+			Default::default(),
+		);
+		let block1 = insert_header(&backend, 1, block0, Vec::new(), Default::default());
+
+		{
+			let tree_route = ::client::blockchain::tree_route(
+				backend.blockchain(),
+				BlockId::Hash(block0),
+				BlockId::Hash(block1),
+			)
+			.unwrap();
+
+			assert_eq!(tree_route.common_block().hash, block0);
+			assert!(tree_route.retracted().is_empty());
+			assert_eq!(
+				tree_route
+					.enacted()
+					.iter()
+					.map(|r| r.hash)
+					.collect::<Vec<_>>(),
+				vec![block1]
+			);
+		}
+	}
+
+	#[test]
+	fn test_leaves_with_complex_block_tree() {
+		let backend: Arc<Backend<Block>> =
+			Arc::new(Backend::new_test(20, 20));
+		test_client::trait_tests::test_leaves_for_backend(backend);
+	}
+
+	#[test]
+	fn test_children_with_complex_block_tree() {
+		let backend: Arc<Backend<Block>> =
+			Arc::new(Backend::new_test(20, 20));
+		test_client::trait_tests::test_children_for_backend(backend);
+	}
+
+	#[test]
+	fn test_blockchain_query_by_number_gets_canonical() {
+		let backend: Arc<Backend<Block>> =
+			Arc::new(Backend::new_test(20, 20));
+		test_client::trait_tests::test_blockchain_query_by_number_gets_canonical(backend);
+	}
+
+	#[test]
+	fn test_leaves_pruned_on_finality() {
+		let backend: Backend<Block> = Backend::new_test(10, 10);
+		let block0 = insert_header(
+			&backend,
+			0,
+			Default::default(),
+			Default::default(),
+			Default::default(),
+		);
+
+		let block1_a = insert_header(&backend, 1, block0, Default::default(), Default::default());
+		let block1_b = insert_header(&backend, 1, block0, Default::default(), [1; 32].into());
+		let block1_c = insert_header(&backend, 1, block0, Default::default(), [2; 32].into());
+
+		assert_eq!(
+			backend.blockchain().leaves().unwrap(),
+			vec![block1_a, block1_b, block1_c]
+		);
+
+		let block2_a = insert_header(
+			&backend,
+			2,
+			block1_a,
+			Default::default(),
+			Default::default(),
+		);
+		let block2_b = insert_header(
+			&backend,
+			2,
+			block1_b,
+			Default::default(),
+			Default::default(),
+		);
+		let block2_c = insert_header(&backend, 2, block1_b, Default::default(), [1; 32].into());
+
+		assert_eq!(
+			backend.blockchain().leaves().unwrap(),
+			vec![block2_a, block2_b, block2_c, block1_c]
+		);
+
+		backend
+			.finalize_block(BlockId::hash(block1_a), None)
+			.unwrap();
+		backend
+			.finalize_block(BlockId::hash(block2_a), None)
+			.unwrap();
+
+		// leaves at same height stay. Leaves at lower heights pruned.
+		assert_eq!(
+			backend.blockchain().leaves().unwrap(),
+			vec![block2_a, block2_b, block2_c]
+		);
+	}
+
+	#[test]
+	fn test_aux() {
+		let backend: Backend<Block> = Backend::new_test(0, 0);
+		assert!(backend.get_aux(b"test").unwrap().is_none());
+		backend
+			.insert_aux(&[(&b"test"[..], &b"hello"[..])], &[])
+			.unwrap();
+		assert_eq!(b"hello", &backend.get_aux(b"test").unwrap().unwrap()[..]);
+		backend.insert_aux(&[], &[&b"test"[..]]).unwrap();
+		assert!(backend.get_aux(b"test").unwrap().is_none());
+	}
+
+	#[test]
+	fn test_finalize_block_with_justification() {
+		use client::blockchain::Backend as BlockChainBackend;
+
+		let backend = Backend::<Block>::new_test(10, 10);
+
+		let block0 = insert_header(
+			&backend,
+			0,
+			Default::default(),
+			Default::default(),
+			Default::default(),
+		);
+		let _ = insert_header(&backend, 1, block0, Default::default(), Default::default());
+
+		let justification = Some(vec![1, 2, 3]);
+		backend
+			.finalize_block(BlockId::Number(1), justification.clone())
+			.unwrap();
+
+		assert_eq!(
+			backend
+				.blockchain()
+				.justification(BlockId::Number(1))
+				.unwrap(),
+			justification,
+		);
+	}
+
+	#[test]
+	fn test_finalize_multiple_blocks_in_single_op() {
+		let backend = Backend::<Block>::new_test(10, 10);
+
+		let block0 = insert_header(
+			&backend,
+			0,
+			Default::default(),
+			Default::default(),
+			Default::default(),
+		);
+		let block1 = insert_header(&backend, 1, block0, Default::default(), Default::default());
+		let block2 = insert_header(&backend, 2, block1, Default::default(), Default::default());
+		{
+			let mut op = backend.begin_operation().unwrap();
+			backend
+				.begin_state_operation(&mut op, BlockId::Hash(block0))
+				.unwrap();
+			op.mark_finalized(BlockId::Hash(block1), None).unwrap();
+			op.mark_finalized(BlockId::Hash(block2), None).unwrap();
+			backend.commit_operation(op).unwrap();
+		}
+	}
+
+	#[test]
+	fn test_finalize_non_sequential() {
+		let backend = Backend::<Block>::new_test(10, 10);
+
+		let block0 = insert_header(
+			&backend,
+			0,
+			Default::default(),
+			Default::default(),
+			Default::default(),
+		);
+		let block1 = insert_header(&backend, 1, block0, Default::default(), Default::default());
+		let block2 = insert_header(&backend, 2, block1, Default::default(), Default::default());
+		{
+			let mut op = backend.begin_operation().unwrap();
+			backend
+				.begin_state_operation(&mut op, BlockId::Hash(block0))
+				.unwrap();
+			op.mark_finalized(BlockId::Hash(block2), None).unwrap();
+			backend.commit_operation(op).unwrap_err();
+		}
+	}
+}
diff --git a/core/client/db/src/light.rs b/core/client/db/src/light.rs
index 62b6486f54..20d969e8b4 100644
--- a/core/client/db/src/light.rs
+++ b/core/client/db/src/light.rs
@@ -16,37 +16,41 @@
 //! RocksDB-based light client blockchain storage.
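// Throughout this file, blocks are keyed by number + hash (see
// `number_and_hash_to_lookup_key` below). A minimal sketch of such a scheme,
// assuming a 4-byte big-endian number index as suggested by `cht_key`'s
// `[u8; 5]` layout near the end of the file; the helper name and signature
// here are illustrative, not the crate's actual implementation:
//
//	fn sketch_lookup_key(number: u32, hash: &[u8; 32]) -> Vec<u8> {
//		let mut key = number.to_be_bytes().to_vec(); // big-endian sorts numerically
//		key.extend_from_slice(hash); // hash suffix disambiguates forks at one height
//		key
//	}
//
// With number-first keys, iterating columns::KEY_LOOKUP visits blocks in
// ascending height order.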
-use std::{sync::Arc, collections::HashMap};
 use parking_lot::RwLock;
+use std::{collections::HashMap, sync::Arc};

-use kvdb::{KeyValueDB, DBTransaction};
+use kvdb::{DBTransaction, KeyValueDB};

+use crate::cache::{ComplexBlockId, DbCache, DbCacheSync};
+use crate::utils::{
+	self, block_id_to_lookup_key, db_err, meta_keys, open_database, read_db, read_meta, Meta,
+};
+use crate::DatabaseSettings;
 use client::backend::{AuxStore, NewBlockState};
-use client::blockchain::{BlockStatus, Cache as BlockchainCache,
-	HeaderBackend as BlockchainHeaderBackend, Info as BlockchainInfo};
+use client::blockchain::{
+	BlockStatus, Cache as BlockchainCache, HeaderBackend as BlockchainHeaderBackend,
+	Info as BlockchainInfo,
+};
 use client::cht;
-use client::leaves::{LeafSet, FinalizationDisplaced};
 use client::error::{ErrorKind as ClientErrorKind, Result as ClientResult};
+use client::leaves::{FinalizationDisplaced, LeafSet};
 use client::light::blockchain::Storage as LightBlockchainStorage;
+use consensus_common::well_known_cache_keys;
+use log::{debug, trace, warn};
 use parity_codec::{Decode, Encode};
 use primitives::Blake2Hasher;
 use runtime_primitives::generic::BlockId;
-use runtime_primitives::traits::{Block as BlockT, Header as HeaderT,
-	Zero, One, As, NumberFor, Digest, DigestItem};
-use consensus_common::well_known_cache_keys;
-use crate::cache::{DbCacheSync, DbCache, ComplexBlockId};
-use crate::utils::{self, meta_keys, Meta, db_err, open_database,
-	read_db, block_id_to_lookup_key, read_meta};
-use crate::DatabaseSettings;
-use log::{trace, warn, debug};
+use runtime_primitives::traits::{
+	As, Block as BlockT, Digest, DigestItem, Header as HeaderT, NumberFor, One, Zero,
+};

 pub(crate) mod columns {
-	pub const META: Option<u32> = crate::utils::COLUMN_META;
-	pub const KEY_LOOKUP: Option<u32> = Some(1);
-	pub const HEADER: Option<u32> = Some(2);
-	pub const CACHE: Option<u32> = Some(3);
-	pub const CHT: Option<u32> = Some(4);
-	pub const AUX: Option<u32> = Some(5);
+	pub const META: Option<u32> = crate::utils::COLUMN_META;
+	pub const KEY_LOOKUP: Option<u32> = Some(1);
+	pub const HEADER: Option<u32> = Some(2);
+	pub const CACHE: Option<u32> = Some(3);
+	pub const CHT: Option<u32> = Some(4);
+	pub const AUX: Option<u32> = Some(5);
 }

 /// Prefix for headers CHT.
@@ -57,987 +61,1294 @@
 const CHANGES_TRIE_CHT_PREFIX: u8 = 1;

 /// Light blockchain storage. Stores most recent headers + CHTs for older headers.
 /// Locks order: meta, leaves, cache.
 pub struct LightStorage<Block: BlockT> {
-	db: Arc<KeyValueDB>,
-	meta: RwLock<Meta<NumberFor<Block>, Block::Hash>>,
-	leaves: RwLock<LeafSet<Block::Hash, NumberFor<Block>>>,
-	cache: Arc<DbCacheSync<Block>>,
+	db: Arc<KeyValueDB>,
+	meta: RwLock<Meta<NumberFor<Block>, Block::Hash>>,
+	leaves: RwLock<LeafSet<Block::Hash, NumberFor<Block>>>,
+	cache: Arc<DbCacheSync<Block>>,
 }

 impl<Block> LightStorage<Block>
-	where
-		Block: BlockT,
+where
+	Block: BlockT,
 {
-	/// Create new storage with given settings.
-	pub fn new(config: DatabaseSettings) -> ClientResult<Self> {
-		let db = open_database(&config, columns::META, "light")?;
-
-		Self::from_kvdb(db as Arc<_>)
-	}
-
-	#[cfg(any(test, feature = "test-helpers"))]
-	pub fn new_test() -> Self {
-		use utils::NUM_COLUMNS;
-
-		let db = Arc::new(::kvdb_memorydb::create(NUM_COLUMNS));
-
-		Self::from_kvdb(db as Arc<_>).expect("failed to create test-db")
-	}
-
-	fn from_kvdb(db: Arc<KeyValueDB>) -> ClientResult<Self> {
-		let meta = read_meta::<Block>(&*db, columns::META, columns::HEADER)?;
-		let leaves = LeafSet::read_from_db(&*db, columns::META, meta_keys::LEAF_PREFIX)?;
-		let cache = DbCache::new(
-			db.clone(),
-			columns::KEY_LOOKUP,
-			columns::HEADER,
-			columns::CACHE,
-			ComplexBlockId::new(meta.finalized_hash, meta.finalized_number),
-		);
-
-		Ok(LightStorage {
-			db,
-			meta: RwLock::new(meta),
-			cache: Arc::new(DbCacheSync(RwLock::new(cache))),
-			leaves: RwLock::new(leaves),
-		})
-	}
-
-	#[cfg(test)]
-	pub(crate) fn cache(&self) -> &DbCacheSync<Block> {
-		&self.cache
-	}
-
-	fn update_meta(
-		&self,
-		hash: Block::Hash,
-		number: NumberFor<Block>,
-		is_best: bool,
-		is_finalized: bool,
-	) {
-		let mut meta = self.meta.write();
-
-		if number.is_zero() {
-			meta.genesis_hash = hash;
-			meta.finalized_hash = hash;
-		}
-
-		if is_best {
-			meta.best_number = number;
-			meta.best_hash = hash;
-		}
-
-		if is_finalized {
-			meta.finalized_number = number;
-			meta.finalized_hash = hash;
-		}
-	}
+	/// Create new storage with given settings.
+	pub fn new(config: DatabaseSettings) -> ClientResult<Self> {
+		let db = open_database(&config, columns::META, "light")?;
+
+		Self::from_kvdb(db as Arc<_>)
+	}
+
+	#[cfg(any(test, feature = "test-helpers"))]
+	pub fn new_test() -> Self {
+		use utils::NUM_COLUMNS;
+
+		let db = Arc::new(::kvdb_memorydb::create(NUM_COLUMNS));
+
+		Self::from_kvdb(db as Arc<_>).expect("failed to create test-db")
+	}
+
+	fn from_kvdb(db: Arc<KeyValueDB>) -> ClientResult<Self> {
+		let meta = read_meta::<Block>(&*db, columns::META, columns::HEADER)?;
+		let leaves = LeafSet::read_from_db(&*db, columns::META, meta_keys::LEAF_PREFIX)?;
+		let cache = DbCache::new(
+			db.clone(),
+			columns::KEY_LOOKUP,
+			columns::HEADER,
+			columns::CACHE,
+			ComplexBlockId::new(meta.finalized_hash, meta.finalized_number),
+		);
+
+		Ok(LightStorage {
+			db,
+			meta: RwLock::new(meta),
+			cache: Arc::new(DbCacheSync(RwLock::new(cache))),
+			leaves: RwLock::new(leaves),
+		})
+	}
+
+	#[cfg(test)]
+	pub(crate) fn cache(&self) -> &DbCacheSync<Block> {
+		&self.cache
+	}
+
+	fn update_meta(
+		&self,
+		hash: Block::Hash,
+		number: NumberFor<Block>,
+		is_best: bool,
+		is_finalized: bool,
+	) {
+		let mut meta = self.meta.write();
+
+		if number.is_zero() {
+			meta.genesis_hash = hash;
+			meta.finalized_hash = hash;
+		}
+
+		if is_best {
+			meta.best_number = number;
+			meta.best_hash = hash;
+		}
+
+		if is_finalized {
+			meta.finalized_number = number;
+			meta.finalized_hash = hash;
+		}
+	}
 }

 impl<Block> BlockchainHeaderBackend<Block> for LightStorage<Block>
-	where
-		Block: BlockT,
+where
+	Block: BlockT,
 {
-	fn header(&self, id: BlockId<Block>) -> ClientResult<Option<Block::Header>> {
-		utils::read_header(&*self.db, columns::KEY_LOOKUP, columns::HEADER, id)
-	}
-
-	fn info(&self) -> ClientResult<BlockchainInfo<Block>> {
-		let meta = self.meta.read();
-		Ok(BlockchainInfo {
-			best_hash: meta.best_hash,
-			best_number: meta.best_number,
-			genesis_hash: meta.genesis_hash,
-			finalized_hash: meta.finalized_hash,
-			finalized_number: meta.finalized_number,
-		})
-	}
-
-	fn status(&self, id: BlockId<Block>) -> ClientResult<BlockStatus> {
-		let exists = match id {
-			BlockId::Hash(_) => read_db(
-				&*self.db,
-				columns::KEY_LOOKUP,
-				columns::HEADER,
-				id
-			)?.is_some(),
-			BlockId::Number(n) => n <= self.meta.read().best_number,
-		};
-		match exists {
-			true => Ok(BlockStatus::InChain),
-			false => Ok(BlockStatus::Unknown),
-		}
-	}
-
-	fn number(&self, hash: Block::Hash) -> ClientResult<Option<NumberFor<Block>>> {
-		if let Some(lookup_key) = block_id_to_lookup_key::<Block>(&*self.db, columns::KEY_LOOKUP, BlockId::Hash(hash))? {
-			let number = utils::lookup_key_to_number(&lookup_key)?;
-			Ok(Some(number))
-		} else {
-			Ok(None)
-		}
-	}
-
-	fn hash(&self, number: NumberFor<Block>) -> ClientResult<Option<Block::Hash>> {
-		Ok(self.header(BlockId::Number(number))?.map(|header| header.hash().clone()))
-	}
+	fn header(&self, id: BlockId<Block>) -> ClientResult<Option<Block::Header>> {
+		utils::read_header(&*self.db, columns::KEY_LOOKUP, columns::HEADER, id)
+	}
+
+	fn info(&self) -> ClientResult<BlockchainInfo<Block>> {
+		let meta = self.meta.read();
+		Ok(BlockchainInfo {
+			best_hash: meta.best_hash,
+			best_number: meta.best_number,
+			genesis_hash: meta.genesis_hash,
+			finalized_hash: meta.finalized_hash,
+			finalized_number: meta.finalized_number,
+		})
+	}
+
+	fn status(&self, id: BlockId<Block>) -> ClientResult<BlockStatus> {
+		let exists = match id {
+			BlockId::Hash(_) => {
+				read_db(&*self.db, columns::KEY_LOOKUP, columns::HEADER, id)?.is_some()
+			}
+			BlockId::Number(n) => n <= self.meta.read().best_number,
+		};
+		match exists {
+			true => Ok(BlockStatus::InChain),
+			false => Ok(BlockStatus::Unknown),
+		}
+	}
+
+	fn number(&self, hash: Block::Hash) -> ClientResult<Option<NumberFor<Block>>> {
+		if let Some(lookup_key) =
+			block_id_to_lookup_key::<Block>(&*self.db, columns::KEY_LOOKUP, BlockId::Hash(hash))?
+		{
+			let number = utils::lookup_key_to_number(&lookup_key)?;
+			Ok(Some(number))
+		} else {
+			Ok(None)
+		}
+	}
+
+	fn hash(&self, number: NumberFor<Block>) -> ClientResult<Option<Block::Hash>> {
+		Ok(self
+			.header(BlockId::Number(number))?
+			.map(|header| header.hash().clone()))
+	}
 }

 impl<Block: BlockT> LightStorage<Block> {
-	// Get block changes trie root, if available.
-	fn changes_trie_root(&self, block: BlockId<Block>) -> ClientResult<Option<Block::Hash>> {
-		self.header(block)
-			.map(|header| header.and_then(|header|
-				header.digest().log(DigestItem::as_changes_trie_root)
-					.cloned()))
-	}
-
-	/// Handle setting head within a transaction. `route_to` should be the last
-	/// block that existed in the database. `best_to` should be the best block
-	/// to be set.
-	///
-	/// In the case where the new best block is a block to be imported, `route_to`
-	/// should be the parent of `best_to`. In the case where we set an existing block
-	/// to be best, `route_to` should equal to `best_to`.
-	fn set_head_with_transaction(&self, transaction: &mut DBTransaction, route_to: Block::Hash, best_to: (NumberFor<Block>, Block::Hash)) -> Result<(), client::error::Error> {
-		let lookup_key = utils::number_and_hash_to_lookup_key(best_to.0, &best_to.1);
-
-		// handle reorg.
-		let meta = self.meta.read();
-		if meta.best_hash != Default::default() {
-			let tree_route = ::client::blockchain::tree_route(
-				self,
-				BlockId::Hash(meta.best_hash),
-				BlockId::Hash(route_to),
-			)?;
-
-			// update block number to hash lookup entries.
-			for retracted in tree_route.retracted() {
-				if retracted.hash == meta.finalized_hash {
-					// TODO: can we recover here?
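// Aside on the doc comment of `set_head_with_transaction` above: both legal
// call shapes appear verbatim later in this file, quoted here as a usage note
// (arguments as in the surrounding code):
//
//	// Importing a new best block: route from its parent to the new block.
//	self.set_head_with_transaction(&mut transaction, parent_hash, (number, hash))?;
//
//	// Re-pointing head at an existing block: `route_to` is the target itself.
//	self.set_head_with_transaction(&mut transaction, hash.clone(), (number.clone(), hash.clone()))?;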
- warn!("Safety failure: reverting finalized block {:?}", - (&retracted.number, &retracted.hash)); - } - - utils::remove_number_to_key_mapping( - transaction, - columns::KEY_LOOKUP, - retracted.number - ); - } - - for enacted in tree_route.enacted() { - utils::insert_number_to_key_mapping( - transaction, - columns::KEY_LOOKUP, - enacted.number, - enacted.hash - ); - } - } - - transaction.put(columns::META, meta_keys::BEST_BLOCK, &lookup_key); - utils::insert_number_to_key_mapping( - transaction, - columns::KEY_LOOKUP, - best_to.0, - best_to.1, - ); - - Ok(()) - } - - // Note that a block is finalized. Only call with child of last finalized block. - fn note_finalized( - &self, - transaction: &mut DBTransaction, - header: &Block::Header, - hash: Block::Hash, - displaced: &mut Option>>, - ) -> ClientResult<()> { - let meta = self.meta.read(); - if &meta.finalized_hash != header.parent_hash() { - return Err(::client::error::ErrorKind::NonSequentialFinalization( - format!("Last finalized {:?} not parent of {:?}", - meta.finalized_hash, hash), - ).into()) - } - - let lookup_key = utils::number_and_hash_to_lookup_key(header.number().clone(), hash); - transaction.put(columns::META, meta_keys::FINALIZED_BLOCK, &lookup_key); - - // build new CHT(s) if required - if let Some(new_cht_number) = cht::is_build_required(cht::SIZE, *header.number()) { - let new_cht_start: NumberFor = cht::start_number(cht::SIZE, new_cht_number); - - let new_header_cht_root = cht::compute_root::( - cht::SIZE, new_cht_number, (new_cht_start.as_()..) - .map(|num| self.hash(As::sa(num))) - )?; - transaction.put( - columns::CHT, - &cht_key(HEADER_CHT_PREFIX, new_cht_start), - new_header_cht_root.as_ref() - ); - - // if the header includes changes trie root, let's build a changes tries roots CHT - if header.digest().log(DigestItem::as_changes_trie_root).is_some() { - let new_changes_trie_cht_root = cht::compute_root::( - cht::SIZE, new_cht_number, (new_cht_start.as_()..) - .map(|num| self.changes_trie_root(BlockId::Number(As::sa(num)))) - )?; - transaction.put( - columns::CHT, - &cht_key(CHANGES_TRIE_CHT_PREFIX, new_cht_start), - new_changes_trie_cht_root.as_ref() - ); - } - - // prune headers that are replaced with CHT - let mut prune_block = new_cht_start; - let new_cht_end = cht::end_number(cht::SIZE, new_cht_number); - trace!(target: "db", "Replacing blocks [{}..{}] with CHT#{}", + // Get block changes trie root, if available. + fn changes_trie_root(&self, block: BlockId) -> ClientResult> { + self.header(block).map(|header| { + header.and_then(|header| { + header + .digest() + .log(DigestItem::as_changes_trie_root) + .cloned() + }) + }) + } + + /// Handle setting head within a transaction. `route_to` should be the last + /// block that existed in the database. `best_to` should be the best block + /// to be set. + /// + /// In the case where the new best block is a block to be imported, `route_to` + /// should be the parent of `best_to`. In the case where we set an existing block + /// to be best, `route_to` should equal to `best_to`. + fn set_head_with_transaction( + &self, + transaction: &mut DBTransaction, + route_to: Block::Hash, + best_to: (NumberFor, Block::Hash), + ) -> Result<(), client::error::Error> { + let lookup_key = utils::number_and_hash_to_lookup_key(best_to.0, &best_to.1); + + // handle reorg. 
+ let meta = self.meta.read(); + if meta.best_hash != Default::default() { + let tree_route = ::client::blockchain::tree_route( + self, + BlockId::Hash(meta.best_hash), + BlockId::Hash(route_to), + )?; + + // update block number to hash lookup entries. + for retracted in tree_route.retracted() { + if retracted.hash == meta.finalized_hash { + // TODO: can we recover here? + warn!( + "Safety failure: reverting finalized block {:?}", + (&retracted.number, &retracted.hash) + ); + } + + utils::remove_number_to_key_mapping( + transaction, + columns::KEY_LOOKUP, + retracted.number, + ); + } + + for enacted in tree_route.enacted() { + utils::insert_number_to_key_mapping( + transaction, + columns::KEY_LOOKUP, + enacted.number, + enacted.hash, + ); + } + } + + transaction.put(columns::META, meta_keys::BEST_BLOCK, &lookup_key); + utils::insert_number_to_key_mapping(transaction, columns::KEY_LOOKUP, best_to.0, best_to.1); + + Ok(()) + } + + // Note that a block is finalized. Only call with child of last finalized block. + fn note_finalized( + &self, + transaction: &mut DBTransaction, + header: &Block::Header, + hash: Block::Hash, + displaced: &mut Option>>, + ) -> ClientResult<()> { + let meta = self.meta.read(); + if &meta.finalized_hash != header.parent_hash() { + return Err( + ::client::error::ErrorKind::NonSequentialFinalization(format!( + "Last finalized {:?} not parent of {:?}", + meta.finalized_hash, hash + )) + .into(), + ); + } + + let lookup_key = utils::number_and_hash_to_lookup_key(header.number().clone(), hash); + transaction.put(columns::META, meta_keys::FINALIZED_BLOCK, &lookup_key); + + // build new CHT(s) if required + if let Some(new_cht_number) = cht::is_build_required(cht::SIZE, *header.number()) { + let new_cht_start: NumberFor = cht::start_number(cht::SIZE, new_cht_number); + + let new_header_cht_root = cht::compute_root::( + cht::SIZE, + new_cht_number, + (new_cht_start.as_()..).map(|num| self.hash(As::sa(num))), + )?; + transaction.put( + columns::CHT, + &cht_key(HEADER_CHT_PREFIX, new_cht_start), + new_header_cht_root.as_ref(), + ); + + // if the header includes changes trie root, let's build a changes tries roots CHT + if header + .digest() + .log(DigestItem::as_changes_trie_root) + .is_some() + { + let new_changes_trie_cht_root = cht::compute_root::( + cht::SIZE, + new_cht_number, + (new_cht_start.as_()..) + .map(|num| self.changes_trie_root(BlockId::Number(As::sa(num)))), + )?; + transaction.put( + columns::CHT, + &cht_key(CHANGES_TRIE_CHT_PREFIX, new_cht_start), + new_changes_trie_cht_root.as_ref(), + ); + } + + // prune headers that are replaced with CHT + let mut prune_block = new_cht_start; + let new_cht_end = cht::end_number(cht::SIZE, new_cht_number); + trace!(target: "db", "Replacing blocks [{}..{}] with CHT#{}", new_cht_start, new_cht_end, new_cht_number); - while prune_block <= new_cht_end { - if let Some(hash) = self.hash(prune_block)? { - let lookup_key = block_id_to_lookup_key::(&*self.db, columns::KEY_LOOKUP, BlockId::Number(prune_block))? + while prune_block <= new_cht_end { + if let Some(hash) = self.hash(prune_block)? { + let lookup_key = block_id_to_lookup_key::(&*self.db, columns::KEY_LOOKUP, BlockId::Number(prune_block))? .expect("retrieved hash for `prune_block` right above. therefore retrieving lookup key must succeed. 
q.e.d."); - utils::remove_key_mappings( - transaction, - columns::KEY_LOOKUP, - prune_block, - hash - ); - transaction.delete(columns::HEADER, &lookup_key); - } - prune_block += One::one(); - } - } - - let new_displaced = self.leaves.write().finalize_height(header.number().clone()); - match displaced { - x @ &mut None => *x = Some(new_displaced), - &mut Some(ref mut displaced) => displaced.merge(new_displaced), - } - - Ok(()) - } - - /// Read CHT root of given type for the block. - fn read_cht_root( - &self, - cht_type: u8, - cht_size: u64, - block: NumberFor - ) -> ClientResult { - let no_cht_for_block = || ClientErrorKind::Backend(format!("CHT for block {} not exists", block)).into(); - - let cht_number = cht::block_to_cht_number(cht_size, block).ok_or_else(no_cht_for_block)?; - let cht_start = cht::start_number(cht_size, cht_number); - self.db.get(columns::CHT, &cht_key(cht_type, cht_start)).map_err(db_err)? - .ok_or_else(no_cht_for_block) - .and_then(|hash| Block::Hash::decode(&mut &*hash).ok_or_else(no_cht_for_block)) - } + utils::remove_key_mappings(transaction, columns::KEY_LOOKUP, prune_block, hash); + transaction.delete(columns::HEADER, &lookup_key); + } + prune_block += One::one(); + } + } + + let new_displaced = self.leaves.write().finalize_height(header.number().clone()); + match displaced { + x @ &mut None => *x = Some(new_displaced), + &mut Some(ref mut displaced) => displaced.merge(new_displaced), + } + + Ok(()) + } + + /// Read CHT root of given type for the block. + fn read_cht_root( + &self, + cht_type: u8, + cht_size: u64, + block: NumberFor, + ) -> ClientResult { + let no_cht_for_block = + || ClientErrorKind::Backend(format!("CHT for block {} not exists", block)).into(); + + let cht_number = cht::block_to_cht_number(cht_size, block).ok_or_else(no_cht_for_block)?; + let cht_start = cht::start_number(cht_size, cht_number); + self.db + .get(columns::CHT, &cht_key(cht_type, cht_start)) + .map_err(db_err)? 
+			.ok_or_else(no_cht_for_block)
+			.and_then(|hash| Block::Hash::decode(&mut &*hash).ok_or_else(no_cht_for_block))
+	}
 }

 impl<Block> AuxStore for LightStorage<Block>
-	where Block: BlockT,
+where
+	Block: BlockT,
 {
-	fn insert_aux<
-		'a,
-		'b: 'a,
-		'c: 'a,
-		I: IntoIterator<Item=&'a(&'c [u8], &'c [u8])>,
-		D: IntoIterator<Item=&'a &'b [u8]>,
-	>(&self, insert: I, delete: D) -> ClientResult<()> {
-		let mut transaction = DBTransaction::new();
-		for (k, v) in insert {
-			transaction.put(columns::AUX, k, v);
-		}
-		for k in delete {
-			transaction.delete(columns::AUX, k);
-		}
-		self.db.write(transaction).map_err(db_err)
-	}
-
-	fn get_aux(&self, key: &[u8]) -> ClientResult<Option<Vec<u8>>> {
-		self.db.get(columns::AUX, key).map(|r| r.map(|v| v.to_vec())).map_err(db_err)
-	}
+	fn insert_aux<
+		'a,
+		'b: 'a,
+		'c: 'a,
+		I: IntoIterator<Item = &'a (&'c [u8], &'c [u8])>,
+		D: IntoIterator<Item = &'a &'b [u8]>,
+	>(
+		&self,
+		insert: I,
+		delete: D,
+	) -> ClientResult<()> {
+		let mut transaction = DBTransaction::new();
+		for (k, v) in insert {
+			transaction.put(columns::AUX, k, v);
+		}
+		for k in delete {
+			transaction.delete(columns::AUX, k);
+		}
+		self.db.write(transaction).map_err(db_err)
+	}
+
+	fn get_aux(&self, key: &[u8]) -> ClientResult<Option<Vec<u8>>> {
+		self.db
+			.get(columns::AUX, key)
+			.map(|r| r.map(|v| v.to_vec()))
+			.map_err(db_err)
+	}
 }

 impl<Block> LightBlockchainStorage<Block> for LightStorage<Block>
-	where Block: BlockT,
+where
+	Block: BlockT,
 {
-	fn import_header(
-		&self,
-		header: Block::Header,
-		cache_at: HashMap<well_known_cache_keys::Id, Vec<u8>>,
-		leaf_state: NewBlockState,
-		aux_ops: Vec<(Vec<u8>, Option<Vec<u8>>)>,
-	) -> ClientResult<()> {
-		let mut finalization_displaced_leaves = None;
-		let mut transaction = DBTransaction::new();
-
-		let hash = header.hash();
-		let number = *header.number();
-		let parent_hash = *header.parent_hash();
-
-		for (key, maybe_val) in aux_ops {
-			match maybe_val {
-				Some(val) => transaction.put_vec(columns::AUX, &key, val),
-				None => transaction.delete(columns::AUX, &key),
-			}
-		}
-
-		// blocks are keyed by number + hash.
-		let lookup_key = utils::number_and_hash_to_lookup_key(number, &hash);
-
-		if leaf_state.is_best() {
-			self.set_head_with_transaction(&mut transaction, parent_hash, (number, hash))?;
-		}
-
-		utils::insert_hash_to_key_mapping(
-			&mut transaction,
-			columns::KEY_LOOKUP,
-			number,
-			hash,
-		);
-		transaction.put(columns::HEADER, &lookup_key, &header.encode());
-
-		let is_genesis = number.is_zero();
-		if is_genesis {
-			transaction.put(columns::META, meta_keys::GENESIS_HASH, hash.as_ref());
-		}
-
-		let finalized = match leaf_state {
-			_ if is_genesis => true,
-			NewBlockState::Final => true,
-			_ => false,
-		};
-
-		if finalized {
-			self.note_finalized(
-				&mut transaction,
-				&header,
-				hash,
-				&mut finalization_displaced_leaves,
-			)?;
-		}
-
-		{
-			let mut leaves = self.leaves.write();
-			let displaced_leaf = leaves.import(hash, number, parent_hash);
-
-			let mut cache = self.cache.0.write();
-			let cache_ops = cache.transaction(&mut transaction)
-				.on_block_insert(
-					ComplexBlockId::new(*header.parent_hash(), if number.is_zero() { Zero::zero() } else { number - One::one() }),
-					ComplexBlockId::new(hash, number),
-					cache_at,
-					finalized,
-				)?
-				.into_ops();
-
-			debug!("Light DB Commit {:?} ({})", hash, number);
-			let write_result = self.db.write(transaction).map_err(db_err);
-			if let Err(e) = write_result {
-				let mut leaves = self.leaves.write();
-				let mut undo = leaves.undo();
-
-				// revert leaves set update if there was one.
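// The error path above follows a stage-then-undo pattern: in-memory state
// (the leaf set) is updated first and rolled back only if the batched DB
// write fails. A simplified, self-contained sketch of that pattern (names
// are illustrative, not this crate's API):
//
//	fn commit_or_undo<S, E>(
//		state: &mut S,
//		stage: impl FnOnce(&mut S),
//		write: impl FnOnce() -> Result<(), E>,
//		undo: impl FnOnce(&mut S),
//	) -> Result<(), E> {
//		stage(state); // e.g. leaves.import(hash, number, parent_hash)
//		if let Err(e) = write() {
//			undo(state); // e.g. leaves.undo().undo_import(displaced_leaf)
//			return Err(e);
//		}
//		Ok(())
//	}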
- if let Some(displaced_leaf) = displaced_leaf { - undo.undo_import(displaced_leaf); - } - - if let Some(finalization_displaced) = finalization_displaced_leaves { - undo.undo_finalization(finalization_displaced); - } - - return Err(e); - } - - cache.commit(cache_ops); - } - - self.update_meta(hash, number, leaf_state.is_best(), finalized); - - Ok(()) - } - - fn set_head(&self, id: BlockId) -> ClientResult<()> { - if let Some(header) = self.header(id)? { - let hash = header.hash(); - let number = header.number(); - - let mut transaction = DBTransaction::new(); - self.set_head_with_transaction(&mut transaction, hash.clone(), (number.clone(), hash.clone()))?; - self.db.write(transaction).map_err(db_err)?; - Ok(()) - } else { - Err(ClientErrorKind::UnknownBlock(format!("Cannot set head {:?}", id)).into()) - } - } - - fn header_cht_root(&self, cht_size: u64, block: NumberFor) -> ClientResult { - self.read_cht_root(HEADER_CHT_PREFIX, cht_size, block) - } - - fn changes_trie_cht_root(&self, cht_size: u64, block: NumberFor) -> ClientResult { - self.read_cht_root(CHANGES_TRIE_CHT_PREFIX, cht_size, block) - } - - fn finalize_header(&self, id: BlockId) -> ClientResult<()> { - if let Some(header) = self.header(id)? { - let mut displaced = None; - let mut transaction = DBTransaction::new(); - let hash = header.hash(); - let number = *header.number(); - self.note_finalized(&mut transaction, &header, hash.clone(), &mut displaced)?; - { - let mut cache = self.cache.0.write(); - let cache_ops = cache.transaction(&mut transaction) - .on_block_finalize( - ComplexBlockId::new(*header.parent_hash(), if number.is_zero() { Zero::zero() } else { number - One::one() }), - ComplexBlockId::new(hash, number) - )? - .into_ops(); - - if let Err(e) = self.db.write(transaction).map_err(db_err) { - if let Some(displaced) = displaced { - self.leaves.write().undo().undo_finalization(displaced); - } - return Err(e); - } - cache.commit(cache_ops); - } - self.update_meta(hash, header.number().clone(), false, true); - - Ok(()) - } else { - Err(ClientErrorKind::UnknownBlock(format!("Cannot finalize block {:?}", id)).into()) - } - } - - fn last_finalized(&self) -> ClientResult { - Ok(self.meta.read().finalized_hash.clone()) - } - - fn cache(&self) -> Option>> { - Some(self.cache.clone()) - } + fn import_header( + &self, + header: Block::Header, + cache_at: HashMap>, + leaf_state: NewBlockState, + aux_ops: Vec<(Vec, Option>)>, + ) -> ClientResult<()> { + let mut finalization_displaced_leaves = None; + let mut transaction = DBTransaction::new(); + + let hash = header.hash(); + let number = *header.number(); + let parent_hash = *header.parent_hash(); + + for (key, maybe_val) in aux_ops { + match maybe_val { + Some(val) => transaction.put_vec(columns::AUX, &key, val), + None => transaction.delete(columns::AUX, &key), + } + } + + // blocks are keyed by number + hash. 
+ let lookup_key = utils::number_and_hash_to_lookup_key(number, &hash); + + if leaf_state.is_best() { + self.set_head_with_transaction(&mut transaction, parent_hash, (number, hash))?; + } + + utils::insert_hash_to_key_mapping(&mut transaction, columns::KEY_LOOKUP, number, hash); + transaction.put(columns::HEADER, &lookup_key, &header.encode()); + + let is_genesis = number.is_zero(); + if is_genesis { + transaction.put(columns::META, meta_keys::GENESIS_HASH, hash.as_ref()); + } + + let finalized = match leaf_state { + _ if is_genesis => true, + NewBlockState::Final => true, + _ => false, + }; + + if finalized { + self.note_finalized( + &mut transaction, + &header, + hash, + &mut finalization_displaced_leaves, + )?; + } + + { + let mut leaves = self.leaves.write(); + let displaced_leaf = leaves.import(hash, number, parent_hash); + + let mut cache = self.cache.0.write(); + let cache_ops = cache + .transaction(&mut transaction) + .on_block_insert( + ComplexBlockId::new( + *header.parent_hash(), + if number.is_zero() { + Zero::zero() + } else { + number - One::one() + }, + ), + ComplexBlockId::new(hash, number), + cache_at, + finalized, + )? + .into_ops(); + + debug!("Light DB Commit {:?} ({})", hash, number); + let write_result = self.db.write(transaction).map_err(db_err); + if let Err(e) = write_result { + let mut leaves = self.leaves.write(); + let mut undo = leaves.undo(); + + // revert leaves set update if there was one. + if let Some(displaced_leaf) = displaced_leaf { + undo.undo_import(displaced_leaf); + } + + if let Some(finalization_displaced) = finalization_displaced_leaves { + undo.undo_finalization(finalization_displaced); + } + + return Err(e); + } + + cache.commit(cache_ops); + } + + self.update_meta(hash, number, leaf_state.is_best(), finalized); + + Ok(()) + } + + fn set_head(&self, id: BlockId) -> ClientResult<()> { + if let Some(header) = self.header(id)? { + let hash = header.hash(); + let number = header.number(); + + let mut transaction = DBTransaction::new(); + self.set_head_with_transaction( + &mut transaction, + hash.clone(), + (number.clone(), hash.clone()), + )?; + self.db.write(transaction).map_err(db_err)?; + Ok(()) + } else { + Err(ClientErrorKind::UnknownBlock(format!("Cannot set head {:?}", id)).into()) + } + } + + fn header_cht_root(&self, cht_size: u64, block: NumberFor) -> ClientResult { + self.read_cht_root(HEADER_CHT_PREFIX, cht_size, block) + } + + fn changes_trie_cht_root( + &self, + cht_size: u64, + block: NumberFor, + ) -> ClientResult { + self.read_cht_root(CHANGES_TRIE_CHT_PREFIX, cht_size, block) + } + + fn finalize_header(&self, id: BlockId) -> ClientResult<()> { + if let Some(header) = self.header(id)? { + let mut displaced = None; + let mut transaction = DBTransaction::new(); + let hash = header.hash(); + let number = *header.number(); + self.note_finalized(&mut transaction, &header, hash.clone(), &mut displaced)?; + { + let mut cache = self.cache.0.write(); + let cache_ops = cache + .transaction(&mut transaction) + .on_block_finalize( + ComplexBlockId::new( + *header.parent_hash(), + if number.is_zero() { + Zero::zero() + } else { + number - One::one() + }, + ), + ComplexBlockId::new(hash, number), + )? 
+ .into_ops(); + + if let Err(e) = self.db.write(transaction).map_err(db_err) { + if let Some(displaced) = displaced { + self.leaves.write().undo().undo_finalization(displaced); + } + return Err(e); + } + cache.commit(cache_ops); + } + self.update_meta(hash, header.number().clone(), false, true); + + Ok(()) + } else { + Err(ClientErrorKind::UnknownBlock(format!("Cannot finalize block {:?}", id)).into()) + } + } + + fn last_finalized(&self) -> ClientResult { + Ok(self.meta.read().finalized_hash.clone()) + } + + fn cache(&self) -> Option>> { + Some(self.cache.clone()) + } } /// Build the key for inserting header-CHT at given block. fn cht_key>(cht_type: u8, block: N) -> [u8; 5] { - let mut key = [cht_type; 5]; - key[1..].copy_from_slice(&utils::number_index_key(block)); - key + let mut key = [cht_type; 5]; + key[1..].copy_from_slice(&utils::number_index_key(block)); + key } #[cfg(test)] pub(crate) mod tests { - use client::cht; - use runtime_primitives::generic::DigestItem; - use runtime_primitives::testing::{H256 as Hash, Header, Block as RawBlock, ExtrinsicWrapper}; - use runtime_primitives::traits::AuthorityIdFor; - use super::*; - - type Block = RawBlock>; - type AuthorityId = AuthorityIdFor; - - pub fn default_header(parent: &Hash, number: u64) -> Header { - Header { - number: number.into(), - parent_hash: *parent, - state_root: Hash::random(), - digest: Default::default(), - extrinsics_root: Default::default(), - } - } - - fn header_with_changes_trie(parent: &Hash, number: u64) -> Header { - let mut header = default_header(parent, number); - header.digest.logs.push(DigestItem::ChangesTrieRoot([(number % 256) as u8; 32].into())); - header - } - - fn header_with_extrinsics_root(parent: &Hash, number: u64, extrinsics_root: Hash) -> Header { - let mut header = default_header(parent, number); - header.extrinsics_root = extrinsics_root; - header - } - - pub fn insert_block Header>( - db: &LightStorage, - cache: HashMap>, - header: F, - ) -> Hash { - let header = header(); - let hash = header.hash(); - db.import_header(header, cache, NewBlockState::Best, Vec::new()).unwrap(); - hash - } - - fn insert_final_block Header>( - db: &LightStorage, - cache: HashMap>, - header: F, - ) -> Hash { - let header = header(); - let hash = header.hash(); - db.import_header(header, cache, NewBlockState::Final, Vec::new()).unwrap(); - hash - } - - fn insert_non_best_block Header>( - db: &LightStorage, - cache: HashMap>, - header: F, - ) -> Hash { - let header = header(); - let hash = header.hash(); - db.import_header(header, cache, NewBlockState::Normal, Vec::new()).unwrap(); - hash - } - - #[test] - fn returns_known_header() { - let db = LightStorage::new_test(); - let known_hash = insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); - let header_by_hash = db.header(BlockId::Hash(known_hash)).unwrap().unwrap(); - let header_by_number = db.header(BlockId::Number(0)).unwrap().unwrap(); - assert_eq!(header_by_hash, header_by_number); - } - - #[test] - fn does_not_return_unknown_header() { - let db = LightStorage::::new_test(); - assert!(db.header(BlockId::Hash(Hash::from_low_u64_be(1))).unwrap().is_none()); - assert!(db.header(BlockId::Number(0)).unwrap().is_none()); - } - - #[test] - fn returns_info() { - let db = LightStorage::new_test(); - let genesis_hash = insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); - let info = db.info().unwrap(); - assert_eq!(info.best_hash, genesis_hash); - assert_eq!(info.best_number, 0); - assert_eq!(info.genesis_hash, 
genesis_hash); - let best_hash = insert_block(&db, HashMap::new(), || default_header(&genesis_hash, 1)); - let info = db.info().unwrap(); - assert_eq!(info.best_hash, best_hash); - assert_eq!(info.best_number, 1); - assert_eq!(info.genesis_hash, genesis_hash); - } - - #[test] - fn returns_block_status() { - let db = LightStorage::new_test(); - let genesis_hash = insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); - assert_eq!(db.status(BlockId::Hash(genesis_hash)).unwrap(), BlockStatus::InChain); - assert_eq!(db.status(BlockId::Number(0)).unwrap(), BlockStatus::InChain); - assert_eq!(db.status(BlockId::Hash(Hash::from_low_u64_be(1))).unwrap(), BlockStatus::Unknown); - assert_eq!(db.status(BlockId::Number(1)).unwrap(), BlockStatus::Unknown); - } - - #[test] - fn returns_block_hash() { - let db = LightStorage::new_test(); - let genesis_hash = insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); - assert_eq!(db.hash(0).unwrap(), Some(genesis_hash)); - assert_eq!(db.hash(1).unwrap(), None); - } - - #[test] - fn import_header_works() { - let db = LightStorage::new_test(); - - let genesis_hash = insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); - assert_eq!(db.db.iter(columns::HEADER).count(), 1); - assert_eq!(db.db.iter(columns::KEY_LOOKUP).count(), 2); - - let _ = insert_block(&db, HashMap::new(), || default_header(&genesis_hash, 1)); - assert_eq!(db.db.iter(columns::HEADER).count(), 2); - assert_eq!(db.db.iter(columns::KEY_LOOKUP).count(), 4); - } - - #[test] - fn finalized_ancient_headers_are_replaced_with_cht() { - fn insert_headers Header>(header_producer: F) -> LightStorage { - let db = LightStorage::new_test(); - - // insert genesis block header (never pruned) - let mut prev_hash = insert_final_block(&db, HashMap::new(), || header_producer(&Default::default(), 0)); - - // insert SIZE blocks && ensure that nothing is pruned - for number in 0..cht::SIZE { - prev_hash = insert_block(&db, HashMap::new(), || header_producer(&prev_hash, 1 + number)); - } - assert_eq!(db.db.iter(columns::HEADER).count(), (1 + cht::SIZE) as usize); - assert_eq!(db.db.iter(columns::CHT).count(), 0); - - // insert next SIZE blocks && ensure that nothing is pruned - for number in 0..cht::SIZE { - prev_hash = insert_block(&db, HashMap::new(), || header_producer(&prev_hash, 1 + cht::SIZE + number)); - } - assert_eq!(db.db.iter(columns::HEADER).count(), (1 + cht::SIZE + cht::SIZE) as usize); - assert_eq!(db.db.iter(columns::CHT).count(), 0); - - // insert block #{2 * cht::SIZE + 1} && check that new CHT is created + headers of this CHT are pruned - // nothing is yet finalized, so nothing is pruned. - prev_hash = insert_block(&db, HashMap::new(), || header_producer(&prev_hash, 1 + cht::SIZE + cht::SIZE)); - assert_eq!(db.db.iter(columns::HEADER).count(), (2 + cht::SIZE + cht::SIZE) as usize); - assert_eq!(db.db.iter(columns::CHT).count(), 0); - - // now finalize the block. 
- for i in (0..(cht::SIZE + cht::SIZE)).map(|i| i + 1) { - db.finalize_header(BlockId::Number(i)).unwrap(); - } - db.finalize_header(BlockId::Hash(prev_hash)).unwrap(); - db - } - - // when headers are created without changes tries roots - let db = insert_headers(default_header); - assert_eq!(db.db.iter(columns::HEADER).count(), (1 + cht::SIZE + 1) as usize); - assert_eq!(db.db.iter(columns::KEY_LOOKUP).count(), (2 * (1 + cht::SIZE + 1)) as usize); - assert_eq!(db.db.iter(columns::CHT).count(), 1); - assert!((0..cht::SIZE).all(|i| db.header(BlockId::Number(1 + i)).unwrap().is_none())); - assert!(db.header_cht_root(cht::SIZE, cht::SIZE / 2).is_ok()); - assert!(db.header_cht_root(cht::SIZE, cht::SIZE + cht::SIZE / 2).is_err()); - assert!(db.changes_trie_cht_root(cht::SIZE, cht::SIZE / 2).is_err()); - assert!(db.changes_trie_cht_root(cht::SIZE, cht::SIZE + cht::SIZE / 2).is_err()); - - // when headers are created with changes tries roots - let db = insert_headers(header_with_changes_trie); - assert_eq!(db.db.iter(columns::HEADER).count(), (1 + cht::SIZE + 1) as usize); - assert_eq!(db.db.iter(columns::CHT).count(), 2); - assert!((0..cht::SIZE).all(|i| db.header(BlockId::Number(1 + i)).unwrap().is_none())); - assert!(db.header_cht_root(cht::SIZE, cht::SIZE / 2).is_ok()); - assert!(db.header_cht_root(cht::SIZE, cht::SIZE + cht::SIZE / 2).is_err()); - assert!(db.changes_trie_cht_root(cht::SIZE, cht::SIZE / 2).is_ok()); - assert!(db.changes_trie_cht_root(cht::SIZE, cht::SIZE + cht::SIZE / 2).is_err()); - } - - #[test] - fn get_cht_fails_for_genesis_block() { - assert!(LightStorage::::new_test().header_cht_root(cht::SIZE, 0).is_err()); - } - - #[test] - fn get_cht_fails_for_non_existant_cht() { - assert!(LightStorage::::new_test().header_cht_root(cht::SIZE, (cht::SIZE / 2) as u64).is_err()); - } - - #[test] - fn get_cht_works() { - let db = LightStorage::new_test(); - - // insert 1 + SIZE + SIZE + 1 blocks so that CHT#0 is created - let mut prev_hash = insert_final_block(&db, HashMap::new(), || header_with_changes_trie(&Default::default(), 0)); - for i in 1..1 + cht::SIZE + cht::SIZE + 1 { - prev_hash = insert_block(&db, HashMap::new(), || header_with_changes_trie(&prev_hash, i as u64)); - db.finalize_header(BlockId::Hash(prev_hash)).unwrap(); - } - - let cht_root_1 = db.header_cht_root(cht::SIZE, cht::start_number(cht::SIZE, 0)).unwrap(); - let cht_root_2 = db.header_cht_root(cht::SIZE, (cht::start_number(cht::SIZE, 0) + cht::SIZE / 2) as u64).unwrap(); - let cht_root_3 = db.header_cht_root(cht::SIZE, cht::end_number(cht::SIZE, 0)).unwrap(); - assert_eq!(cht_root_1, cht_root_2); - assert_eq!(cht_root_2, cht_root_3); - - let cht_root_1 = db.changes_trie_cht_root(cht::SIZE, cht::start_number(cht::SIZE, 0)).unwrap(); - let cht_root_2 = db.changes_trie_cht_root(cht::SIZE, (cht::start_number(cht::SIZE, 0) + cht::SIZE / 2) as u64).unwrap(); - let cht_root_3 = db.changes_trie_cht_root(cht::SIZE, cht::end_number(cht::SIZE, 0)).unwrap(); - assert_eq!(cht_root_1, cht_root_2); - assert_eq!(cht_root_2, cht_root_3); - } - - #[test] - fn tree_route_works() { - let db = LightStorage::new_test(); - let block0 = insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); - - // fork from genesis: 3 prong. - let a1 = insert_block(&db, HashMap::new(), || default_header(&block0, 1)); - let a2 = insert_block(&db, HashMap::new(), || default_header(&a1, 2)); - let a3 = insert_block(&db, HashMap::new(), || default_header(&a2, 3)); - - // fork from genesis: 2 prong. 
- let b1 = insert_block(&db, HashMap::new(), || header_with_extrinsics_root(&block0, 1, Hash::from([1; 32]))); - let b2 = insert_block(&db, HashMap::new(), || default_header(&b1, 2)); - - { - let tree_route = ::client::blockchain::tree_route( - &db, - BlockId::Hash(a3), - BlockId::Hash(b2) - ).unwrap(); - - assert_eq!(tree_route.common_block().hash, block0); - assert_eq!(tree_route.retracted().iter().map(|r| r.hash).collect::>(), vec![a3, a2, a1]); - assert_eq!(tree_route.enacted().iter().map(|r| r.hash).collect::>(), vec![b1, b2]); - } - - { - let tree_route = ::client::blockchain::tree_route( - &db, - BlockId::Hash(a1), - BlockId::Hash(a3), - ).unwrap(); - - assert_eq!(tree_route.common_block().hash, a1); - assert!(tree_route.retracted().is_empty()); - assert_eq!(tree_route.enacted().iter().map(|r| r.hash).collect::>(), vec![a2, a3]); - } - - { - let tree_route = ::client::blockchain::tree_route( - &db, - BlockId::Hash(a3), - BlockId::Hash(a1), - ).unwrap(); - - assert_eq!(tree_route.common_block().hash, a1); - assert_eq!(tree_route.retracted().iter().map(|r| r.hash).collect::>(), vec![a3, a2]); - assert!(tree_route.enacted().is_empty()); - } - - { - let tree_route = ::client::blockchain::tree_route( - &db, - BlockId::Hash(a2), - BlockId::Hash(a2), - ).unwrap(); - - assert_eq!(tree_route.common_block().hash, a2); - assert!(tree_route.retracted().is_empty()); - assert!(tree_route.enacted().is_empty()); - } - } - - #[test] - fn authorities_are_cached() { - let db = LightStorage::new_test(); - - fn run_checks(db: &LightStorage, max: u64, checks: &[(u64, Option>>)]) { - for (at, expected) in checks.iter().take_while(|(at, _)| *at <= max) { - let actual = get_authorities(db.cache(), BlockId::Number(*at)); - assert_eq!(*expected, actual); - } - } - - fn same_authorities() -> HashMap> { - HashMap::new() - } - - fn make_authorities(authorities: Vec) -> HashMap> { - let mut map = HashMap::new(); - map.insert(well_known_cache_keys::AUTHORITIES, authorities.encode()); - map - } - - fn get_authorities(cache: &BlockchainCache, at: BlockId) -> Option> { - cache.get_at(&well_known_cache_keys::AUTHORITIES, &at).and_then(|val| Decode::decode(&mut &val[..])) - } - - let auth1 = || AuthorityId::from_raw([1u8; 32]); - let auth2 = || AuthorityId::from_raw([2u8; 32]); - let auth3 = || AuthorityId::from_raw([3u8; 32]); - let auth4 = || AuthorityId::from_raw([4u8; 32]); - let auth5 = || AuthorityId::from_raw([5u8; 32]); - let auth6 = || AuthorityId::from_raw([6u8; 32]); - - let (hash2, hash6) = { - // first few blocks are instantly finalized - // B0(None) -> B1(None) -> B2(1) -> B3(1) -> B4(1, 2) -> B5(1, 2) -> B6(1, 2) - let checks = vec![ - (0, None), - (1, None), - (2, Some(vec![auth1()])), - (3, Some(vec![auth1()])), - (4, Some(vec![auth1(), auth2()])), - (5, Some(vec![auth1(), auth2()])), - (6, Some(vec![auth1(), auth2()])), - ]; - - let hash0 = insert_final_block(&db, same_authorities(), || default_header(&Default::default(), 0)); - run_checks(&db, 0, &checks); - let hash1 = insert_final_block(&db, same_authorities(), || default_header(&hash0, 1)); - run_checks(&db, 1, &checks); - let hash2 = insert_final_block(&db, make_authorities(vec![auth1()]), || default_header(&hash1, 2)); - run_checks(&db, 2, &checks); - let hash3 = insert_final_block(&db, make_authorities(vec![auth1()]), || default_header(&hash2, 3)); - run_checks(&db, 3, &checks); - let hash4 = insert_final_block(&db, make_authorities(vec![auth1(), auth2()]), || default_header(&hash3, 4)); - run_checks(&db, 4, &checks); - let hash5 = 
insert_final_block(&db, make_authorities(vec![auth1(), auth2()]), || default_header(&hash4, 5)); - run_checks(&db, 5, &checks); - let hash6 = insert_final_block(&db, same_authorities(), || default_header(&hash5, 6)); - run_checks(&db, 6, &checks); - - (hash2, hash6) - }; - - { - // some older non-best blocks are inserted - // ... -> B2(1) -> B2_1(1) -> B2_2(2) - // => the cache ignores all writes before best finalized block - let hash2_1 = insert_non_best_block(&db, make_authorities(vec![auth1()]), || default_header(&hash2, 3)); - assert_eq!(None, get_authorities(db.cache(), BlockId::Hash(hash2_1))); - let hash2_2 = insert_non_best_block(&db, make_authorities(vec![auth1(), auth2()]), || default_header(&hash2_1, 4)); - assert_eq!(None, get_authorities(db.cache(), BlockId::Hash(hash2_2))); - } - - let (hash7, hash8, hash6_1, hash6_2, hash6_1_1, hash6_1_2) = { - // inserting non-finalized blocks - // B6(None) -> B7(3) -> B8(3) - // \> B6_1(4) -> B6_2(4) - // \> B6_1_1(5) - // \> B6_1_2(6) -> B6_1_3(7) - - let hash7 = insert_block(&db, make_authorities(vec![auth3()]), || default_header(&hash6, 7)); - assert_eq!( - get_authorities(db.cache(), BlockId::Hash(hash6)), - Some(vec![auth1(), auth2()]), - ); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash7)), Some(vec![auth3()])); - let hash8 = insert_block(&db, make_authorities(vec![auth3()]), || default_header(&hash7, 8)); - assert_eq!( - get_authorities(db.cache(), BlockId::Hash(hash6)), - Some(vec![auth1(), auth2()]), - ); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash7)), Some(vec![auth3()])); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash8)), Some(vec![auth3()])); - let hash6_1 = insert_block(&db, make_authorities(vec![auth4()]), || default_header(&hash6, 7)); - assert_eq!( - get_authorities(db.cache(), BlockId::Hash(hash6)), - Some(vec![auth1(), auth2()]), - ); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash7)), Some(vec![auth3()])); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash8)), Some(vec![auth3()])); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash6_1)), Some(vec![auth4()])); - let hash6_1_1 = insert_non_best_block(&db, make_authorities(vec![auth5()]), || default_header(&hash6_1, 8)); - assert_eq!( - get_authorities(db.cache(), BlockId::Hash(hash6)), - Some(vec![auth1(), auth2()]), - ); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash7)), Some(vec![auth3()])); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash8)), Some(vec![auth3()])); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash6_1)), Some(vec![auth4()])); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash6_1_1)), Some(vec![auth5()])); - let hash6_1_2 = insert_non_best_block(&db, make_authorities(vec![auth6()]), || default_header(&hash6_1, 8)); - assert_eq!( - get_authorities(db.cache(), BlockId::Hash(hash6)), - Some(vec![auth1(), auth2()]), - ); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash7)), Some(vec![auth3()])); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash8)), Some(vec![auth3()])); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash6_1)), Some(vec![auth4()])); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash6_1_1)), Some(vec![auth5()])); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash6_1_2)), Some(vec![auth6()])); - let hash6_2 = insert_block(&db, make_authorities(vec![auth4()]), || default_header(&hash6_1, 8)); - assert_eq!( - get_authorities(db.cache(), BlockId::Hash(hash6)), - Some(vec![auth1(), auth2()]), 
- ); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash7)), Some(vec![auth3()])); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash8)), Some(vec![auth3()])); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash6_1)), Some(vec![auth4()])); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash6_1_1)), Some(vec![auth5()])); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash6_1_2)), Some(vec![auth6()])); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash6_2)), Some(vec![auth4()])); - - (hash7, hash8, hash6_1, hash6_2, hash6_1_1, hash6_1_2) - }; - - { - // finalize block hash6_1 - db.finalize_header(BlockId::Hash(hash6_1)).unwrap(); - assert_eq!( - get_authorities(db.cache(), BlockId::Hash(hash6)), - Some(vec![auth1(), auth2()]), - ); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash7)), None); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash8)), None); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash6_1)), Some(vec![auth4()])); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash6_1_1)), Some(vec![auth5()])); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash6_1_2)), Some(vec![auth6()])); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash6_2)), Some(vec![auth4()])); - // finalize block hash6_2 - db.finalize_header(BlockId::Hash(hash6_2)).unwrap(); - assert_eq!( - get_authorities(db.cache(), BlockId::Hash(hash6)), - Some(vec![auth1(), auth2()]), - ); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash7)), None); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash8)), None); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash6_1)), Some(vec![auth4()])); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash6_1_1)), None); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash6_1_2)), None); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash6_2)), Some(vec![auth4()])); - } - } - - #[test] - fn database_is_reopened() { - let db = LightStorage::new_test(); - let hash0 = insert_final_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); - assert_eq!(db.info().unwrap().best_hash, hash0); - assert_eq!(db.header(BlockId::Hash(hash0)).unwrap().unwrap().hash(), hash0); - - let db = db.db; - let db = LightStorage::from_kvdb(db).unwrap(); - assert_eq!(db.info().unwrap().best_hash, hash0); - assert_eq!(db.header(BlockId::Hash::(hash0)).unwrap().unwrap().hash(), hash0); - } - - #[test] - fn aux_store_works() { - let db = LightStorage::::new_test(); - - // insert aux1 + aux2 using direct store access - db.insert_aux(&[(&[1][..], &[101][..]), (&[2][..], &[102][..])], ::std::iter::empty()).unwrap(); - - // check aux values - assert_eq!(db.get_aux(&[1]).unwrap(), Some(vec![101])); - assert_eq!(db.get_aux(&[2]).unwrap(), Some(vec![102])); - assert_eq!(db.get_aux(&[3]).unwrap(), None); - - // delete aux1 + insert aux3 using import operation - db.import_header(default_header(&Default::default(), 0), HashMap::new(), NewBlockState::Best, vec![ - (vec![3], Some(vec![103])), - (vec![1], None), - ]).unwrap(); - - // check aux values - assert_eq!(db.get_aux(&[1]).unwrap(), None); - assert_eq!(db.get_aux(&[2]).unwrap(), Some(vec![102])); - assert_eq!(db.get_aux(&[3]).unwrap(), Some(vec![103])); - } - - #[test] - fn test_leaves_pruned_on_finality() { - let db = LightStorage::::new_test(); - let block0 = insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); - - let block1_a = insert_block(&db, HashMap::new(), || default_header(&block0, 
1));
-		let block1_b = insert_block(&db, HashMap::new(), || header_with_extrinsics_root(&block0, 1, [1; 32].into()));
-		let block1_c = insert_block(&db, HashMap::new(), || header_with_extrinsics_root(&block0, 1, [2; 32].into()));
-
-		assert_eq!(db.leaves.read().hashes(), vec![block1_a, block1_b, block1_c]);
-
-		let block2_a = insert_block(&db, HashMap::new(), || default_header(&block1_a, 2));
-		let block2_b = insert_block(&db, HashMap::new(), || header_with_extrinsics_root(&block1_b, 2, [1; 32].into()));
-		let block2_c = insert_block(&db, HashMap::new(), || header_with_extrinsics_root(&block1_b, 2, [2; 32].into()));
-
-		assert_eq!(db.leaves.read().hashes(), vec![block2_a, block2_b, block2_c, block1_c]);
-
-		db.finalize_header(BlockId::hash(block1_a)).unwrap();
-		db.finalize_header(BlockId::hash(block2_a)).unwrap();
-
-		// leaves at same height stay. Leaves at lower heights pruned.
-		assert_eq!(db.leaves.read().hashes(), vec![block2_a, block2_b, block2_c]);
-	}
+    use super::*;
+    use client::cht;
+    use runtime_primitives::generic::DigestItem;
+    use runtime_primitives::testing::{Block as RawBlock, ExtrinsicWrapper, Header, H256 as Hash};
+    use runtime_primitives::traits::AuthorityIdFor;
+
+    type Block = RawBlock<ExtrinsicWrapper<u32>>;
+    type AuthorityId = AuthorityIdFor<Block>;
+
+    pub fn default_header(parent: &Hash, number: u64) -> Header {
+        Header {
+            number: number.into(),
+            parent_hash: *parent,
+            state_root: Hash::random(),
+            digest: Default::default(),
+            extrinsics_root: Default::default(),
+        }
+    }
+
+    fn header_with_changes_trie(parent: &Hash, number: u64) -> Header {
+        let mut header = default_header(parent, number);
+        header.digest.logs.push(DigestItem::ChangesTrieRoot(
+            [(number % 256) as u8; 32].into(),
+        ));
+        header
+    }
+
+    fn header_with_extrinsics_root(parent: &Hash, number: u64, extrinsics_root: Hash) -> Header {
+        let mut header = default_header(parent, number);
+        header.extrinsics_root = extrinsics_root;
+        header
+    }
+
+    pub fn insert_block<F: Fn() -> Header>(
+        db: &LightStorage<Block>,
+        cache: HashMap<well_known_cache_keys::Id, Vec<u8>>,
+        header: F,
+    ) -> Hash {
+        let header = header();
+        let hash = header.hash();
+        db.import_header(header, cache, NewBlockState::Best, Vec::new())
+            .unwrap();
+        hash
+    }
+
+    fn insert_final_block<F: Fn() -> Header>(
+        db: &LightStorage<Block>,
+        cache: HashMap<well_known_cache_keys::Id, Vec<u8>>,
+        header: F,
+    ) -> Hash {
+        let header = header();
+        let hash = header.hash();
+        db.import_header(header, cache, NewBlockState::Final, Vec::new())
+            .unwrap();
+        hash
+    }
+
+    fn insert_non_best_block<F: Fn() -> Header>(
+        db: &LightStorage<Block>,
+        cache: HashMap<well_known_cache_keys::Id, Vec<u8>>,
+        header: F,
+    ) -> Hash {
+        let header = header();
+        let hash = header.hash();
+        db.import_header(header, cache, NewBlockState::Normal, Vec::new())
+            .unwrap();
+        hash
+    }
+
+    #[test]
+    fn returns_known_header() {
+        let db = LightStorage::new_test();
+        let known_hash = insert_block(&db, HashMap::new(), || {
+            default_header(&Default::default(), 0)
+        });
+        let header_by_hash = db.header(BlockId::Hash(known_hash)).unwrap().unwrap();
+        let header_by_number = db.header(BlockId::Number(0)).unwrap().unwrap();
+        assert_eq!(header_by_hash, header_by_number);
+    }
+
+    #[test]
+    fn does_not_return_unknown_header() {
+        let db = LightStorage::<Block>::new_test();
+        assert!(db
+            .header(BlockId::Hash(Hash::from_low_u64_be(1)))
+            .unwrap()
+            .is_none());
+        assert!(db.header(BlockId::Number(0)).unwrap().is_none());
+    }
+
+    #[test]
+    fn returns_info() {
+        let db = LightStorage::new_test();
+        let genesis_hash = insert_block(&db, HashMap::new(), || {
+            default_header(&Default::default(), 0)
+        });
+        let info = db.info().unwrap();
+
assert_eq!(info.best_hash, genesis_hash); + assert_eq!(info.best_number, 0); + assert_eq!(info.genesis_hash, genesis_hash); + let best_hash = insert_block(&db, HashMap::new(), || default_header(&genesis_hash, 1)); + let info = db.info().unwrap(); + assert_eq!(info.best_hash, best_hash); + assert_eq!(info.best_number, 1); + assert_eq!(info.genesis_hash, genesis_hash); + } + + #[test] + fn returns_block_status() { + let db = LightStorage::new_test(); + let genesis_hash = insert_block(&db, HashMap::new(), || { + default_header(&Default::default(), 0) + }); + assert_eq!( + db.status(BlockId::Hash(genesis_hash)).unwrap(), + BlockStatus::InChain + ); + assert_eq!(db.status(BlockId::Number(0)).unwrap(), BlockStatus::InChain); + assert_eq!( + db.status(BlockId::Hash(Hash::from_low_u64_be(1))).unwrap(), + BlockStatus::Unknown + ); + assert_eq!(db.status(BlockId::Number(1)).unwrap(), BlockStatus::Unknown); + } + + #[test] + fn returns_block_hash() { + let db = LightStorage::new_test(); + let genesis_hash = insert_block(&db, HashMap::new(), || { + default_header(&Default::default(), 0) + }); + assert_eq!(db.hash(0).unwrap(), Some(genesis_hash)); + assert_eq!(db.hash(1).unwrap(), None); + } + + #[test] + fn import_header_works() { + let db = LightStorage::new_test(); + + let genesis_hash = insert_block(&db, HashMap::new(), || { + default_header(&Default::default(), 0) + }); + assert_eq!(db.db.iter(columns::HEADER).count(), 1); + assert_eq!(db.db.iter(columns::KEY_LOOKUP).count(), 2); + + let _ = insert_block(&db, HashMap::new(), || default_header(&genesis_hash, 1)); + assert_eq!(db.db.iter(columns::HEADER).count(), 2); + assert_eq!(db.db.iter(columns::KEY_LOOKUP).count(), 4); + } + + #[test] + fn finalized_ancient_headers_are_replaced_with_cht() { + fn insert_headers Header>(header_producer: F) -> LightStorage { + let db = LightStorage::new_test(); + + // insert genesis block header (never pruned) + let mut prev_hash = insert_final_block(&db, HashMap::new(), || { + header_producer(&Default::default(), 0) + }); + + // insert SIZE blocks && ensure that nothing is pruned + for number in 0..cht::SIZE { + prev_hash = insert_block(&db, HashMap::new(), || { + header_producer(&prev_hash, 1 + number) + }); + } + assert_eq!( + db.db.iter(columns::HEADER).count(), + (1 + cht::SIZE) as usize + ); + assert_eq!(db.db.iter(columns::CHT).count(), 0); + + // insert next SIZE blocks && ensure that nothing is pruned + for number in 0..cht::SIZE { + prev_hash = insert_block(&db, HashMap::new(), || { + header_producer(&prev_hash, 1 + cht::SIZE + number) + }); + } + assert_eq!( + db.db.iter(columns::HEADER).count(), + (1 + cht::SIZE + cht::SIZE) as usize + ); + assert_eq!(db.db.iter(columns::CHT).count(), 0); + + // insert block #{2 * cht::SIZE + 1} && check that new CHT is created + headers of this CHT are pruned + // nothing is yet finalized, so nothing is pruned. + prev_hash = insert_block(&db, HashMap::new(), || { + header_producer(&prev_hash, 1 + cht::SIZE + cht::SIZE) + }); + assert_eq!( + db.db.iter(columns::HEADER).count(), + (2 + cht::SIZE + cht::SIZE) as usize + ); + assert_eq!(db.db.iter(columns::CHT).count(), 0); + + // now finalize the block. 
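+            // Finalization is what triggers CHT construction: once everything up
+            // to 2 * SIZE is final, CHT#0 is built and its headers become prunable.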
+ for i in (0..(cht::SIZE + cht::SIZE)).map(|i| i + 1) { + db.finalize_header(BlockId::Number(i)).unwrap(); + } + db.finalize_header(BlockId::Hash(prev_hash)).unwrap(); + db + } + + // when headers are created without changes tries roots + let db = insert_headers(default_header); + assert_eq!( + db.db.iter(columns::HEADER).count(), + (1 + cht::SIZE + 1) as usize + ); + assert_eq!( + db.db.iter(columns::KEY_LOOKUP).count(), + (2 * (1 + cht::SIZE + 1)) as usize + ); + assert_eq!(db.db.iter(columns::CHT).count(), 1); + assert!((0..cht::SIZE).all(|i| db.header(BlockId::Number(1 + i)).unwrap().is_none())); + assert!(db.header_cht_root(cht::SIZE, cht::SIZE / 2).is_ok()); + assert!(db + .header_cht_root(cht::SIZE, cht::SIZE + cht::SIZE / 2) + .is_err()); + assert!(db.changes_trie_cht_root(cht::SIZE, cht::SIZE / 2).is_err()); + assert!(db + .changes_trie_cht_root(cht::SIZE, cht::SIZE + cht::SIZE / 2) + .is_err()); + + // when headers are created with changes tries roots + let db = insert_headers(header_with_changes_trie); + assert_eq!( + db.db.iter(columns::HEADER).count(), + (1 + cht::SIZE + 1) as usize + ); + assert_eq!(db.db.iter(columns::CHT).count(), 2); + assert!((0..cht::SIZE).all(|i| db.header(BlockId::Number(1 + i)).unwrap().is_none())); + assert!(db.header_cht_root(cht::SIZE, cht::SIZE / 2).is_ok()); + assert!(db + .header_cht_root(cht::SIZE, cht::SIZE + cht::SIZE / 2) + .is_err()); + assert!(db.changes_trie_cht_root(cht::SIZE, cht::SIZE / 2).is_ok()); + assert!(db + .changes_trie_cht_root(cht::SIZE, cht::SIZE + cht::SIZE / 2) + .is_err()); + } + + #[test] + fn get_cht_fails_for_genesis_block() { + assert!(LightStorage::::new_test() + .header_cht_root(cht::SIZE, 0) + .is_err()); + } + + #[test] + fn get_cht_fails_for_non_existant_cht() { + assert!(LightStorage::::new_test() + .header_cht_root(cht::SIZE, (cht::SIZE / 2) as u64) + .is_err()); + } + + #[test] + fn get_cht_works() { + let db = LightStorage::new_test(); + + // insert 1 + SIZE + SIZE + 1 blocks so that CHT#0 is created + let mut prev_hash = insert_final_block(&db, HashMap::new(), || { + header_with_changes_trie(&Default::default(), 0) + }); + for i in 1..1 + cht::SIZE + cht::SIZE + 1 { + prev_hash = insert_block(&db, HashMap::new(), || { + header_with_changes_trie(&prev_hash, i as u64) + }); + db.finalize_header(BlockId::Hash(prev_hash)).unwrap(); + } + + let cht_root_1 = db + .header_cht_root(cht::SIZE, cht::start_number(cht::SIZE, 0)) + .unwrap(); + let cht_root_2 = db + .header_cht_root( + cht::SIZE, + (cht::start_number(cht::SIZE, 0) + cht::SIZE / 2) as u64, + ) + .unwrap(); + let cht_root_3 = db + .header_cht_root(cht::SIZE, cht::end_number(cht::SIZE, 0)) + .unwrap(); + assert_eq!(cht_root_1, cht_root_2); + assert_eq!(cht_root_2, cht_root_3); + + let cht_root_1 = db + .changes_trie_cht_root(cht::SIZE, cht::start_number(cht::SIZE, 0)) + .unwrap(); + let cht_root_2 = db + .changes_trie_cht_root( + cht::SIZE, + (cht::start_number(cht::SIZE, 0) + cht::SIZE / 2) as u64, + ) + .unwrap(); + let cht_root_3 = db + .changes_trie_cht_root(cht::SIZE, cht::end_number(cht::SIZE, 0)) + .unwrap(); + assert_eq!(cht_root_1, cht_root_2); + assert_eq!(cht_root_2, cht_root_3); + } + + #[test] + fn tree_route_works() { + let db = LightStorage::new_test(); + let block0 = insert_block(&db, HashMap::new(), || { + default_header(&Default::default(), 0) + }); + + // fork from genesis: 3 prong. 
+ let a1 = insert_block(&db, HashMap::new(), || default_header(&block0, 1)); + let a2 = insert_block(&db, HashMap::new(), || default_header(&a1, 2)); + let a3 = insert_block(&db, HashMap::new(), || default_header(&a2, 3)); + + // fork from genesis: 2 prong. + let b1 = insert_block(&db, HashMap::new(), || { + header_with_extrinsics_root(&block0, 1, Hash::from([1; 32])) + }); + let b2 = insert_block(&db, HashMap::new(), || default_header(&b1, 2)); + + { + let tree_route = + ::client::blockchain::tree_route(&db, BlockId::Hash(a3), BlockId::Hash(b2)) + .unwrap(); + + assert_eq!(tree_route.common_block().hash, block0); + assert_eq!( + tree_route + .retracted() + .iter() + .map(|r| r.hash) + .collect::>(), + vec![a3, a2, a1] + ); + assert_eq!( + tree_route + .enacted() + .iter() + .map(|r| r.hash) + .collect::>(), + vec![b1, b2] + ); + } + + { + let tree_route = + ::client::blockchain::tree_route(&db, BlockId::Hash(a1), BlockId::Hash(a3)) + .unwrap(); + + assert_eq!(tree_route.common_block().hash, a1); + assert!(tree_route.retracted().is_empty()); + assert_eq!( + tree_route + .enacted() + .iter() + .map(|r| r.hash) + .collect::>(), + vec![a2, a3] + ); + } + + { + let tree_route = + ::client::blockchain::tree_route(&db, BlockId::Hash(a3), BlockId::Hash(a1)) + .unwrap(); + + assert_eq!(tree_route.common_block().hash, a1); + assert_eq!( + tree_route + .retracted() + .iter() + .map(|r| r.hash) + .collect::>(), + vec![a3, a2] + ); + assert!(tree_route.enacted().is_empty()); + } + + { + let tree_route = + ::client::blockchain::tree_route(&db, BlockId::Hash(a2), BlockId::Hash(a2)) + .unwrap(); + + assert_eq!(tree_route.common_block().hash, a2); + assert!(tree_route.retracted().is_empty()); + assert!(tree_route.enacted().is_empty()); + } + } + + #[test] + fn authorities_are_cached() { + let db = LightStorage::new_test(); + + fn run_checks( + db: &LightStorage, + max: u64, + checks: &[(u64, Option>>)], + ) { + for (at, expected) in checks.iter().take_while(|(at, _)| *at <= max) { + let actual = get_authorities(db.cache(), BlockId::Number(*at)); + assert_eq!(*expected, actual); + } + } + + fn same_authorities() -> HashMap> { + HashMap::new() + } + + fn make_authorities( + authorities: Vec, + ) -> HashMap> { + let mut map = HashMap::new(); + map.insert(well_known_cache_keys::AUTHORITIES, authorities.encode()); + map + } + + fn get_authorities( + cache: &BlockchainCache, + at: BlockId, + ) -> Option> { + cache + .get_at(&well_known_cache_keys::AUTHORITIES, &at) + .and_then(|val| Decode::decode(&mut &val[..])) + } + + let auth1 = || AuthorityId::from_raw([1u8; 32]); + let auth2 = || AuthorityId::from_raw([2u8; 32]); + let auth3 = || AuthorityId::from_raw([3u8; 32]); + let auth4 = || AuthorityId::from_raw([4u8; 32]); + let auth5 = || AuthorityId::from_raw([5u8; 32]); + let auth6 = || AuthorityId::from_raw([6u8; 32]); + + let (hash2, hash6) = { + // first few blocks are instantly finalized + // B0(None) -> B1(None) -> B2(1) -> B3(1) -> B4(1, 2) -> B5(1, 2) -> B6(1, 2) + let checks = vec![ + (0, None), + (1, None), + (2, Some(vec![auth1()])), + (3, Some(vec![auth1()])), + (4, Some(vec![auth1(), auth2()])), + (5, Some(vec![auth1(), auth2()])), + (6, Some(vec![auth1(), auth2()])), + ]; + + let hash0 = insert_final_block(&db, same_authorities(), || { + default_header(&Default::default(), 0) + }); + run_checks(&db, 0, &checks); + let hash1 = insert_final_block(&db, same_authorities(), || default_header(&hash0, 1)); + run_checks(&db, 1, &checks); + let hash2 = insert_final_block(&db, make_authorities(vec![auth1()]), 
|| { + default_header(&hash1, 2) + }); + run_checks(&db, 2, &checks); + let hash3 = insert_final_block(&db, make_authorities(vec![auth1()]), || { + default_header(&hash2, 3) + }); + run_checks(&db, 3, &checks); + let hash4 = insert_final_block(&db, make_authorities(vec![auth1(), auth2()]), || { + default_header(&hash3, 4) + }); + run_checks(&db, 4, &checks); + let hash5 = insert_final_block(&db, make_authorities(vec![auth1(), auth2()]), || { + default_header(&hash4, 5) + }); + run_checks(&db, 5, &checks); + let hash6 = insert_final_block(&db, same_authorities(), || default_header(&hash5, 6)); + run_checks(&db, 6, &checks); + + (hash2, hash6) + }; + + { + // some older non-best blocks are inserted + // ... -> B2(1) -> B2_1(1) -> B2_2(2) + // => the cache ignores all writes before best finalized block + let hash2_1 = insert_non_best_block(&db, make_authorities(vec![auth1()]), || { + default_header(&hash2, 3) + }); + assert_eq!(None, get_authorities(db.cache(), BlockId::Hash(hash2_1))); + let hash2_2 = + insert_non_best_block(&db, make_authorities(vec![auth1(), auth2()]), || { + default_header(&hash2_1, 4) + }); + assert_eq!(None, get_authorities(db.cache(), BlockId::Hash(hash2_2))); + } + + let (hash7, hash8, hash6_1, hash6_2, hash6_1_1, hash6_1_2) = { + // inserting non-finalized blocks + // B6(None) -> B7(3) -> B8(3) + // \> B6_1(4) -> B6_2(4) + // \> B6_1_1(5) + // \> B6_1_2(6) -> B6_1_3(7) + + let hash7 = insert_block(&db, make_authorities(vec![auth3()]), || { + default_header(&hash6, 7) + }); + assert_eq!( + get_authorities(db.cache(), BlockId::Hash(hash6)), + Some(vec![auth1(), auth2()]), + ); + assert_eq!( + get_authorities(db.cache(), BlockId::Hash(hash7)), + Some(vec![auth3()]) + ); + let hash8 = insert_block(&db, make_authorities(vec![auth3()]), || { + default_header(&hash7, 8) + }); + assert_eq!( + get_authorities(db.cache(), BlockId::Hash(hash6)), + Some(vec![auth1(), auth2()]), + ); + assert_eq!( + get_authorities(db.cache(), BlockId::Hash(hash7)), + Some(vec![auth3()]) + ); + assert_eq!( + get_authorities(db.cache(), BlockId::Hash(hash8)), + Some(vec![auth3()]) + ); + let hash6_1 = insert_block(&db, make_authorities(vec![auth4()]), || { + default_header(&hash6, 7) + }); + assert_eq!( + get_authorities(db.cache(), BlockId::Hash(hash6)), + Some(vec![auth1(), auth2()]), + ); + assert_eq!( + get_authorities(db.cache(), BlockId::Hash(hash7)), + Some(vec![auth3()]) + ); + assert_eq!( + get_authorities(db.cache(), BlockId::Hash(hash8)), + Some(vec![auth3()]) + ); + assert_eq!( + get_authorities(db.cache(), BlockId::Hash(hash6_1)), + Some(vec![auth4()]) + ); + let hash6_1_1 = insert_non_best_block(&db, make_authorities(vec![auth5()]), || { + default_header(&hash6_1, 8) + }); + assert_eq!( + get_authorities(db.cache(), BlockId::Hash(hash6)), + Some(vec![auth1(), auth2()]), + ); + assert_eq!( + get_authorities(db.cache(), BlockId::Hash(hash7)), + Some(vec![auth3()]) + ); + assert_eq!( + get_authorities(db.cache(), BlockId::Hash(hash8)), + Some(vec![auth3()]) + ); + assert_eq!( + get_authorities(db.cache(), BlockId::Hash(hash6_1)), + Some(vec![auth4()]) + ); + assert_eq!( + get_authorities(db.cache(), BlockId::Hash(hash6_1_1)), + Some(vec![auth5()]) + ); + let hash6_1_2 = insert_non_best_block(&db, make_authorities(vec![auth6()]), || { + default_header(&hash6_1, 8) + }); + assert_eq!( + get_authorities(db.cache(), BlockId::Hash(hash6)), + Some(vec![auth1(), auth2()]), + ); + assert_eq!( + get_authorities(db.cache(), BlockId::Hash(hash7)), + Some(vec![auth3()]) + ); + assert_eq!( + 
get_authorities(db.cache(), BlockId::Hash(hash8)), + Some(vec![auth3()]) + ); + assert_eq!( + get_authorities(db.cache(), BlockId::Hash(hash6_1)), + Some(vec![auth4()]) + ); + assert_eq!( + get_authorities(db.cache(), BlockId::Hash(hash6_1_1)), + Some(vec![auth5()]) + ); + assert_eq!( + get_authorities(db.cache(), BlockId::Hash(hash6_1_2)), + Some(vec![auth6()]) + ); + let hash6_2 = insert_block(&db, make_authorities(vec![auth4()]), || { + default_header(&hash6_1, 8) + }); + assert_eq!( + get_authorities(db.cache(), BlockId::Hash(hash6)), + Some(vec![auth1(), auth2()]), + ); + assert_eq!( + get_authorities(db.cache(), BlockId::Hash(hash7)), + Some(vec![auth3()]) + ); + assert_eq!( + get_authorities(db.cache(), BlockId::Hash(hash8)), + Some(vec![auth3()]) + ); + assert_eq!( + get_authorities(db.cache(), BlockId::Hash(hash6_1)), + Some(vec![auth4()]) + ); + assert_eq!( + get_authorities(db.cache(), BlockId::Hash(hash6_1_1)), + Some(vec![auth5()]) + ); + assert_eq!( + get_authorities(db.cache(), BlockId::Hash(hash6_1_2)), + Some(vec![auth6()]) + ); + assert_eq!( + get_authorities(db.cache(), BlockId::Hash(hash6_2)), + Some(vec![auth4()]) + ); + + (hash7, hash8, hash6_1, hash6_2, hash6_1_1, hash6_1_2) + }; + + { + // finalize block hash6_1 + db.finalize_header(BlockId::Hash(hash6_1)).unwrap(); + assert_eq!( + get_authorities(db.cache(), BlockId::Hash(hash6)), + Some(vec![auth1(), auth2()]), + ); + assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash7)), None); + assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash8)), None); + assert_eq!( + get_authorities(db.cache(), BlockId::Hash(hash6_1)), + Some(vec![auth4()]) + ); + assert_eq!( + get_authorities(db.cache(), BlockId::Hash(hash6_1_1)), + Some(vec![auth5()]) + ); + assert_eq!( + get_authorities(db.cache(), BlockId::Hash(hash6_1_2)), + Some(vec![auth6()]) + ); + assert_eq!( + get_authorities(db.cache(), BlockId::Hash(hash6_2)), + Some(vec![auth4()]) + ); + // finalize block hash6_2 + db.finalize_header(BlockId::Hash(hash6_2)).unwrap(); + assert_eq!( + get_authorities(db.cache(), BlockId::Hash(hash6)), + Some(vec![auth1(), auth2()]), + ); + assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash7)), None); + assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash8)), None); + assert_eq!( + get_authorities(db.cache(), BlockId::Hash(hash6_1)), + Some(vec![auth4()]) + ); + assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash6_1_1)), None); + assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash6_1_2)), None); + assert_eq!( + get_authorities(db.cache(), BlockId::Hash(hash6_2)), + Some(vec![auth4()]) + ); + } + } + + #[test] + fn database_is_reopened() { + let db = LightStorage::new_test(); + let hash0 = insert_final_block(&db, HashMap::new(), || { + default_header(&Default::default(), 0) + }); + assert_eq!(db.info().unwrap().best_hash, hash0); + assert_eq!( + db.header(BlockId::Hash(hash0)).unwrap().unwrap().hash(), + hash0 + ); + + let db = db.db; + let db = LightStorage::from_kvdb(db).unwrap(); + assert_eq!(db.info().unwrap().best_hash, hash0); + assert_eq!( + db.header(BlockId::Hash::(hash0)) + .unwrap() + .unwrap() + .hash(), + hash0 + ); + } + + #[test] + fn aux_store_works() { + let db = LightStorage::::new_test(); + + // insert aux1 + aux2 using direct store access + db.insert_aux( + &[(&[1][..], &[101][..]), (&[2][..], &[102][..])], + ::std::iter::empty(), + ) + .unwrap(); + + // check aux values + assert_eq!(db.get_aux(&[1]).unwrap(), Some(vec![101])); + assert_eq!(db.get_aux(&[2]).unwrap(), Some(vec![102])); + 
assert_eq!(db.get_aux(&[3]).unwrap(), None);
+
+        // delete aux1 + insert aux3 using import operation
+        db.import_header(
+            default_header(&Default::default(), 0),
+            HashMap::new(),
+            NewBlockState::Best,
+            vec![(vec![3], Some(vec![103])), (vec![1], None)],
+        )
+        .unwrap();
+
+        // check aux values
+        assert_eq!(db.get_aux(&[1]).unwrap(), None);
+        assert_eq!(db.get_aux(&[2]).unwrap(), Some(vec![102]));
+        assert_eq!(db.get_aux(&[3]).unwrap(), Some(vec![103]));
+    }
+
+    #[test]
+    fn test_leaves_pruned_on_finality() {
+        let db = LightStorage::<Block>::new_test();
+        let block0 = insert_block(&db, HashMap::new(), || {
+            default_header(&Default::default(), 0)
+        });
+
+        let block1_a = insert_block(&db, HashMap::new(), || default_header(&block0, 1));
+        let block1_b = insert_block(&db, HashMap::new(), || {
+            header_with_extrinsics_root(&block0, 1, [1; 32].into())
+        });
+        let block1_c = insert_block(&db, HashMap::new(), || {
+            header_with_extrinsics_root(&block0, 1, [2; 32].into())
+        });
+
+        assert_eq!(
+            db.leaves.read().hashes(),
+            vec![block1_a, block1_b, block1_c]
+        );
+
+        let block2_a = insert_block(&db, HashMap::new(), || default_header(&block1_a, 2));
+        let block2_b = insert_block(&db, HashMap::new(), || {
+            header_with_extrinsics_root(&block1_b, 2, [1; 32].into())
+        });
+        let block2_c = insert_block(&db, HashMap::new(), || {
+            header_with_extrinsics_root(&block1_b, 2, [2; 32].into())
+        });
+
+        assert_eq!(
+            db.leaves.read().hashes(),
+            vec![block2_a, block2_b, block2_c, block1_c]
+        );
+
+        db.finalize_header(BlockId::hash(block1_a)).unwrap();
+        db.finalize_header(BlockId::hash(block2_a)).unwrap();
+
+        // leaves at same height stay. Leaves at lower heights pruned.
+        assert_eq!(
+            db.leaves.read().hashes(),
+            vec![block2_a, block2_b, block2_c]
+        );
+    }
 }
diff --git a/core/client/db/src/storage_cache.rs b/core/client/db/src/storage_cache.rs
index 6cfdbdd09b..b2fa4cbbd4 100644
--- a/core/client/db/src/storage_cache.rs
+++ b/core/client/db/src/storage_cache.rs
@@ -16,14 +16,14 @@
 //! Global cache state.
 
-use std::collections::{VecDeque, HashSet, HashMap};
-use std::sync::Arc;
-use parking_lot::{Mutex, RwLock, RwLockUpgradableReadGuard};
-use lru_cache::LruCache;
 use hash_db::Hasher;
+use log::trace;
+use lru_cache::LruCache;
+use parking_lot::{Mutex, RwLock, RwLockUpgradableReadGuard};
 use runtime_primitives::traits::{Block, Header};
 use state_machine::{backend::Backend as StateBackend, TrieBackend};
-use log::trace;
+use std::collections::{HashMap, HashSet, VecDeque};
+use std::sync::Arc;
 
 const STATE_CACHE_BLOCKS: usize = 12;
 
@@ -32,48 +32,48 @@ type StorageValue = Vec<u8>;
 
 /// Shared canonical state cache.
 pub struct Cache<B: Block> {
-	/// Storage cache. `None` indicates that key is known to be missing.
-	storage: LruCache<StorageKey, Option<StorageValue>>,
-	/// Storage hashes cache. `None` indicates that key is known to be missing.
-	hashes: LruCache<StorageKey, Option<B::Hash>>,
-	/// Information on the modifications in recently committed blocks; specifically which keys
-	/// changed in which block. Ordered by block number.
-	modifications: VecDeque<BlockChanges<B::Header>>,
+    /// Storage cache. `None` indicates that key is known to be missing.
+    storage: LruCache<StorageKey, Option<StorageValue>>,
+    /// Storage hashes cache. `None` indicates that key is known to be missing.
+    hashes: LruCache<StorageKey, Option<B::Hash>>,
+    /// Information on the modifications in recently committed blocks; specifically which keys
+    /// changed in which block. Ordered by block number.
+    modifications: VecDeque<BlockChanges<B::Header>>,
 }
 
 pub type SharedCache<B> = Arc<Mutex<Cache<B>>>;
 
 /// Create new shared cache instance with given max memory usage.
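 /// The same derived item capacity is used for both the storage and the
 /// storage-hash LRU maps (see the body below).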
 pub fn new_shared_cache<B: Block>(shared_cache_size: usize) -> SharedCache<B> {
-	let cache_items = shared_cache_size / 100; // Guesstimate, potentially inaccurate
-	Arc::new(Mutex::new(Cache {
-		storage: LruCache::new(cache_items),
-		hashes: LruCache::new(cache_items),
-		modifications: VecDeque::new(),
-	}))
+    let cache_items = shared_cache_size / 100; // Guesstimate, potentially inaccurate
+    Arc::new(Mutex::new(Cache {
+        storage: LruCache::new(cache_items),
+        hashes: LruCache::new(cache_items),
+        modifications: VecDeque::new(),
+    }))
 }
 
 #[derive(Debug)]
 /// Accumulates a list of storage changed in a block.
 struct BlockChanges<B: Header> {
-	/// Block number.
-	number: B::Number,
-	/// Block hash.
-	hash: B::Hash,
-	/// Parent block hash.
-	parent: B::Hash,
-	/// A set of modified storage keys.
-	storage: HashSet<StorageKey>,
-	/// Block is part of the canonical chain.
-	is_canon: bool,
+    /// Block number.
+    number: B::Number,
+    /// Block hash.
+    hash: B::Hash,
+    /// Parent block hash.
+    parent: B::Hash,
+    /// A set of modified storage keys.
+    storage: HashSet<StorageKey>,
+    /// Block is part of the canonical chain.
+    is_canon: bool,
 }
 
 /// Cached values specific to a state.
 struct LocalCache<H: Hasher> {
-	/// Storage cache. `None` indicates that key is known to be missing.
-	storage: HashMap<StorageKey, Option<StorageValue>>,
-	/// Storage hashes cache. `None` indicates that key is known to be missing.
-	hashes: HashMap<StorageKey, Option<H::Out>>,
+    /// Storage cache. `None` indicates that key is known to be missing.
+    storage: HashMap<StorageKey, Option<StorageValue>>,
+    /// Storage hashes cache. `None` indicates that key is known to be missing.
+    hashes: HashMap<StorageKey, Option<H::Out>>,
 }
 
 /// State abstraction.
@@ -84,338 +84,453 @@ struct LocalCache<H: Hasher> {
 /// in `sync_cache` along with the change overlay.
 /// For non-canonical clones local cache and changes are dropped.
 pub struct CachingState<H: Hasher, S: StateBackend<H>, B: Block> {
-	/// Backing state.
-	state: S,
-	/// Shared canonical state cache.
-	shared_cache: SharedCache<B>,
-	/// Local cache of values for this state.
-	local_cache: RwLock<LocalCache<H>>,
-	/// Hash of the block on top of which this instance was created or
-	/// `None` if cache is disabled
-	pub parent_hash: Option<B::Hash>,
+    /// Backing state.
+    state: S,
+    /// Shared canonical state cache.
+    shared_cache: SharedCache<B>,
+    /// Local cache of values for this state.
+    local_cache: RwLock<LocalCache<H>>,
+    /// Hash of the block on top of which this instance was created or
+    /// `None` if cache is disabled
+    pub parent_hash: Option<B::Hash>,
 }
 
 impl<H: Hasher, S: StateBackend<H>, B: Block> CachingState<H, S, B> {
-	/// Create a new instance wrapping generic State and shared cache.
-	pub fn new(state: S, shared_cache: SharedCache<B>, parent_hash: Option<B::Hash>) -> CachingState<H, S, B> {
-		CachingState {
-			state,
-			shared_cache,
-			local_cache: RwLock::new(LocalCache {
-				storage: Default::default(),
-				hashes: Default::default(),
-			}),
-			parent_hash: parent_hash,
-		}
-	}
-
-	/// Propagate local cache into the shared cache and synchronize
-	/// the shared cache with the best block state.
-	/// This function updates the shared cache by removing entries
-	/// that are invalidated by chain reorganization. `sync_cache`
-	/// should be called after the block has been committed and the
-	/// blockchain route has been calculated.
- pub fn sync_cache bool> ( - &mut self, - enacted: &[B::Hash], - retracted: &[B::Hash], - changes: Vec<(StorageKey, Option)>, - commit_hash: Option, - commit_number: Option<::Number>, - is_best: F, - ) { - let mut cache = self.shared_cache.lock(); - let is_best = is_best(); - trace!("Syncing cache, id = (#{:?}, {:?}), parent={:?}, best={}", commit_number, commit_hash, self.parent_hash, is_best); - let cache = &mut *cache; - - // Purge changes from re-enacted and retracted blocks. - // Filter out commiting block if any. - let mut clear = false; - for block in enacted.iter().filter(|h| commit_hash.as_ref().map_or(true, |p| *h != p)) { - clear = clear || { - if let Some(ref mut m) = cache.modifications.iter_mut().find(|m| &m.hash == block) { - trace!("Reverting enacted block {:?}", block); - m.is_canon = true; - for a in &m.storage { - trace!("Reverting enacted key {:?}", a); - cache.storage.remove(a); - } - false - } else { - true - } - }; - } - - for block in retracted { - clear = clear || { - if let Some(ref mut m) = cache.modifications.iter_mut().find(|m| &m.hash == block) { - trace!("Retracting block {:?}", block); - m.is_canon = false; - for a in &m.storage { - trace!("Retracted key {:?}", a); - cache.storage.remove(a); - } - false - } else { - true - } - }; - } - if clear { - // We don't know anything about the block; clear everything - trace!("Wiping cache"); - cache.storage.clear(); - cache.modifications.clear(); - } - - // Propagate cache only if committing on top of the latest canonical state - // blocks are ordered by number and only one block with a given number is marked as canonical - // (contributed to canonical state cache) - if let Some(_) = self.parent_hash { - let mut local_cache = self.local_cache.write(); - if is_best { - trace!("Committing {} local, {} hashes, {} modified entries", local_cache.storage.len(), local_cache.hashes.len(), changes.len()); - for (k, v) in local_cache.storage.drain() { - cache.storage.insert(k, v); - } - for (k, v) in local_cache.hashes.drain() { - cache.hashes.insert(k, v); - } - } - } - - if let ( - Some(ref number), Some(ref hash), Some(ref parent)) - = (commit_number, commit_hash, self.parent_hash) - { - if cache.modifications.len() == STATE_CACHE_BLOCKS { - cache.modifications.pop_back(); - } - let mut modifications = HashSet::new(); - for (k, v) in changes.into_iter() { - modifications.insert(k.clone()); - if is_best { - cache.hashes.remove(&k); - cache.storage.insert(k, v); - } - } - // Save modified storage. These are ordered by the block number. - let block_changes = BlockChanges { - storage: modifications, - number: *number, - hash: hash.clone(), - is_canon: is_best, - parent: parent.clone(), - }; - let insert_at = cache.modifications.iter() - .enumerate() - .find(|&(_, m)| m.number < *number) - .map(|(i, _)| i); - trace!("Inserting modifications at {:?}", insert_at); - if let Some(insert_at) = insert_at { - cache.modifications.insert(insert_at, block_changes); - } else { - cache.modifications.push_back(block_changes); - } - } - } - - /// Check if the key can be returned from cache by matching current block parent hash against canonical - /// state and filtering out entries modified in later blocks. 
- fn is_allowed( - key: &[u8], - parent_hash: &Option, - modifications: - &VecDeque> - ) -> bool - { - let mut parent = match *parent_hash { - None => { - trace!("Cache lookup skipped for {:?}: no parent hash", key); - return false; - } - Some(ref parent) => parent, - }; - if modifications.is_empty() { - trace!("Cache lookup allowed for {:?}", key); - return true; - } - // Ignore all storage modified in later blocks - // Modifications contains block ordered by the number - // We search for our parent in that list first and then for - // all its parent until we hit the canonical block, - // checking against all the intermediate modifications. - for m in modifications { - if &m.hash == parent { - if m.is_canon { - return true; - } - parent = &m.parent; - } - if m.storage.contains(key) { - trace!("Cache lookup skipped for {:?}: modified in a later block", key); - return false; - } - } - trace!("Cache lookup skipped for {:?}: parent hash is unknown", key); - false - } + /// Create a new instance wrapping generic State and shared cache. + pub fn new( + state: S, + shared_cache: SharedCache, + parent_hash: Option, + ) -> CachingState { + CachingState { + state, + shared_cache, + local_cache: RwLock::new(LocalCache { + storage: Default::default(), + hashes: Default::default(), + }), + parent_hash: parent_hash, + } + } + + /// Propagate local cache into the shared cache and synchronize + /// the shared cache with the best block state. + /// This function updates the shared cache by removing entries + /// that are invalidated by chain reorganization. `sync_cache` + /// should be called after the block has been committed and the + /// blockchain route has been calculated. + pub fn sync_cache bool>( + &mut self, + enacted: &[B::Hash], + retracted: &[B::Hash], + changes: Vec<(StorageKey, Option)>, + commit_hash: Option, + commit_number: Option<::Number>, + is_best: F, + ) { + let mut cache = self.shared_cache.lock(); + let is_best = is_best(); + trace!( + "Syncing cache, id = (#{:?}, {:?}), parent={:?}, best={}", + commit_number, + commit_hash, + self.parent_hash, + is_best + ); + let cache = &mut *cache; + + // Purge changes from re-enacted and retracted blocks. + // Filter out commiting block if any. 
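+        // If any enacted or retracted block is missing from `modifications`,
+        // `clear` becomes true and the whole shared cache is wiped further down.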
+ let mut clear = false; + for block in enacted + .iter() + .filter(|h| commit_hash.as_ref().map_or(true, |p| *h != p)) + { + clear = clear || { + if let Some(ref mut m) = cache.modifications.iter_mut().find(|m| &m.hash == block) { + trace!("Reverting enacted block {:?}", block); + m.is_canon = true; + for a in &m.storage { + trace!("Reverting enacted key {:?}", a); + cache.storage.remove(a); + } + false + } else { + true + } + }; + } + + for block in retracted { + clear = clear || { + if let Some(ref mut m) = cache.modifications.iter_mut().find(|m| &m.hash == block) { + trace!("Retracting block {:?}", block); + m.is_canon = false; + for a in &m.storage { + trace!("Retracted key {:?}", a); + cache.storage.remove(a); + } + false + } else { + true + } + }; + } + if clear { + // We don't know anything about the block; clear everything + trace!("Wiping cache"); + cache.storage.clear(); + cache.modifications.clear(); + } + + // Propagate cache only if committing on top of the latest canonical state + // blocks are ordered by number and only one block with a given number is marked as canonical + // (contributed to canonical state cache) + if let Some(_) = self.parent_hash { + let mut local_cache = self.local_cache.write(); + if is_best { + trace!( + "Committing {} local, {} hashes, {} modified entries", + local_cache.storage.len(), + local_cache.hashes.len(), + changes.len() + ); + for (k, v) in local_cache.storage.drain() { + cache.storage.insert(k, v); + } + for (k, v) in local_cache.hashes.drain() { + cache.hashes.insert(k, v); + } + } + } + + if let (Some(ref number), Some(ref hash), Some(ref parent)) = + (commit_number, commit_hash, self.parent_hash) + { + if cache.modifications.len() == STATE_CACHE_BLOCKS { + cache.modifications.pop_back(); + } + let mut modifications = HashSet::new(); + for (k, v) in changes.into_iter() { + modifications.insert(k.clone()); + if is_best { + cache.hashes.remove(&k); + cache.storage.insert(k, v); + } + } + // Save modified storage. These are ordered by the block number. + let block_changes = BlockChanges { + storage: modifications, + number: *number, + hash: hash.clone(), + is_canon: is_best, + parent: parent.clone(), + }; + let insert_at = cache + .modifications + .iter() + .enumerate() + .find(|&(_, m)| m.number < *number) + .map(|(i, _)| i); + trace!("Inserting modifications at {:?}", insert_at); + if let Some(insert_at) = insert_at { + cache.modifications.insert(insert_at, block_changes); + } else { + cache.modifications.push_back(block_changes); + } + } + } + + /// Check if the key can be returned from cache by matching current block parent hash against canonical + /// state and filtering out entries modified in later blocks. + fn is_allowed( + key: &[u8], + parent_hash: &Option, + modifications: &VecDeque>, + ) -> bool { + let mut parent = match *parent_hash { + None => { + trace!("Cache lookup skipped for {:?}: no parent hash", key); + return false; + } + Some(ref parent) => parent, + }; + if modifications.is_empty() { + trace!("Cache lookup allowed for {:?}", key); + return true; + } + // Ignore all storage modified in later blocks + // Modifications contains block ordered by the number + // We search for our parent in that list first and then for + // all its parent until we hit the canonical block, + // checking against all the intermediate modifications. 
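+        // The walk ends once a canonical block is reached; any earlier match on
+        // `key` means a later block modified it, so the cached value is skipped.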
+        for m in modifications {
+            if &m.hash == parent {
+                if m.is_canon {
+                    return true;
+                }
+                parent = &m.parent;
+            }
+            if m.storage.contains(key) {
+                trace!(
+                    "Cache lookup skipped for {:?}: modified in a later block",
+                    key
+                );
+                return false;
+            }
+        }
+        trace!("Cache lookup skipped for {:?}: parent hash is unknown", key);
+        false
+    }
 }
 
-impl<H: Hasher, S: StateBackend<H>, B:Block> StateBackend<H> for CachingState<H, S, B> {
-	type Error = S::Error;
-	type Transaction = S::Transaction;
-	type TrieBackendStorage = S::TrieBackendStorage;
-
-	fn storage(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
-		let local_cache = self.local_cache.upgradable_read();
-		if let Some(entry) = local_cache.storage.get(key).cloned() {
-			trace!("Found in local cache: {:?}", key);
-			return Ok(entry)
-		}
-		let mut cache = self.shared_cache.lock();
-		if Self::is_allowed(key, &self.parent_hash, &cache.modifications) {
-			if let Some(entry) = cache.storage.get_mut(key).map(|a| a.clone()) {
-				trace!("Found in shared cache: {:?}", key);
-				return Ok(entry)
-			}
-		}
-		trace!("Cache miss: {:?}", key);
-		let value = self.state.storage(key)?;
-		RwLockUpgradableReadGuard::upgrade(local_cache).storage.insert(key.to_vec(), value.clone());
-		Ok(value)
-	}
-
-	fn storage_hash(&self, key: &[u8]) -> Result<Option<H::Out>, Self::Error> {
-		let local_cache = self.local_cache.upgradable_read();
-		if let Some(entry) = local_cache.hashes.get(key).cloned() {
-			trace!("Found hash in local cache: {:?}", key);
-			return Ok(entry)
-		}
-		let mut cache = self.shared_cache.lock();
-		if Self::is_allowed(key, &self.parent_hash, &cache.modifications) {
-			if let Some(entry) = cache.hashes.get_mut(key).map(|a| a.clone()) {
-				trace!("Found hash in shared cache: {:?}", key);
-				return Ok(entry)
-			}
-		}
-		trace!("Cache hash miss: {:?}", key);
-		let hash = self.state.storage_hash(key)?;
-		RwLockUpgradableReadGuard::upgrade(local_cache).hashes.insert(key.to_vec(), hash.clone());
-		Ok(hash)
-	}
-
-	fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
-		self.state.child_storage(storage_key, key)
-	}
-
-	fn exists_storage(&self, key: &[u8]) -> Result<bool, Self::Error> {
-		Ok(self.storage(key)?.is_some())
-	}
-
-	fn exists_child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result<bool, Self::Error> {
-		self.state.exists_child_storage(storage_key, key)
-	}
-
-	fn for_keys_with_prefix<F: FnMut(&[u8])>(&self, prefix: &[u8], f: F) {
-		self.state.for_keys_with_prefix(prefix, f)
-	}
-
-	fn for_keys_in_child_storage<F: FnMut(&[u8])>(&self, storage_key: &[u8], f: F) {
-		self.state.for_keys_in_child_storage(storage_key, f)
-	}
-
-	fn storage_root<I>(&self, delta: I) -> (H::Out, Self::Transaction)
-	where
-		I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
-		H::Out: Ord
-	{
-		self.state.storage_root(delta)
-	}
-
-	fn child_storage_root<I>(&self, storage_key: &[u8], delta: I) -> (Vec<u8>, bool, Self::Transaction)
-	where
-		I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
-		H::Out: Ord
-	{
-		self.state.child_storage_root(storage_key, delta)
-	}
-
-	fn pairs(&self) -> Vec<(Vec<u8>, Vec<u8>)> {
-		self.state.pairs()
-	}
-
-	fn keys(&self, prefix: &Vec<u8>) -> Vec<Vec<u8>> {
-		self.state.keys(prefix)
-	}
-
-	fn try_into_trie_backend(self) -> Option<TrieBackend<Self::TrieBackendStorage, H>> {
-		self.state.try_into_trie_backend()
-	}
+impl<H: Hasher, S: StateBackend<H>, B: Block> StateBackend<H> for CachingState<H, S, B> {
+    type Error = S::Error;
+    type Transaction = S::Transaction;
+    type TrieBackendStorage = S::TrieBackendStorage;
+
+    fn storage(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
+        let local_cache = self.local_cache.upgradable_read();
+        if let Some(entry) = local_cache.storage.get(key).cloned() {
+            trace!("Found in local cache: {:?}", key);
+            return Ok(entry);
+        }
+        let mut cache = self.shared_cache.lock();
+        if Self::is_allowed(key, &self.parent_hash, &cache.modifications) {
+            if let Some(entry) = cache.storage.get_mut(key).map(|a| a.clone()) {
+                trace!("Found in shared cache: {:?}", key);
+                return Ok(entry);
+            }
+        }
+        trace!("Cache miss: {:?}", key);
+        let value = self.state.storage(key)?;
+        RwLockUpgradableReadGuard::upgrade(local_cache)
+            .storage
+            .insert(key.to_vec(), value.clone());
+        Ok(value)
+    }
+
+    fn storage_hash(&self, key: &[u8]) -> Result<Option<H::Out>, Self::Error> {
+        let local_cache = self.local_cache.upgradable_read();
+        if let Some(entry) = local_cache.hashes.get(key).cloned() {
+            trace!("Found hash in local cache: {:?}", key);
+            return Ok(entry);
+        }
+        let mut cache = self.shared_cache.lock();
+        if Self::is_allowed(key, &self.parent_hash, &cache.modifications) {
+            if let Some(entry) = cache.hashes.get_mut(key).map(|a| a.clone()) {
+                trace!("Found hash in shared cache: {:?}", key);
+                return Ok(entry);
+            }
+        }
+        trace!("Cache hash miss: {:?}", key);
+        let hash = self.state.storage_hash(key)?;
+        RwLockUpgradableReadGuard::upgrade(local_cache)
+            .hashes
+            .insert(key.to_vec(), hash.clone());
+        Ok(hash)
+    }
+
+    fn child_storage(
+        &self,
+        storage_key: &[u8],
+        key: &[u8],
+    ) -> Result<Option<Vec<u8>>, Self::Error> {
+        self.state.child_storage(storage_key, key)
+    }
+
+    fn exists_storage(&self, key: &[u8]) -> Result<bool, Self::Error> {
+        Ok(self.storage(key)?.is_some())
+    }
+
+    fn exists_child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result<bool, Self::Error> {
+        self.state.exists_child_storage(storage_key, key)
+    }
+
+    fn for_keys_with_prefix<F: FnMut(&[u8])>(&self, prefix: &[u8], f: F) {
+        self.state.for_keys_with_prefix(prefix, f)
+    }
+
+    fn for_keys_in_child_storage<F: FnMut(&[u8])>(&self, storage_key: &[u8], f: F) {
+        self.state.for_keys_in_child_storage(storage_key, f)
+    }
+
+    fn storage_root<I>(&self, delta: I) -> (H::Out, Self::Transaction)
+    where
+        I: IntoIterator<Item = (Vec<u8>, Option<Vec<u8>>)>,
+        H::Out: Ord,
+    {
+        self.state.storage_root(delta)
+    }
+
+    fn child_storage_root<I>(
+        &self,
+        storage_key: &[u8],
+        delta: I,
+    ) -> (Vec<u8>, bool, Self::Transaction)
+    where
+        I: IntoIterator<Item = (Vec<u8>, Option<Vec<u8>>)>,
+        H::Out: Ord,
+    {
+        self.state.child_storage_root(storage_key, delta)
+    }
+
+    fn pairs(&self) -> Vec<(Vec<u8>, Vec<u8>)> {
+        self.state.pairs()
+    }
+
+    fn keys(&self, prefix: &Vec<u8>) -> Vec<Vec<u8>> {
+        self.state.keys(prefix)
+    }
+
+    fn try_into_trie_backend(self) -> Option<TrieBackend<Self::TrieBackendStorage, H>> {
+        self.state.try_into_trie_backend()
+    }
 }
 
 #[cfg(test)]
 mod tests {
-	use super::*;
-	use runtime_primitives::testing::{H256, Block as RawBlock, ExtrinsicWrapper};
-	use state_machine::backend::InMemory;
-	use primitives::Blake2Hasher;
-
-	type Block = RawBlock<ExtrinsicWrapper<u32>>;
-	#[test]
-	fn smoke() {
-		//init_log();
-		let root_parent = H256::random();
-		let key = H256::random()[..].to_vec();
-		let h0 = H256::random();
-		let h1a = H256::random();
-		let h1b = H256::random();
-		let h2a = H256::random();
-		let h2b = H256::random();
-		let h3a = H256::random();
-		let h3b = H256::random();
-
-		let shared = new_shared_cache::<Block>(256*1024);
-
-		// blocks [ 3a(c) 2a(c) 2b 1b 1a(c) 0 ]
-		// state [ 5 5 4 3 2 2 ]
-		let mut s = CachingState::new(InMemory::<Blake2Hasher>::default(), shared.clone(), Some(root_parent.clone()));
-		s.sync_cache(&[], &[], vec![(key.clone(), Some(vec![2]))], Some(h0.clone()), Some(0), || true);
-
-		let mut s = CachingState::new(InMemory::<Blake2Hasher>::default(), shared.clone(), Some(h0.clone()));
-		s.sync_cache(&[], &[], vec![], Some(h1a.clone()), Some(1), || true);
-
-		let mut s = CachingState::new(InMemory::<Blake2Hasher>::default(), shared.clone(), Some(h0.clone()));
-		s.sync_cache(&[], &[], vec![(key.clone(), Some(vec![3]))], Some(h1b.clone()), Some(1), || false);
-
-
let mut s = CachingState::new(InMemory::::default(), shared.clone(), Some(h1b.clone())); - s.sync_cache(&[], &[], vec![(key.clone(), Some(vec![4]))], Some(h2b.clone()), Some(2), || false); - - let mut s = CachingState::new(InMemory::::default(), shared.clone(), Some(h1a.clone())); - s.sync_cache(&[], &[], vec![(key.clone(), Some(vec![5]))], Some(h2a.clone()), Some(2), || true); - - let mut s = CachingState::new(InMemory::::default(), shared.clone(), Some(h2a.clone())); - s.sync_cache(&[], &[], vec![], Some(h3a.clone()), Some(3), || true); - - let s = CachingState::new(InMemory::::default(), shared.clone(), Some(h3a.clone())); - assert_eq!(s.storage(&key).unwrap().unwrap(), vec![5]); - - let s = CachingState::new(InMemory::::default(), shared.clone(), Some(h1a.clone())); - assert!(s.storage(&key).unwrap().is_none()); - - let s = CachingState::new(InMemory::::default(), shared.clone(), Some(h2b.clone())); - assert!(s.storage(&key).unwrap().is_none()); - - let s = CachingState::new(InMemory::::default(), shared.clone(), Some(h1b.clone())); - assert!(s.storage(&key).unwrap().is_none()); - - // reorg to 3b - // blocks [ 3b(c) 3a 2a 2b(c) 1b 1a 0 ] - let mut s = CachingState::new(InMemory::::default(), shared.clone(), Some(h2b.clone())); - s.sync_cache(&[h1b.clone(), h2b.clone(), h3b.clone()], &[h1a.clone(), h2a.clone(), h3a.clone()], vec![], Some(h3b.clone()), Some(3), || true); - let s = CachingState::new(InMemory::::default(), shared.clone(), Some(h3a.clone())); - assert!(s.storage(&key).unwrap().is_none()); - } + use super::*; + use primitives::Blake2Hasher; + use runtime_primitives::testing::{Block as RawBlock, ExtrinsicWrapper, H256}; + use state_machine::backend::InMemory; + + type Block = RawBlock>; + #[test] + fn smoke() { + //init_log(); + let root_parent = H256::random(); + let key = H256::random()[..].to_vec(); + let h0 = H256::random(); + let h1a = H256::random(); + let h1b = H256::random(); + let h2a = H256::random(); + let h2b = H256::random(); + let h3a = H256::random(); + let h3b = H256::random(); + + let shared = new_shared_cache::(256 * 1024); + + // blocks [ 3a(c) 2a(c) 2b 1b 1a(c) 0 ] + // state [ 5 5 4 3 2 2 ] + let mut s = CachingState::new( + InMemory::::default(), + shared.clone(), + Some(root_parent.clone()), + ); + s.sync_cache( + &[], + &[], + vec![(key.clone(), Some(vec![2]))], + Some(h0.clone()), + Some(0), + || true, + ); + + let mut s = CachingState::new( + InMemory::::default(), + shared.clone(), + Some(h0.clone()), + ); + s.sync_cache(&[], &[], vec![], Some(h1a.clone()), Some(1), || true); + + let mut s = CachingState::new( + InMemory::::default(), + shared.clone(), + Some(h0.clone()), + ); + s.sync_cache( + &[], + &[], + vec![(key.clone(), Some(vec![3]))], + Some(h1b.clone()), + Some(1), + || false, + ); + + let mut s = CachingState::new( + InMemory::::default(), + shared.clone(), + Some(h1b.clone()), + ); + s.sync_cache( + &[], + &[], + vec![(key.clone(), Some(vec![4]))], + Some(h2b.clone()), + Some(2), + || false, + ); + + let mut s = CachingState::new( + InMemory::::default(), + shared.clone(), + Some(h1a.clone()), + ); + s.sync_cache( + &[], + &[], + vec![(key.clone(), Some(vec![5]))], + Some(h2a.clone()), + Some(2), + || true, + ); + + let mut s = CachingState::new( + InMemory::::default(), + shared.clone(), + Some(h2a.clone()), + ); + s.sync_cache(&[], &[], vec![], Some(h3a.clone()), Some(3), || true); + + let s = CachingState::new( + InMemory::::default(), + shared.clone(), + Some(h3a.clone()), + ); + assert_eq!(s.storage(&key).unwrap().unwrap(), 
vec![5]); + + let s = CachingState::new( + InMemory::::default(), + shared.clone(), + Some(h1a.clone()), + ); + assert!(s.storage(&key).unwrap().is_none()); + + let s = CachingState::new( + InMemory::::default(), + shared.clone(), + Some(h2b.clone()), + ); + assert!(s.storage(&key).unwrap().is_none()); + + let s = CachingState::new( + InMemory::::default(), + shared.clone(), + Some(h1b.clone()), + ); + assert!(s.storage(&key).unwrap().is_none()); + + // reorg to 3b + // blocks [ 3b(c) 3a 2a 2b(c) 1b 1a 0 ] + let mut s = CachingState::new( + InMemory::::default(), + shared.clone(), + Some(h2b.clone()), + ); + s.sync_cache( + &[h1b.clone(), h2b.clone(), h3b.clone()], + &[h1a.clone(), h2a.clone(), h3a.clone()], + vec![], + Some(h3b.clone()), + Some(3), + || true, + ); + let s = CachingState::new( + InMemory::::default(), + shared.clone(), + Some(h3a.clone()), + ); + assert!(s.storage(&key).unwrap().is_none()); + } } diff --git a/core/client/db/src/utils.rs b/core/client/db/src/utils.rs index ce843a93a2..36bf37e652 100644 --- a/core/client/db/src/utils.rs +++ b/core/client/db/src/utils.rs @@ -17,19 +17,19 @@ //! Db-based backend utility structures and functions, used by both //! full and light storages. -use std::sync::Arc; use std::io; +use std::sync::Arc; -use kvdb::{KeyValueDB, DBTransaction}; +use kvdb::{DBTransaction, KeyValueDB}; use kvdb_rocksdb::{Database, DatabaseConfig}; use log::debug; +use crate::DatabaseSettings; use client; use parity_codec::Decode; -use trie::DBValue; use runtime_primitives::generic::BlockId; use runtime_primitives::traits::{As, Block as BlockT, Header as HeaderT, Zero}; -use crate::DatabaseSettings; +use trie::DBValue; /// Number of columns in the db. Must be the same for both full && light dbs. /// Otherwise RocksDb will fail to open database && check its type. @@ -39,35 +39,35 @@ pub const COLUMN_META: Option = Some(0); /// Keys of entries in COLUMN_META. pub mod meta_keys { - /// Type of storage (full or light). - pub const TYPE: &[u8; 4] = b"type"; - /// Best block key. - pub const BEST_BLOCK: &[u8; 4] = b"best"; - /// Last finalized block key. - pub const FINALIZED_BLOCK: &[u8; 5] = b"final"; - /// Meta information prefix for list-based caches. - pub const CACHE_META_PREFIX: &[u8; 5] = b"cache"; - /// Genesis block hash. - pub const GENESIS_HASH: &[u8; 3] = b"gen"; - /// Leaves prefix list key. - pub const LEAF_PREFIX: &[u8; 4] = b"leaf"; - /// Children prefix list key. - pub const CHILDREN_PREFIX: &[u8; 8] = b"children"; + /// Type of storage (full or light). + pub const TYPE: &[u8; 4] = b"type"; + /// Best block key. + pub const BEST_BLOCK: &[u8; 4] = b"best"; + /// Last finalized block key. + pub const FINALIZED_BLOCK: &[u8; 5] = b"final"; + /// Meta information prefix for list-based caches. + pub const CACHE_META_PREFIX: &[u8; 5] = b"cache"; + /// Genesis block hash. + pub const GENESIS_HASH: &[u8; 3] = b"gen"; + /// Leaves prefix list key. + pub const LEAF_PREFIX: &[u8; 4] = b"leaf"; + /// Children prefix list key. + pub const CHILDREN_PREFIX: &[u8; 8] = b"children"; } /// Database metadata. #[derive(Debug)] pub struct Meta { - /// Hash of the best known block. - pub best_hash: H, - /// Number of the best known block. - pub best_number: N, - /// Hash of the best finalized block. - pub finalized_hash: H, - /// Number of the best finalized block. - pub finalized_number: N, - /// Hash of the genesis block. - pub genesis_hash: H, + /// Hash of the best known block. + pub best_hash: H, + /// Number of the best known block. 
+    pub best_number: N,
+    /// Hash of the best finalized block.
+    pub finalized_hash: H,
+    /// Number of the best finalized block.
+    pub finalized_number: N,
+    /// Hash of the genesis block.
+    pub genesis_hash: H,
 }
 
 /// A block lookup key: used for canonical lookup from block number to hash
@@ -78,228 +78,265 @@ pub type NumberIndexKey = [u8; 4];
 ///
 /// In the current database schema, this kind of key is only used for
 /// lookups into an index, NOT for storing header data or others.
-pub fn number_index_key<N>(n: N) -> NumberIndexKey where N: As<u64> {
-	let n: u64 = n.as_();
-	assert!(n & 0xffffffff00000000 == 0);
-
-	[
-		(n >> 24) as u8,
-		((n >> 16) & 0xff) as u8,
-		((n >> 8) & 0xff) as u8,
-		(n & 0xff) as u8
-	]
+pub fn number_index_key<N>(n: N) -> NumberIndexKey
+where
+    N: As<u64>,
+{
+    let n: u64 = n.as_();
+    assert!(n & 0xffffffff00000000 == 0);
+
+    [
+        (n >> 24) as u8,
+        ((n >> 16) & 0xff) as u8,
+        ((n >> 8) & 0xff) as u8,
+        (n & 0xff) as u8,
+    ]
 }
 
 /// Convert number and hash into long lookup key for blocks that are
 /// not in the canonical chain.
-pub fn number_and_hash_to_lookup_key<N, H>(number: N, hash: H) -> Vec<u8> where
-	N: As<u64>,
-	H: AsRef<[u8]>
+pub fn number_and_hash_to_lookup_key<N, H>(number: N, hash: H) -> Vec<u8>
+where
+    N: As<u64>,
+    H: AsRef<[u8]>,
 {
-	let mut lookup_key = number_index_key(number).to_vec();
-	lookup_key.extend_from_slice(hash.as_ref());
-	lookup_key
+    let mut lookup_key = number_index_key(number).to_vec();
+    lookup_key.extend_from_slice(hash.as_ref());
+    lookup_key
 }
 
 /// Convert block lookup key into block number.
 /// all block lookup keys start with the block number.
-pub fn lookup_key_to_number<N>(key: &[u8]) -> client::error::Result<N> where N: As<u64> {
-	if key.len() < 4 {
-		return Err(client::error::ErrorKind::Backend("Invalid block key".into()).into());
-	}
-	Ok((key[0] as u64) << 24
-		| (key[1] as u64) << 16
-		| (key[2] as u64) << 8
-		| (key[3] as u64)).map(As::sa)
+pub fn lookup_key_to_number<N>(key: &[u8]) -> client::error::Result<N>
+where
+    N: As<u64>,
+{
+    if key.len() < 4 {
+        return Err(client::error::ErrorKind::Backend("Invalid block key".into()).into());
+    }
+    Ok((key[0] as u64) << 24 | (key[1] as u64) << 16 | (key[2] as u64) << 8 | (key[3] as u64))
+        .map(As::sa)
}
 
 /// Delete number to hash mapping in DB transaction.
 pub fn remove_number_to_key_mapping<N: As<u64>>(
-	transaction: &mut DBTransaction,
-	key_lookup_col: Option<u32>,
-	number: N,
+    transaction: &mut DBTransaction,
+    key_lookup_col: Option<u32>,
+    number: N,
 ) {
-	transaction.delete(key_lookup_col, number_index_key(number).as_ref())
+    transaction.delete(key_lookup_col, number_index_key(number).as_ref())
 }
 
 /// Remove key mappings.
 pub fn remove_key_mappings<N: As<u64>, H: AsRef<[u8]>>(
-	transaction: &mut DBTransaction,
-	key_lookup_col: Option<u32>,
-	number: N,
-	hash: H,
+    transaction: &mut DBTransaction,
+    key_lookup_col: Option<u32>,
+    number: N,
+    hash: H,
 ) {
-	remove_number_to_key_mapping(transaction, key_lookup_col, number);
-	transaction.delete(key_lookup_col, hash.as_ref());
+    remove_number_to_key_mapping(transaction, key_lookup_col, number);
+    transaction.delete(key_lookup_col, hash.as_ref());
 }
 
 /// Place a number mapping into the database. This maps number to current perceived
 /// block hash at that position.
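 /// For example, block number `0x00010203` yields the index key `[0, 1, 2, 3]`,
 /// and the stored lookup key is those four bytes followed by the block hash.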
pub fn insert_number_to_key_mapping + Clone, H: AsRef<[u8]>>( - transaction: &mut DBTransaction, - key_lookup_col: Option, - number: N, - hash: H, + transaction: &mut DBTransaction, + key_lookup_col: Option, + number: N, + hash: H, ) { - transaction.put_vec( - key_lookup_col, - number_index_key(number.clone()).as_ref(), - number_and_hash_to_lookup_key(number, hash), - ) + transaction.put_vec( + key_lookup_col, + number_index_key(number.clone()).as_ref(), + number_and_hash_to_lookup_key(number, hash), + ) } /// Insert a hash to key mapping in the database. pub fn insert_hash_to_key_mapping, H: AsRef<[u8]> + Clone>( - transaction: &mut DBTransaction, - key_lookup_col: Option, - number: N, - hash: H, + transaction: &mut DBTransaction, + key_lookup_col: Option, + number: N, + hash: H, ) { - transaction.put_vec( - key_lookup_col, - hash.clone().as_ref(), - number_and_hash_to_lookup_key(number, hash), - ) + transaction.put_vec( + key_lookup_col, + hash.clone().as_ref(), + number_and_hash_to_lookup_key(number, hash), + ) } /// Convert block id to block lookup key. /// block lookup key is the DB-key header, block and justification are stored under. /// looks up lookup key by hash from DB as necessary. pub fn block_id_to_lookup_key( - db: &KeyValueDB, - key_lookup_col: Option, - id: BlockId -) -> Result>, client::error::Error> where - Block: BlockT, - ::runtime_primitives::traits::NumberFor: As, + db: &KeyValueDB, + key_lookup_col: Option, + id: BlockId, +) -> Result>, client::error::Error> +where + Block: BlockT, + ::runtime_primitives::traits::NumberFor: As, { - let res = match id { - BlockId::Number(n) => db.get( - key_lookup_col, - number_index_key(n).as_ref(), - ), - BlockId::Hash(h) => db.get(key_lookup_col, h.as_ref()), - }; - - res.map(|v| v.map(|v| v.into_vec())).map_err(db_err) + let res = match id { + BlockId::Number(n) => db.get(key_lookup_col, number_index_key(n).as_ref()), + BlockId::Hash(h) => db.get(key_lookup_col, h.as_ref()), + }; + + res.map(|v| v.map(|v| v.into_vec())).map_err(db_err) } /// Maps database error to client error pub fn db_err(err: io::Error) -> client::error::Error { - use std::error::Error; - client::error::ErrorKind::Backend(err.description().into()).into() + use std::error::Error; + client::error::ErrorKind::Backend(err.description().into()).into() } /// Open RocksDB database. -pub fn open_database(config: &DatabaseSettings, col_meta: Option, db_type: &str) -> client::error::Result> { - let mut db_config = DatabaseConfig::with_columns(Some(NUM_COLUMNS)); - db_config.memory_budget = config.cache_size; - let path = config.path.to_str().ok_or_else(|| client::error::ErrorKind::Backend("Invalid database path".into()))?; - let db = Database::open(&db_config, &path).map_err(db_err)?; - - // check database type - match db.get(col_meta, meta_keys::TYPE).map_err(db_err)? { - Some(stored_type) => { - if db_type.as_bytes() != &*stored_type { - return Err(client::error::ErrorKind::Backend( - format!("Unexpected database type. 
Expected: {}", db_type)).into()); - } - }, - None => { - let mut transaction = DBTransaction::new(); - transaction.put(col_meta, meta_keys::TYPE, db_type.as_bytes()); - db.write(transaction).map_err(db_err)?; - }, - } - - Ok(Arc::new(db)) +pub fn open_database( + config: &DatabaseSettings, + col_meta: Option, + db_type: &str, +) -> client::error::Result> { + let mut db_config = DatabaseConfig::with_columns(Some(NUM_COLUMNS)); + db_config.memory_budget = config.cache_size; + let path = config + .path + .to_str() + .ok_or_else(|| client::error::ErrorKind::Backend("Invalid database path".into()))?; + let db = Database::open(&db_config, &path).map_err(db_err)?; + + // check database type + match db.get(col_meta, meta_keys::TYPE).map_err(db_err)? { + Some(stored_type) => { + if db_type.as_bytes() != &*stored_type { + return Err(client::error::ErrorKind::Backend(format!( + "Unexpected database type. Expected: {}", + db_type + )) + .into()); + } + } + None => { + let mut transaction = DBTransaction::new(); + transaction.put(col_meta, meta_keys::TYPE, db_type.as_bytes()); + db.write(transaction).map_err(db_err)?; + } + } + + Ok(Arc::new(db)) } /// Read database column entry for the given block. -pub fn read_db(db: &KeyValueDB, col_index: Option, col: Option, id: BlockId) -> client::error::Result> - where - Block: BlockT, +pub fn read_db( + db: &KeyValueDB, + col_index: Option, + col: Option, + id: BlockId, +) -> client::error::Result> +where + Block: BlockT, { - block_id_to_lookup_key(db, col_index, id).and_then(|key| match key { - Some(key) => db.get(col, key.as_ref()).map_err(db_err), - None => Ok(None), - }) + block_id_to_lookup_key(db, col_index, id).and_then(|key| match key { + Some(key) => db.get(col, key.as_ref()).map_err(db_err), + None => Ok(None), + }) } /// Read a header from the database. pub fn read_header( - db: &KeyValueDB, - col_index: Option, - col: Option, - id: BlockId, + db: &KeyValueDB, + col_index: Option, + col: Option, + id: BlockId, ) -> client::error::Result> { - match read_db(db, col_index, col, id)? { - Some(header) => match Block::Header::decode(&mut &header[..]) { - Some(header) => Ok(Some(header)), - None => return Err( - client::error::ErrorKind::Backend("Error decoding header".into()).into() - ), - } - None => Ok(None), - } + match read_db(db, col_index, col, id)? { + Some(header) => match Block::Header::decode(&mut &header[..]) { + Some(header) => Ok(Some(header)), + None => { + return Err(client::error::ErrorKind::Backend("Error decoding header".into()).into()); + } + }, + None => Ok(None), + } } /// Required header from the database. pub fn require_header( - db: &KeyValueDB, - col_index: Option, - col: Option, - id: BlockId, + db: &KeyValueDB, + col_index: Option, + col: Option, + id: BlockId, ) -> client::error::Result { - read_header(db, col_index, col, id) - .and_then(|header| header.ok_or_else(|| client::error::ErrorKind::UnknownBlock(format!("{}", id)).into())) + read_header(db, col_index, col, id).and_then(|header| { + header.ok_or_else(|| client::error::ErrorKind::UnknownBlock(format!("{}", id)).into()) + }) } /// Read meta from the database. 
-pub fn read_meta(db: &KeyValueDB, col_meta: Option, col_header: Option) -> Result< - Meta<<::Header as HeaderT>::Number, Block::Hash>, - client::error::Error, -> - where - Block: BlockT, +pub fn read_meta( + db: &KeyValueDB, + col_meta: Option, + col_header: Option, +) -> Result::Header as HeaderT>::Number, Block::Hash>, client::error::Error> +where + Block: BlockT, { - let genesis_hash: Block::Hash = match db.get(col_meta, meta_keys::GENESIS_HASH).map_err(db_err)? { - Some(h) => match Decode::decode(&mut &h[..]) { - Some(h) => h, - None => return Err(client::error::ErrorKind::Backend("Error decoding genesis hash".into()).into()), - }, - None => return Ok(Meta { - best_hash: Default::default(), - best_number: Zero::zero(), - finalized_hash: Default::default(), - finalized_number: Zero::zero(), - genesis_hash: Default::default(), - }), - }; - - let load_meta_block = |desc, key| -> Result<_, client::error::Error> { - if let Some(Some(header)) = db.get(col_meta, key).and_then(|id| - match id { - Some(id) => db.get(col_header, &id).map(|h| h.map(|b| Block::Header::decode(&mut &b[..]))), - None => Ok(None), - }).map_err(db_err)? - { - let hash = header.hash(); - debug!("DB Opened blockchain db, fetched {} = {:?} ({})", desc, hash, header.number()); - Ok((hash, *header.number())) - } else { - Ok((genesis_hash.clone(), Zero::zero())) - } - }; - - let (best_hash, best_number) = load_meta_block("best", meta_keys::BEST_BLOCK)?; - let (finalized_hash, finalized_number) = load_meta_block("final", meta_keys::FINALIZED_BLOCK)?; - - Ok(Meta { - best_hash, - best_number, - finalized_hash, - finalized_number, - genesis_hash, - }) + let genesis_hash: Block::Hash = + match db.get(col_meta, meta_keys::GENESIS_HASH).map_err(db_err)? { + Some(h) => match Decode::decode(&mut &h[..]) { + Some(h) => h, + None => { + return Err(client::error::ErrorKind::Backend( + "Error decoding genesis hash".into(), + ) + .into()); + } + }, + None => { + return Ok(Meta { + best_hash: Default::default(), + best_number: Zero::zero(), + finalized_hash: Default::default(), + finalized_number: Zero::zero(), + genesis_hash: Default::default(), + }); + } + }; + + let load_meta_block = |desc, key| -> Result<_, client::error::Error> { + if let Some(Some(header)) = db + .get(col_meta, key) + .and_then(|id| match id { + Some(id) => db + .get(col_header, &id) + .map(|h| h.map(|b| Block::Header::decode(&mut &b[..]))), + None => Ok(None), + }) + .map_err(db_err)? + { + let hash = header.hash(); + debug!( + "DB Opened blockchain db, fetched {} = {:?} ({})", + desc, + hash, + header.number() + ); + Ok((hash, *header.number())) + } else { + Ok((genesis_hash.clone(), Zero::zero())) + } + }; + + let (best_hash, best_number) = load_meta_block("best", meta_keys::BEST_BLOCK)?; + let (finalized_hash, finalized_number) = load_meta_block("final", meta_keys::FINALIZED_BLOCK)?; + + Ok(Meta { + best_hash, + best_number, + finalized_hash, + finalized_number, + genesis_hash, + }) } diff --git a/core/client/src/backend.rs b/core/client/src/backend.rs index 8a6ffe4384..a6c136dec3 100644 --- a/core/client/src/backend.rs +++ b/core/client/src/backend.rs @@ -16,96 +16,113 @@ //! 
Substrate Client data backend -use std::collections::HashMap; use crate::error; +use consensus::well_known_cache_keys; +use hash_db::Hasher; use primitives::ChangesTrieConfiguration; -use runtime_primitives::{generic::BlockId, Justification, StorageOverlay, ChildrenStorageOverlay}; use runtime_primitives::traits::{Block as BlockT, NumberFor}; +use runtime_primitives::{generic::BlockId, ChildrenStorageOverlay, Justification, StorageOverlay}; use state_machine::backend::Backend as StateBackend; use state_machine::ChangesTrieStorage as StateChangesTrieStorage; -use consensus::well_known_cache_keys; -use hash_db::Hasher; +use std::collections::HashMap; use trie::MemoryDB; /// State of a new block. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum NewBlockState { - /// Normal block. - Normal, - /// New best block. - Best, - /// Newly finalized block (implicitly best). - Final, + /// Normal block. + Normal, + /// New best block. + Best, + /// Newly finalized block (implicitly best). + Final, } impl NewBlockState { - /// Whether this block is the new best block. - pub fn is_best(self) -> bool { - match self { - NewBlockState::Best | NewBlockState::Final => true, - NewBlockState::Normal => false, - } - } - - /// Whether this block is considered final. - pub fn is_final(self) -> bool { - match self { - NewBlockState::Final => true, - NewBlockState::Best | NewBlockState::Normal => false, - } - } + /// Whether this block is the new best block. + pub fn is_best(self) -> bool { + match self { + NewBlockState::Best | NewBlockState::Final => true, + NewBlockState::Normal => false, + } + } + + /// Whether this block is considered final. + pub fn is_final(self) -> bool { + match self { + NewBlockState::Final => true, + NewBlockState::Best | NewBlockState::Normal => false, + } + } } /// Block insertion operation. Keeps hold of the inserted block state and data. -pub trait BlockImportOperation where - Block: BlockT, - H: Hasher, +pub trait BlockImportOperation +where + Block: BlockT, + H: Hasher, { - /// Associated state backend type. - type State: StateBackend; - - /// Returns pending state. Returns None for backends with locally-unavailable state data. - fn state(&self) -> error::Result>; - /// Append block data to the transaction. - fn set_block_data( - &mut self, - header: Block::Header, - body: Option>, - justification: Option, - state: NewBlockState, - ) -> error::Result<()>; - - /// Update cached data. - fn update_cache(&mut self, cache: HashMap>); - /// Inject storage data into the database. - fn update_db_storage(&mut self, update: >::Transaction) -> error::Result<()>; - /// Inject storage data into the database replacing any existing data. - fn reset_storage(&mut self, top: StorageOverlay, children: ChildrenStorageOverlay) -> error::Result; - /// Set top level storage changes. - fn update_storage(&mut self, update: Vec<(Vec, Option>)>) -> error::Result<()>; - /// Inject changes trie data into the database. - fn update_changes_trie(&mut self, update: MemoryDB) -> error::Result<()>; - /// Insert auxiliary keys. Values are `None` if should be deleted. - fn insert_aux(&mut self, ops: I) -> error::Result<()> - where I: IntoIterator, Option>)>; - /// Mark a block as finalized. - fn mark_finalized(&mut self, id: BlockId, justification: Option) -> error::Result<()>; - /// Mark a block as new head. If both block import and set head are specified, set head overrides block import's best block rule. - fn mark_head(&mut self, id: BlockId) -> error::Result<()>; + /// Associated state backend type.
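As a truth table for the `NewBlockState` helpers reformatted above, a self-contained copy of the same three-variant enum, showing that `Final` implies best while the reverse does not hold:

// Local copy of the enum above, so the sketch runs on its own.
#[derive(Clone, Copy)]
enum NewBlockState { Normal, Best, Final }

impl NewBlockState {
    fn is_best(self) -> bool {
        match self { NewBlockState::Best | NewBlockState::Final => true, NewBlockState::Normal => false }
    }
    fn is_final(self) -> bool {
        match self { NewBlockState::Final => true, _ => false }
    }
}

fn main() {
    assert!(NewBlockState::Final.is_best() && NewBlockState::Final.is_final()); // final is implicitly best
    assert!(NewBlockState::Best.is_best() && !NewBlockState::Best.is_final());
    assert!(!NewBlockState::Normal.is_best() && !NewBlockState::Normal.is_final());
}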
+ type State: StateBackend; + + /// Returns pending state. Returns None for backends with locally-unavailable state data. + fn state(&self) -> error::Result>; + /// Append block data to the transaction. + fn set_block_data( + &mut self, + header: Block::Header, + body: Option>, + justification: Option, + state: NewBlockState, + ) -> error::Result<()>; + + /// Update cached data. + fn update_cache(&mut self, cache: HashMap>); + /// Inject storage data into the database. + fn update_db_storage( + &mut self, + update: >::Transaction, + ) -> error::Result<()>; + /// Inject storage data into the database replacing any existing data. + fn reset_storage( + &mut self, + top: StorageOverlay, + children: ChildrenStorageOverlay, + ) -> error::Result; + /// Set top level storage changes. + fn update_storage(&mut self, update: Vec<(Vec, Option>)>) -> error::Result<()>; + /// Inject changes trie data into the database. + fn update_changes_trie(&mut self, update: MemoryDB) -> error::Result<()>; + /// Insert auxiliary keys. Values are `None` if should be deleted. + fn insert_aux(&mut self, ops: I) -> error::Result<()> + where + I: IntoIterator, Option>)>; + /// Mark a block as finalized. + fn mark_finalized( + &mut self, + id: BlockId, + justification: Option, + ) -> error::Result<()>; + /// Mark a block as new head. If both block import and set head are specified, set head overrides block import's best block rule. + fn mark_head(&mut self, id: BlockId) -> error::Result<()>; } /// Provides access to an auxiliary database. pub trait AuxStore { - /// Insert auxiliary data into key-value store. Deletions occur after insertions. - fn insert_aux< - 'a, - 'b: 'a, - 'c: 'a, - I: IntoIterator, - D: IntoIterator, - >(&self, insert: I, delete: D) -> error::Result<()>; - /// Query auxiliary data from key-value store. - fn get_aux(&self, key: &[u8]) -> error::Result>>; + /// Insert auxiliary data into key-value store. Deletions occur after insertions. + fn insert_aux< + 'a, + 'b: 'a, + 'c: 'a, + I: IntoIterator, + D: IntoIterator, + >( + &self, + insert: I, + delete: D, + ) -> error::Result<()>; + /// Query auxiliary data from key-value store. + fn get_aux(&self, key: &[u8]) -> error::Result>>; } /// Client backend. Manages the data layer. @@ -116,83 +133,100 @@ pub trait AuxStore { /// /// The same applies for live `BlockImportOperation`s: while an import operation building on a parent `P` /// is alive, the state for `P` should not be pruned. -pub trait Backend: AuxStore + Send + Sync where - Block: BlockT, - H: Hasher, +pub trait Backend: AuxStore + Send + Sync +where + Block: BlockT, + H: Hasher, { - /// Associated block insertion operation type. - type BlockImportOperation: BlockImportOperation; - /// Associated blockchain backend type. - type Blockchain: crate::blockchain::Backend; - /// Associated state backend type. - type State: StateBackend; - /// Changes trie storage. - type ChangesTrieStorage: PrunableStateChangesTrieStorage; - - /// Begin a new block insertion transaction with given parent block id. - /// When constructing the genesis, this is called with all-zero hash. - fn begin_operation(&self) -> error::Result; - /// Note an operation to contain state transition. - fn begin_state_operation(&self, operation: &mut Self::BlockImportOperation, block: BlockId) -> error::Result<()>; - /// Commit block insertion. - fn commit_operation(&self, transaction: Self::BlockImportOperation) -> error::Result<()>; - /// Finalize block with given Id. 
This should only be called if the parent of the given - /// block has been finalized. - fn finalize_block(&self, block: BlockId, justification: Option) -> error::Result<()>; - /// Returns reference to blockchain backend. - fn blockchain(&self) -> &Self::Blockchain; - /// Returns reference to changes trie storage. - fn changes_trie_storage(&self) -> Option<&Self::ChangesTrieStorage>; - /// Returns true if state for given block is available. - fn have_state_at(&self, hash: &Block::Hash, _number: NumberFor) -> bool { - self.state_at(BlockId::Hash(hash.clone())).is_ok() - } - /// Returns state backend with post-state of given block. - fn state_at(&self, block: BlockId) -> error::Result; - /// Destroy state and save any useful data, such as cache. - fn destroy_state(&self, _state: Self::State) -> error::Result<()> { - Ok(()) - } - /// Attempts to revert the chain by `n` blocks. Returns the number of blocks that were - /// successfully reverted. - fn revert(&self, n: NumberFor) -> error::Result>; - - /// Insert auxiliary data into key-value store. - fn insert_aux< - 'a, - 'b: 'a, - 'c: 'a, - I: IntoIterator, - D: IntoIterator, - >(&self, insert: I, delete: D) -> error::Result<()> - { - AuxStore::insert_aux(self, insert, delete) - } - /// Query auxiliary data from key-value store. - fn get_aux(&self, key: &[u8]) -> error::Result>> { - AuxStore::get_aux(self, key) - } + /// Associated block insertion operation type. + type BlockImportOperation: BlockImportOperation; + /// Associated blockchain backend type. + type Blockchain: crate::blockchain::Backend; + /// Associated state backend type. + type State: StateBackend; + /// Changes trie storage. + type ChangesTrieStorage: PrunableStateChangesTrieStorage; + + /// Begin a new block insertion transaction with given parent block id. + /// When constructing the genesis, this is called with all-zero hash. + fn begin_operation(&self) -> error::Result; + /// Note an operation to contain state transition. + fn begin_state_operation( + &self, + operation: &mut Self::BlockImportOperation, + block: BlockId, + ) -> error::Result<()>; + /// Commit block insertion. + fn commit_operation(&self, transaction: Self::BlockImportOperation) -> error::Result<()>; + /// Finalize block with given Id. This should only be called if the parent of the given + /// block has been finalized. + fn finalize_block( + &self, + block: BlockId, + justification: Option, + ) -> error::Result<()>; + /// Returns reference to blockchain backend. + fn blockchain(&self) -> &Self::Blockchain; + /// Returns reference to changes trie storage. + fn changes_trie_storage(&self) -> Option<&Self::ChangesTrieStorage>; + /// Returns true if state for given block is available. + fn have_state_at(&self, hash: &Block::Hash, _number: NumberFor) -> bool { + self.state_at(BlockId::Hash(hash.clone())).is_ok() + } + /// Returns state backend with post-state of given block. + fn state_at(&self, block: BlockId) -> error::Result; + /// Destroy state and save any useful data, such as cache. + fn destroy_state(&self, _state: Self::State) -> error::Result<()> { + Ok(()) + } + /// Attempts to revert the chain by `n` blocks. Returns the number of blocks that were + /// successfully reverted. + fn revert(&self, n: NumberFor) -> error::Result>; + + /// Insert auxiliary data into key-value store. 
+ fn insert_aux< + 'a, + 'b: 'a, + 'c: 'a, + I: IntoIterator, + D: IntoIterator, + >( + &self, + insert: I, + delete: D, + ) -> error::Result<()> { + AuxStore::insert_aux(self, insert, delete) + } + /// Query auxiliary data from key-value store. + fn get_aux(&self, key: &[u8]) -> error::Result>> { + AuxStore::get_aux(self, key) + } } /// Changes trie storage that supports pruning. pub trait PrunableStateChangesTrieStorage: StateChangesTrieStorage { - /// Get number block of oldest, non-pruned changes trie. - fn oldest_changes_trie_block(&self, config: &ChangesTrieConfiguration, best_finalized: u64) -> u64; + /// Get number block of oldest, non-pruned changes trie. + fn oldest_changes_trie_block( + &self, + config: &ChangesTrieConfiguration, + best_finalized: u64, + ) -> u64; } /// Mark for all Backend implementations, that are making use of state data, stored locally. pub trait LocalBackend: Backend where - Block: BlockT, - H: Hasher, -{} + Block: BlockT, + H: Hasher, +{ +} /// Mark for all Backend implementations, that are fetching required state data from remote nodes. pub trait RemoteBackend: Backend where - Block: BlockT, - H: Hasher, + Block: BlockT, + H: Hasher, { - /// Returns true if the state for given block is available locally. - fn is_local_state_available(&self, block: &BlockId) -> bool; + /// Returns true if the state for given block is available locally. + fn is_local_state_available(&self, block: &BlockId) -> bool; } diff --git a/core/client/src/block_builder/api.rs b/core/client/src/block_builder/api.rs index 48abb38024..473486ec1e 100644 --- a/core/client/src/block_builder/api.rs +++ b/core/client/src/block_builder/api.rs @@ -16,24 +16,24 @@ //! The runtime api for building blocks. -use runtime_primitives::{traits::Block as BlockT, ApplyResult}; +pub use inherents::{CheckInherentsResult, InherentData}; use rstd::vec::Vec; +use runtime_primitives::{traits::Block as BlockT, ApplyResult}; use sr_api_macros::decl_runtime_apis; -pub use inherents::{InherentData, CheckInherentsResult}; decl_runtime_apis! { - /// The `BlockBuilder` api trait that provides required functions for building a block for a runtime. - #[api_version(2)] - pub trait BlockBuilder { - /// Apply the given extrinsics. - fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyResult; - /// Finish the current block. - fn finalize_block() -> ::Header; - /// Generate inherent extrinsics. The inherent data will vary from chain to chain. - fn inherent_extrinsics(inherent: InherentData) -> Vec<::Extrinsic>; - /// Check that the inherents are valid. The inherent data will vary from chain to chain. - fn check_inherents(block: Block, data: InherentData) -> CheckInherentsResult; - /// Generate a random seed. - fn random_seed() -> ::Hash; - } + /// The `BlockBuilder` api trait that provides required functions for building a block for a runtime. + #[api_version(2)] + pub trait BlockBuilder { + /// Apply the given extrinsics. + fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyResult; + /// Finish the current block. + fn finalize_block() -> ::Header; + /// Generate inherent extrinsics. The inherent data will vary from chain to chain. + fn inherent_extrinsics(inherent: InherentData) -> Vec<::Extrinsic>; + /// Check that the inherents are valid. The inherent data will vary from chain to chain. + fn check_inherents(block: Block, data: InherentData) -> CheckInherentsResult; + /// Generate a random seed. 
+ fn random_seed() -> ::Hash; + } } diff --git a/core/client/src/block_builder/block_builder.rs b/core/client/src/block_builder/block_builder.rs index fd1ce4aefb..c22fef1e95 100644 --- a/core/client/src/block_builder/block_builder.rs +++ b/core/client/src/block_builder/block_builder.rs @@ -15,95 +15,104 @@ // along with Substrate. If not, see . use super::api::BlockBuilder as BlockBuilderApi; -use std::vec::Vec; +use crate::blockchain::HeaderBackend; +use crate::error; +use crate::runtime_api::Core; use parity_codec::Encode; -use runtime_primitives::ApplyOutcome; +use primitives::{ExecutionContext, H256}; use runtime_primitives::generic::BlockId; use runtime_primitives::traits::{ - Header as HeaderT, Hash, Block as BlockT, One, HashFor, ProvideRuntimeApi, ApiRef + ApiRef, Block as BlockT, Hash, HashFor, Header as HeaderT, One, ProvideRuntimeApi, }; -use primitives::{H256, ExecutionContext}; -use crate::blockchain::HeaderBackend; -use crate::runtime_api::Core; -use crate::error; - +use runtime_primitives::ApplyOutcome; +use std::vec::Vec; /// Utility for building new (valid) blocks from a stream of extrinsics. -pub struct BlockBuilder<'a, Block, A: ProvideRuntimeApi> where Block: BlockT { - header: ::Header, - extrinsics: Vec<::Extrinsic>, - api: ApiRef<'a, A::Api>, - block_id: BlockId, +pub struct BlockBuilder<'a, Block, A: ProvideRuntimeApi> +where + Block: BlockT, +{ + header: ::Header, + extrinsics: Vec<::Extrinsic>, + api: ApiRef<'a, A::Api>, + block_id: BlockId, } impl<'a, Block, A> BlockBuilder<'a, Block, A> where - Block: BlockT, - A: ProvideRuntimeApi + HeaderBackend + 'a, - A::Api: BlockBuilderApi, + Block: BlockT, + A: ProvideRuntimeApi + HeaderBackend + 'a, + A::Api: BlockBuilderApi, { - /// Create a new instance of builder from the given client, building on the latest block. - pub fn new(api: &'a A) -> error::Result { - api.info().and_then(|i| Self::at_block(&BlockId::Hash(i.best_hash), api)) - } + /// Create a new instance of builder from the given client, building on the latest block. + pub fn new(api: &'a A) -> error::Result { + api.info() + .and_then(|i| Self::at_block(&BlockId::Hash(i.best_hash), api)) + } - /// Create a new instance of builder from the given client using a particular block's ID to - /// build upon. - pub fn at_block(block_id: &BlockId, api: &'a A) -> error::Result { - let number = api.block_number_from_id(block_id)? - .ok_or_else(|| error::ErrorKind::UnknownBlock(format!("{}", block_id)))? - + One::one(); + /// Create a new instance of builder from the given client using a particular block's ID to + /// build upon. + pub fn at_block(block_id: &BlockId, api: &'a A) -> error::Result { + let number = api + .block_number_from_id(block_id)? + .ok_or_else(|| error::ErrorKind::UnknownBlock(format!("{}", block_id)))? + + One::one(); - let parent_hash = api.block_hash_from_id(block_id)? - .ok_or_else(|| error::ErrorKind::UnknownBlock(format!("{}", block_id)))?; - let header = <::Header as HeaderT>::new( - number, - Default::default(), - Default::default(), - parent_hash, - Default::default() - ); - let api = api.runtime_api(); - api.initialize_block_with_context(block_id, ExecutionContext::BlockConstruction, &header)?; - Ok(BlockBuilder { - header, - extrinsics: Vec::new(), - api, - block_id: *block_id, - }) - } + let parent_hash = api + .block_hash_from_id(block_id)? 
+ .ok_or_else(|| error::ErrorKind::UnknownBlock(format!("{}", block_id)))?; + let header = <::Header as HeaderT>::new( + number, + Default::default(), + Default::default(), + parent_hash, + Default::default(), + ); + let api = api.runtime_api(); + api.initialize_block_with_context(block_id, ExecutionContext::BlockConstruction, &header)?; + Ok(BlockBuilder { + header, + extrinsics: Vec::new(), + api, + block_id: *block_id, + }) + } - /// Push onto the block's list of extrinsics. - /// - /// This will ensure the extrinsic can be validly executed (by executing it); - pub fn push(&mut self, xt: ::Extrinsic) -> error::Result<()> { - use crate::runtime_api::ApiExt; + /// Push onto the block's list of extrinsics. + /// + /// This will ensure the extrinsic can be validly executed (by executing it); + pub fn push(&mut self, xt: ::Extrinsic) -> error::Result<()> { + use crate::runtime_api::ApiExt; - let block_id = &self.block_id; - let extrinsics = &mut self.extrinsics; + let block_id = &self.block_id; + let extrinsics = &mut self.extrinsics; - self.api.map_api_result(|api| { - match api.apply_extrinsic_with_context(block_id, ExecutionContext::BlockConstruction, xt.clone())? { - Ok(ApplyOutcome::Success) | Ok(ApplyOutcome::Fail) => { - extrinsics.push(xt); - Ok(()) - } - Err(e) => { - Err(error::ErrorKind::ApplyExtrinsicFailed(e).into()) - } - } - }) - } + self.api.map_api_result(|api| { + match api.apply_extrinsic_with_context( + block_id, + ExecutionContext::BlockConstruction, + xt.clone(), + )? { + Ok(ApplyOutcome::Success) | Ok(ApplyOutcome::Fail) => { + extrinsics.push(xt); + Ok(()) + } + Err(e) => Err(error::ErrorKind::ApplyExtrinsicFailed(e).into()), + } + }) + } - /// Consume the builder to return a valid `Block` containing all pushed extrinsics. - pub fn bake(mut self) -> error::Result { - self.header = self.api.finalize_block_with_context(&self.block_id, ExecutionContext::BlockConstruction)?; + /// Consume the builder to return a valid `Block` containing all pushed extrinsics. + pub fn bake(mut self) -> error::Result { + self.header = self + .api + .finalize_block_with_context(&self.block_id, ExecutionContext::BlockConstruction)?; - debug_assert_eq!( - self.header.extrinsics_root().clone(), - HashFor::::ordered_trie_root(self.extrinsics.iter().map(Encode::encode)), - ); + debug_assert_eq!( + self.header.extrinsics_root().clone(), + HashFor::::ordered_trie_root(self.extrinsics.iter().map(Encode::encode)), + ); - Ok(::new(self.header, self.extrinsics)) - } + Ok(::new(self.header, self.extrinsics)) + } } diff --git a/core/client/src/blockchain.rs b/core/client/src/blockchain.rs index 5d7b2a9c23..7b1879b40c 100644 --- a/core/client/src/blockchain.rs +++ b/core/client/src/blockchain.rs @@ -18,123 +18,127 @@ use std::sync::Arc; -use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, NumberFor}; +use consensus::well_known_cache_keys; use runtime_primitives::generic::BlockId; +use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, NumberFor}; use runtime_primitives::Justification; -use consensus::well_known_cache_keys; use crate::error::{ErrorKind, Result}; /// Blockchain database header backend. Does not perform any validation. pub trait HeaderBackend: Send + Sync { - /// Get block header. Returns `None` if block is not found. - fn header(&self, id: BlockId) -> Result>; - /// Get blockchain info. - fn info(&self) -> Result>; - /// Get block status. - fn status(&self, id: BlockId) -> Result; - /// Get block number by hash. 
Returns `None` if the header is not in the chain. - fn number(&self, hash: Block::Hash) -> Result::Header as HeaderT>::Number>>; - /// Get block hash by number. Returns `None` if the header is not in the chain. - fn hash(&self, number: NumberFor) -> Result>; - - /// Convert an arbitrary block ID into a block hash. - fn block_hash_from_id(&self, id: &BlockId) -> Result> { - match *id { - BlockId::Hash(h) => Ok(Some(h)), - BlockId::Number(n) => self.hash(n), - } - } - - /// Convert an arbitrary block ID into a block hash. - fn block_number_from_id(&self, id: &BlockId) -> Result>> { - match *id { - BlockId::Hash(_) => Ok(self.header(*id)?.map(|h| h.number().clone())), - BlockId::Number(n) => Ok(Some(n)), - } - } - - /// Get block header. Returns `UnknownBlock` error if block is not found. - fn expect_header(&self, id: BlockId) -> Result { - self.header(id)?.ok_or_else(|| ErrorKind::UnknownBlock(format!("{}", id)).into()) - } - - /// Convert an arbitrary block ID into a block number. Returns `UnknownBlock` error if block is not found. - fn expect_block_number_from_id(&self, id: &BlockId) -> Result> { - self.block_number_from_id(id) - .and_then(|n| n.ok_or_else(|| ErrorKind::UnknownBlock(format!("{}", id)).into())) - } - - /// Convert an arbitrary block ID into a block hash. Returns `UnknownBlock` error if block is not found. - fn expect_block_hash_from_id(&self, id: &BlockId) -> Result { - self.block_hash_from_id(id) - .and_then(|n| n.ok_or_else(|| ErrorKind::UnknownBlock(format!("{}", id)).into())) - } + /// Get block header. Returns `None` if block is not found. + fn header(&self, id: BlockId) -> Result>; + /// Get blockchain info. + fn info(&self) -> Result>; + /// Get block status. + fn status(&self, id: BlockId) -> Result; + /// Get block number by hash. Returns `None` if the header is not in the chain. + fn number( + &self, + hash: Block::Hash, + ) -> Result::Header as HeaderT>::Number>>; + /// Get block hash by number. Returns `None` if the header is not in the chain. + fn hash(&self, number: NumberFor) -> Result>; + + /// Convert an arbitrary block ID into a block hash. + fn block_hash_from_id(&self, id: &BlockId) -> Result> { + match *id { + BlockId::Hash(h) => Ok(Some(h)), + BlockId::Number(n) => self.hash(n), + } + } + + /// Convert an arbitrary block ID into a block hash. + fn block_number_from_id(&self, id: &BlockId) -> Result>> { + match *id { + BlockId::Hash(_) => Ok(self.header(*id)?.map(|h| h.number().clone())), + BlockId::Number(n) => Ok(Some(n)), + } + } + + /// Get block header. Returns `UnknownBlock` error if block is not found. + fn expect_header(&self, id: BlockId) -> Result { + self.header(id)? + .ok_or_else(|| ErrorKind::UnknownBlock(format!("{}", id)).into()) + } + + /// Convert an arbitrary block ID into a block number. Returns `UnknownBlock` error if block is not found. + fn expect_block_number_from_id(&self, id: &BlockId) -> Result> { + self.block_number_from_id(id) + .and_then(|n| n.ok_or_else(|| ErrorKind::UnknownBlock(format!("{}", id)).into())) + } + + /// Convert an arbitrary block ID into a block hash. Returns `UnknownBlock` error if block is not found. + fn expect_block_hash_from_id(&self, id: &BlockId) -> Result { + self.block_hash_from_id(id) + .and_then(|n| n.ok_or_else(|| ErrorKind::UnknownBlock(format!("{}", id)).into())) + } } /// Blockchain database backend. Does not perform any validation. pub trait Backend: HeaderBackend { - /// Get block body. Returns `None` if block is not found. 
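The two `BlockId` conversions above are the pivot of this trait: a hash id is already resolved and passes through as-is, while a number id goes through the number-to-hash index. A toy model of that dispatch, with a vector standing in for a real `HeaderBackend` (all names hypothetical):

#[derive(Clone, Copy)]
enum BlockId { Hash(u64), Number(u64) }

struct ToyChain { hashes: Vec<u64> } // index = block number

impl ToyChain {
    // Mirrors `block_hash_from_id`: hash ids resolve to themselves,
    // number ids go through the number -> hash lookup (here: Vec indexing).
    fn block_hash_from_id(&self, id: BlockId) -> Option<u64> {
        match id {
            BlockId::Hash(h) => Some(h),
            BlockId::Number(n) => self.hashes.get(n as usize).cloned(),
        }
    }
}

fn main() {
    let chain = ToyChain { hashes: vec![0xaa, 0xbb, 0xcc] };
    assert_eq!(chain.block_hash_from_id(BlockId::Number(2)), Some(0xcc));
    assert_eq!(chain.block_hash_from_id(BlockId::Number(9)), None); // unknown number
    assert_eq!(chain.block_hash_from_id(BlockId::Hash(0xdd)), Some(0xdd)); // hashes are not validated here
}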
- fn body(&self, id: BlockId) -> Result::Extrinsic>>>; - /// Get block justification. Returns `None` if justification does not exist. - fn justification(&self, id: BlockId) -> Result>; - /// Get last finalized block hash. - fn last_finalized(&self) -> Result; - /// Returns data cache reference, if it is enabled on this backend. - fn cache(&self) -> Option>>; - - /// Returns hashes of all blocks that are leaves of the block tree. - /// In other words, those that have no children and are chain heads. - /// Results must be ordered best (longest, highest) chain first. - fn leaves(&self) -> Result>; - - /// Return hashes of all blocks that are children of the block with `parent_hash`. - fn children(&self, parent_hash: Block::Hash) -> Result>; + /// Get block body. Returns `None` if block is not found. + fn body(&self, id: BlockId) -> Result::Extrinsic>>>; + /// Get block justification. Returns `None` if justification does not exist. + fn justification(&self, id: BlockId) -> Result>; + /// Get last finalized block hash. + fn last_finalized(&self) -> Result; + /// Returns data cache reference, if it is enabled on this backend. + fn cache(&self) -> Option>>; + + /// Returns hashes of all blocks that are leaves of the block tree. + /// In other words, those that have no children and are chain heads. + /// Results must be ordered best (longest, highest) chain first. + fn leaves(&self) -> Result>; + + /// Return hashes of all blocks that are children of the block with `parent_hash`. + fn children(&self, parent_hash: Block::Hash) -> Result>; } /// Provides access to the optional cache. pub trait ProvideCache { - /// Returns data cache reference, if it is enabled on this backend. - fn cache(&self) -> Option>>; + /// Returns data cache reference, if it is enabled on this backend. + fn cache(&self) -> Option>>; } /// Blockchain optional data cache. pub trait Cache: Send + Sync { - /// Returns cached value by the given key. - fn get_at(&self, key: &well_known_cache_keys::Id, block: &BlockId) -> Option>; + /// Returns cached value by the given key. + fn get_at(&self, key: &well_known_cache_keys::Id, block: &BlockId) -> Option>; } /// Blockchain info #[derive(Debug)] pub struct Info { - /// Best block hash. - pub best_hash: Block::Hash, - /// Best block number. - pub best_number: <::Header as HeaderT>::Number, - /// Genesis block hash. - pub genesis_hash: Block::Hash, - /// The head of the finalized chain. - pub finalized_hash: Block::Hash, - /// Last finalized block number. - pub finalized_number: <::Header as HeaderT>::Number, + /// Best block hash. + pub best_hash: Block::Hash, + /// Best block number. + pub best_number: <::Header as HeaderT>::Number, + /// Genesis block hash. + pub genesis_hash: Block::Hash, + /// The head of the finalized chain. + pub finalized_hash: Block::Hash, + /// Last finalized block number. + pub finalized_number: <::Header as HeaderT>::Number, } /// Block status. #[derive(Debug, PartialEq, Eq)] pub enum BlockStatus { - /// Already in the blockchain. - InChain, - /// Not in the queue or the blockchain. - Unknown, + /// Already in the blockchain. + InChain, + /// Not in the queue or the blockchain. + Unknown, } /// An entry in a tree route. #[derive(Debug)] pub struct RouteEntry { - /// The number of the block. - pub number: ::Number, - /// The hash of the block. - pub hash: Block::Hash, + /// The number of the block. + pub number: ::Number, + /// The hash of the block. + pub hash: Block::Hash, } /// A tree-route from one block to another in the chain.
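Before the `TreeRoute` machinery in the next hunk, the shape it produces is worth pinning down: a single vector laid out as retracted blocks (newest first), then the common ancestor, then enacted blocks, with `pivot` indexing the ancestor. A sketch with made-up block names; the slicing below mirrors `retracted`/`common_block`/`enacted` as reformatted just after:

fn main() {
    // Route from tip E1 back to ancestor A and down to tip E2:
    //   retracted: [E1, D1], common ancestor: A, enacted: [D2, E2]
    let route = ["E1", "D1", "A", "D2", "E2"];
    let pivot = 2;

    let retracted = &route[..pivot];
    let common = route[pivot];
    let enacted = &route[pivot + 1..];

    assert_eq!(retracted, ["E1", "D1"]); // reverse order, walking towards the ancestor
    assert_eq!(common, "A");
    assert_eq!(enacted, ["D2", "E2"]); // descendants of the ancestor, in chain order
}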
@@ -161,95 +165,95 @@ pub struct RouteEntry { /// ``` #[derive(Debug)] pub struct TreeRoute { - route: Vec>, - pivot: usize, + route: Vec>, + pivot: usize, } impl TreeRoute { - /// Get a slice of all retracted blocks in reverse order (towards common ancestor) - pub fn retracted(&self) -> &[RouteEntry] { - &self.route[..self.pivot] - } - - /// Get the common ancestor block. This might be one of the two blocks of the - /// route. - pub fn common_block(&self) -> &RouteEntry { - self.route.get(self.pivot).expect("tree-routes are computed between blocks; \ - which are included in the route; \ - thus it is never empty; qed") - } - - /// Get a slice of enacted blocks (descendants of the common ancestor) - pub fn enacted(&self) -> &[RouteEntry] { - &self.route[self.pivot + 1 ..] - } + /// Get a slice of all retracted blocks in reverse order (towards common ancestor) + pub fn retracted(&self) -> &[RouteEntry] { + &self.route[..self.pivot] + } + + /// Get the common ancestor block. This might be one of the two blocks of the + /// route. + pub fn common_block(&self) -> &RouteEntry { + self.route.get(self.pivot).expect( + "tree-routes are computed between blocks; \ + which are included in the route; \ + thus it is never empty; qed", + ) + } + + /// Get a slice of enacted blocks (descendants of the common ancestor) + pub fn enacted(&self) -> &[RouteEntry] { + &self.route[self.pivot + 1..] + } } /// Compute a tree-route between two blocks. See tree-route docs for more details. pub fn tree_route>( - backend: &Backend, - from: BlockId, - to: BlockId, + backend: &Backend, + from: BlockId, + to: BlockId, ) -> Result> { - use runtime_primitives::traits::Header; - - let load_header = |id: BlockId| { - match backend.header(id) { - Ok(Some(hdr)) => Ok(hdr), - Ok(None) => Err(ErrorKind::UnknownBlock(format!("Unknown block {:?}", id)).into()), - Err(e) => Err(e), - } - }; - - let mut from = load_header(from)?; - let mut to = load_header(to)?; - - let mut from_branch = Vec::new(); - let mut to_branch = Vec::new(); - - while to.number() > from.number() { - to_branch.push(RouteEntry { - number: to.number().clone(), - hash: to.hash(), - }); - - to = load_header(BlockId::Hash(*to.parent_hash()))?; - } - - while from.number() > to.number() { - from_branch.push(RouteEntry { - number: from.number().clone(), - hash: from.hash(), - }); - from = load_header(BlockId::Hash(*from.parent_hash()))?; - } - - // numbers are equal now. walk backwards until the block is the same - - while to != from { - to_branch.push(RouteEntry { - number: to.number().clone(), - hash: to.hash(), - }); - to = load_header(BlockId::Hash(*to.parent_hash()))?; - - from_branch.push(RouteEntry { - number: from.number().clone(), - hash: from.hash(), - }); - from = load_header(BlockId::Hash(*from.parent_hash()))?; - } - - // add the pivot block
and append the reversed to-branch (note that it's in reverse order originally) - let pivot = from_branch.len(); - from_branch.push(RouteEntry { - number: to.number().clone(), - hash: to.hash(), - }); - from_branch.extend(to_branch.into_iter().rev()); - - Ok(TreeRoute { - route: from_branch, - pivot, - }) + use runtime_primitives::traits::Header; + + let load_header = |id: BlockId| match backend.header(id) { + Ok(Some(hdr)) => Ok(hdr), + Ok(None) => Err(ErrorKind::UnknownBlock(format!("Unknown block {:?}", id)).into()), + Err(e) => Err(e), + }; + + let mut from = load_header(from)?; + let mut to = load_header(to)?; + + let mut from_branch = Vec::new(); + let mut to_branch = Vec::new(); + + while to.number() > from.number() { + to_branch.push(RouteEntry { + number: to.number().clone(), + hash: to.hash(), + }); + + to = load_header(BlockId::Hash(*to.parent_hash()))?; + } + + while from.number() > to.number() { + from_branch.push(RouteEntry { + number: from.number().clone(), + hash: from.hash(), + }); + from = load_header(BlockId::Hash(*from.parent_hash()))?; + } + + // numbers are equal now. walk backwards until the block is the same + + while to != from { + to_branch.push(RouteEntry { + number: to.number().clone(), + hash: to.hash(), + }); + to = load_header(BlockId::Hash(*to.parent_hash()))?; + + from_branch.push(RouteEntry { + number: from.number().clone(), + hash: from.hash(), + }); + from = load_header(BlockId::Hash(*from.parent_hash()))?; + } + + // add the pivot block and append the reversed to-branch (note that it's in reverse order originally) + let pivot = from_branch.len(); + from_branch.push(RouteEntry { + number: to.number().clone(), + hash: to.hash(), + }); + from_branch.extend(to_branch.into_iter().rev()); + + Ok(TreeRoute { + route: from_branch, + pivot, + }) } diff --git a/core/client/src/call_executor.rs b/core/client/src/call_executor.rs index 20460f72ad..185d29ad6b 100644 --- a/core/client/src/call_executor.rs +++ b/core/client/src/call_executor.rs @@ -14,17 +14,18 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use std::{sync::Arc, cmp::Ord, panic::UnwindSafe, result}; -use parity_codec::{Encode, Decode}; +use executor::{NativeVersion, RuntimeInfo, RuntimeVersion}; +use hash_db::Hasher; +use parity_codec::{Decode, Encode}; +use primitives::{Blake2Hasher, NativeOrEncoded, NeverNativeValue, OffchainExt, H256}; use runtime_primitives::generic::BlockId; use runtime_primitives::traits::Block as BlockT; use state_machine::{ - self, OverlayedChanges, Ext, CodeExecutor, ExecutionManager, ExecutionStrategy, NeverOffchainExt, + self, CodeExecutor, ExecutionManager, ExecutionStrategy, Ext, NeverOffchainExt, + OverlayedChanges, }; -use executor::{RuntimeVersion, RuntimeInfo, NativeVersion}; -use hash_db::Hasher; +use std::{cmp::Ord, panic::UnwindSafe, result, sync::Arc}; use trie::MemoryDB; -use primitives::{H256, Blake2Hasher, NativeOrEncoded, NeverNativeValue, OffchainExt}; use crate::backend; use crate::error; @@ -32,300 +33,319 @@ use crate::error; /// Method call executor. pub trait CallExecutor where - B: BlockT, - H: Hasher, - H::Out: Ord, + B: BlockT, + H: Hasher, + H::Out: Ord, { - /// Externalities error type. - type Error: state_machine::Error; + /// Externalities error type. + type Error: state_machine::Error; - /// Execute a call to a contract on top of state in a block of given hash. - /// - /// No changes are made.
- fn call< - O: OffchainExt, - >( - &self, - id: &BlockId, - method: &str, - call_data: &[u8], - strategy: ExecutionStrategy, - side_effects_handler: Option<&mut O>, - ) -> Result, error::Error>; + /// Execute a call to a contract on top of state in a block of given hash. + /// + /// No changes are made. + fn call( + &self, + id: &BlockId, + method: &str, + call_data: &[u8], + strategy: ExecutionStrategy, + side_effects_handler: Option<&mut O>, + ) -> Result, error::Error>; - /// Execute a contextual call on top of state in a block of a given hash. - /// - /// No changes are made. - /// Before executing the method, passed header is installed as the current header - /// of the execution context. - fn contextual_call< - O: OffchainExt, - PB: Fn() -> error::Result, - EM: Fn( - Result, Self::Error>, - Result, Self::Error> - ) -> Result, Self::Error>, - R: Encode + Decode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, - >( - &self, - at: &BlockId, - method: &str, - call_data: &[u8], - changes: &mut OverlayedChanges, - initialized_block: &mut Option>, - prepare_environment_block: PB, - execution_manager: ExecutionManager, - native_call: Option, - side_effects_handler: Option<&mut O>, - ) -> error::Result> where ExecutionManager: Clone; + /// Execute a contextual call on top of state in a block of a given hash. + /// + /// No changes are made. + /// Before executing the method, passed header is installed as the current header + /// of the execution context. + fn contextual_call< + O: OffchainExt, + PB: Fn() -> error::Result, + EM: Fn( + Result, Self::Error>, + Result, Self::Error>, + ) -> Result, Self::Error>, + R: Encode + Decode + PartialEq, + NC: FnOnce() -> result::Result + UnwindSafe, + >( + &self, + at: &BlockId, + method: &str, + call_data: &[u8], + changes: &mut OverlayedChanges, + initialized_block: &mut Option>, + prepare_environment_block: PB, + execution_manager: ExecutionManager, + native_call: Option, + side_effects_handler: Option<&mut O>, + ) -> error::Result> + where + ExecutionManager: Clone; - /// Extract RuntimeVersion of given block - /// - /// No changes are made. - fn runtime_version(&self, id: &BlockId) -> Result; + /// Extract RuntimeVersion of given block + /// + /// No changes are made. + fn runtime_version(&self, id: &BlockId) -> Result; - /// Execute a call to a contract on top of given state. - /// - /// No changes are made. - fn call_at_state< - O: OffchainExt, - S: state_machine::Backend, - F: FnOnce( - Result, Self::Error>, - Result, Self::Error> - ) -> Result, Self::Error>, - R: Encode + Decode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, - >(&self, - state: &S, - overlay: &mut OverlayedChanges, - method: &str, - call_data: &[u8], - manager: ExecutionManager, - native_call: Option, - side_effects_handler: Option<&mut O>, - ) -> Result<(NativeOrEncoded, S::Transaction, Option>), error::Error>; + /// Execute a call to a contract on top of given state. + /// + /// No changes are made. 
+ fn call_at_state< + O: OffchainExt, + S: state_machine::Backend, + F: FnOnce( + Result, Self::Error>, + Result, Self::Error>, + ) -> Result, Self::Error>, + R: Encode + Decode + PartialEq, + NC: FnOnce() -> result::Result + UnwindSafe, + >( + &self, + state: &S, + overlay: &mut OverlayedChanges, + method: &str, + call_data: &[u8], + manager: ExecutionManager, + native_call: Option, + side_effects_handler: Option<&mut O>, + ) -> Result<(NativeOrEncoded, S::Transaction, Option>), error::Error>; - /// Execute a call to a contract on top of given state, gathering execution proof. - /// - /// No changes are made. - fn prove_at_state>( - &self, - state: S, - overlay: &mut OverlayedChanges, - method: &str, - call_data: &[u8] - ) -> Result<(Vec, Vec>), error::Error> { - let trie_state = state.try_into_trie_backend() - .ok_or_else(|| Box::new(state_machine::ExecutionError::UnableToGenerateProof) as Box)?; - self.prove_at_trie_state(&trie_state, overlay, method, call_data) - } + /// Execute a call to a contract on top of given state, gathering execution proof. + /// + /// No changes are made. + fn prove_at_state>( + &self, + state: S, + overlay: &mut OverlayedChanges, + method: &str, + call_data: &[u8], + ) -> Result<(Vec, Vec>), error::Error> { + let trie_state = state.try_into_trie_backend().ok_or_else(|| { + Box::new(state_machine::ExecutionError::UnableToGenerateProof) + as Box + })?; + self.prove_at_trie_state(&trie_state, overlay, method, call_data) + } - /// Execute a call to a contract on top of given trie state, gathering execution proof. - /// - /// No changes are made. - fn prove_at_trie_state>( - &self, - trie_state: &state_machine::TrieBackend, - overlay: &mut OverlayedChanges, - method: &str, - call_data: &[u8] - ) -> Result<(Vec, Vec>), error::Error>; + /// Execute a call to a contract on top of given trie state, gathering execution proof. + /// + /// No changes are made. + fn prove_at_trie_state>( + &self, + trie_state: &state_machine::TrieBackend, + overlay: &mut OverlayedChanges, + method: &str, + call_data: &[u8], + ) -> Result<(Vec, Vec>), error::Error>; - /// Get runtime version if supported. - fn native_runtime_version(&self) -> Option<&NativeVersion>; + /// Get runtime version if supported. + fn native_runtime_version(&self) -> Option<&NativeVersion>; } /// Call executor that executes methods locally, querying all required /// data from local backend. pub struct LocalCallExecutor { - backend: Arc, - executor: E, + backend: Arc, + executor: E, } impl LocalCallExecutor { - /// Creates new instance of local call executor. - pub fn new(backend: Arc, executor: E) -> Self { - LocalCallExecutor { - backend, - executor, - } - } + /// Creates new instance of local call executor. 
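One detail of `contextual_call` worth calling out (the guard is reformatted just below): `Core_initialize_block` is run lazily, only when the cached `initialized_block` id does not match the call's target block. A toy model of that caching guard, with block ids simplified to `u64` and the runtime call reduced to a flag; the method names follow the `Trait_method` entry-point convention but the scenario is invented:

// Returns whether the stand-in for Core_initialize_block had to run.
fn contextual_call(initialized_block: &mut Option<u64>, at: u64, method: &str) -> bool {
    let mut ran_initialize = false;
    if method != "Core_initialize_block" && initialized_block.map(|id| id != at).unwrap_or(true) {
        ran_initialize = true; // stands in for executing Core_initialize_block
        *initialized_block = Some(at);
    }
    // ... the main call would happen here ...
    if method == "Core_initialize_block" {
        *initialized_block = Some(at);
    }
    ran_initialize
}

fn main() {
    let mut cached = None;
    assert!(contextual_call(&mut cached, 5, "BlockBuilder_apply_extrinsic"));  // first call initializes
    assert!(!contextual_call(&mut cached, 5, "BlockBuilder_apply_extrinsic")); // same block: cached, skipped
    assert!(contextual_call(&mut cached, 6, "BlockBuilder_apply_extrinsic"));  // new block: re-initialized
}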
+ pub fn new(backend: Arc, executor: E) -> Self { + LocalCallExecutor { backend, executor } + } } -impl Clone for LocalCallExecutor where E: Clone { - fn clone(&self) -> Self { - LocalCallExecutor { - backend: self.backend.clone(), - executor: self.executor.clone(), - } - } +impl Clone for LocalCallExecutor +where + E: Clone, +{ + fn clone(&self) -> Self { + LocalCallExecutor { + backend: self.backend.clone(), + executor: self.executor.clone(), + } + } } impl CallExecutor for LocalCallExecutor where - B: backend::Backend, - E: CodeExecutor + RuntimeInfo, - Block: BlockT, + B: backend::Backend, + E: CodeExecutor + RuntimeInfo, + Block: BlockT, { - type Error = E::Error; + type Error = E::Error; - fn call(&self, - id: &BlockId, - method: &str, - call_data: &[u8], - strategy: ExecutionStrategy, - side_effects_handler: Option<&mut O>, - ) -> error::Result> { - let mut changes = OverlayedChanges::default(); - let state = self.backend.state_at(*id)?; - let return_data = state_machine::new( - &state, - self.backend.changes_trie_storage(), - side_effects_handler, - &mut changes, - &self.executor, - method, - call_data, - ).execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( - strategy.get_manager(), - false, - None, - ) - .map(|(result, _, _)| result)?; - self.backend.destroy_state(state)?; - Ok(return_data.into_encoded()) - } + fn call( + &self, + id: &BlockId, + method: &str, + call_data: &[u8], + strategy: ExecutionStrategy, + side_effects_handler: Option<&mut O>, + ) -> error::Result> { + let mut changes = OverlayedChanges::default(); + let state = self.backend.state_at(*id)?; + let return_data = state_machine::new( + &state, + self.backend.changes_trie_storage(), + side_effects_handler, + &mut changes, + &self.executor, + method, + call_data, + ) + .execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( + strategy.get_manager(), + false, + None, + ) + .map(|(result, _, _)| result)?; + self.backend.destroy_state(state)?; + Ok(return_data.into_encoded()) + } - fn contextual_call< - O: OffchainExt, - PB: Fn() -> error::Result, - EM: Fn( - Result, Self::Error>, - Result, Self::Error> - ) -> Result, Self::Error>, - R: Encode + Decode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, - >( - &self, - at: &BlockId, - method: &str, - call_data: &[u8], - changes: &mut OverlayedChanges, - initialized_block: &mut Option>, - prepare_environment_block: PB, - execution_manager: ExecutionManager, - native_call: Option, - mut side_effects_handler: Option<&mut O>, - ) -> Result, error::Error> where ExecutionManager: Clone { - let state = self.backend.state_at(*at)?; - if method != "Core_initialize_block" && initialized_block.map(|id| id != *at).unwrap_or(true) { - let header = prepare_environment_block()?; - state_machine::new( - &state, - self.backend.changes_trie_storage(), - side_effects_handler.as_mut().map(|x| &mut **x), - changes, - &self.executor, - "Core_initialize_block", - &header.encode(), - ).execute_using_consensus_failure_handler::<_, R, fn() -> _>( - execution_manager.clone(), - false, - None, - )?; - *initialized_block = Some(*at); - } + fn contextual_call< + O: OffchainExt, + PB: Fn() -> error::Result, + EM: Fn( + Result, Self::Error>, + Result, Self::Error>, + ) -> Result, Self::Error>, + R: Encode + Decode + PartialEq, + NC: FnOnce() -> result::Result + UnwindSafe, + >( + &self, + at: &BlockId, + method: &str, + call_data: &[u8], + changes: &mut OverlayedChanges, + initialized_block: &mut Option>, + prepare_environment_block: PB, + 
execution_manager: ExecutionManager, + native_call: Option, + mut side_effects_handler: Option<&mut O>, + ) -> Result, error::Error> + where + ExecutionManager: Clone, + { + let state = self.backend.state_at(*at)?; + if method != "Core_initialize_block" + && initialized_block.map(|id| id != *at).unwrap_or(true) + { + let header = prepare_environment_block()?; + state_machine::new( + &state, + self.backend.changes_trie_storage(), + side_effects_handler.as_mut().map(|x| &mut **x), + changes, + &self.executor, + "Core_initialize_block", + &header.encode(), + ) + .execute_using_consensus_failure_handler::<_, R, fn() -> _>( + execution_manager.clone(), + false, + None, + )?; + *initialized_block = Some(*at); + } - let result = state_machine::new( - &state, - self.backend.changes_trie_storage(), - side_effects_handler, - changes, - &self.executor, - method, - call_data, - ).execute_using_consensus_failure_handler( - execution_manager, - false, - native_call, - ).map(|(result, _, _)| result)?; + let result = state_machine::new( + &state, + self.backend.changes_trie_storage(), + side_effects_handler, + changes, + &self.executor, + method, + call_data, + ) + .execute_using_consensus_failure_handler(execution_manager, false, native_call) + .map(|(result, _, _)| result)?; - // If the method is `initialize_block` we need to set the `initialized_block` - if method == "Core_initialize_block" { - *initialized_block = Some(*at); - } + // If the method is `initialize_block` we need to set the `initialized_block` + if method == "Core_initialize_block" { + *initialized_block = Some(*at); + } - self.backend.destroy_state(state)?; - Ok(result) - } + self.backend.destroy_state(state)?; + Ok(result) + } - fn runtime_version(&self, id: &BlockId) -> error::Result { - let mut overlay = OverlayedChanges::default(); - let state = self.backend.state_at(*id)?; - let mut ext = Ext::new(&mut overlay, &state, self.backend.changes_trie_storage(), NeverOffchainExt::new()); - self.executor.runtime_version(&mut ext) - .ok_or(error::ErrorKind::VersionInvalid.into()) - } + fn runtime_version(&self, id: &BlockId) -> error::Result { + let mut overlay = OverlayedChanges::default(); + let state = self.backend.state_at(*id)?; + let mut ext = Ext::new( + &mut overlay, + &state, + self.backend.changes_trie_storage(), + NeverOffchainExt::new(), + ); + self.executor + .runtime_version(&mut ext) + .ok_or(error::ErrorKind::VersionInvalid.into()) + } - fn call_at_state< - O: OffchainExt, - S: state_machine::Backend, - F: FnOnce( - Result, Self::Error>, - Result, Self::Error> - ) -> Result, Self::Error>, - R: Encode + Decode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, - >(&self, - state: &S, - changes: &mut OverlayedChanges, - method: &str, - call_data: &[u8], - manager: ExecutionManager, - native_call: Option, - side_effects_handler: Option<&mut O>, - ) -> error::Result<(NativeOrEncoded, S::Transaction, Option>)> { - state_machine::new( - state, - self.backend.changes_trie_storage(), - side_effects_handler, - changes, - &self.executor, - method, - call_data, - ).execute_using_consensus_failure_handler( - manager, - true, - native_call, - ) - .map(|(result, storage_tx, changes_tx)| ( - result, - storage_tx.expect("storage_tx is always computed when compute_tx is true; qed"), - changes_tx, - )) - .map_err(Into::into) - } + fn call_at_state< + O: OffchainExt, + S: state_machine::Backend, + F: FnOnce( + Result, Self::Error>, + Result, Self::Error>, + ) -> Result, Self::Error>, + R: Encode + Decode + PartialEq, + NC: FnOnce() -> 
result::Result + UnwindSafe, + >( + &self, + state: &S, + changes: &mut OverlayedChanges, + method: &str, + call_data: &[u8], + manager: ExecutionManager, + native_call: Option, + side_effects_handler: Option<&mut O>, + ) -> error::Result<( + NativeOrEncoded, + S::Transaction, + Option>, + )> { + state_machine::new( + state, + self.backend.changes_trie_storage(), + side_effects_handler, + changes, + &self.executor, + method, + call_data, + ) + .execute_using_consensus_failure_handler(manager, true, native_call) + .map(|(result, storage_tx, changes_tx)| { + ( + result, + storage_tx.expect("storage_tx is always computed when compute_tx is true; qed"), + changes_tx, + ) + }) + .map_err(Into::into) + } - fn prove_at_trie_state>( - &self, - trie_state: &state_machine::TrieBackend, - overlay: &mut OverlayedChanges, - method: &str, - call_data: &[u8] - ) -> Result<(Vec, Vec>), error::Error> { - state_machine::prove_execution_on_trie_backend( - trie_state, - overlay, - &self.executor, - method, - call_data, - ) - .map_err(Into::into) - } + fn prove_at_trie_state>( + &self, + trie_state: &state_machine::TrieBackend, + overlay: &mut OverlayedChanges, + method: &str, + call_data: &[u8], + ) -> Result<(Vec, Vec>), error::Error> { + state_machine::prove_execution_on_trie_backend( + trie_state, + overlay, + &self.executor, + method, + call_data, + ) + .map_err(Into::into) + } - fn native_runtime_version(&self) -> Option<&NativeVersion> { - Some(self.executor.native_version()) - } + fn native_runtime_version(&self) -> Option<&NativeVersion> { + Some(self.executor.native_version()) + } } diff --git a/core/client/src/children.rs b/core/client/src/children.rs index 48b39d18cd..1660aed55b 100644 --- a/core/client/src/children.rs +++ b/core/client/src/children.rs @@ -16,106 +16,109 @@ //! Functionality for reading and storing children hashes from db. -use kvdb::{KeyValueDB, DBTransaction}; -use parity_codec::{Encode, Decode}; use crate::error; +use kvdb::{DBTransaction, KeyValueDB}; +use parity_codec::{Decode, Encode}; use std::hash::Hash; - /// Returns the hashes of the children blocks of the block with `parent_hash`. 
pub fn read_children< - K: Eq + Hash + Clone + Encode + Decode, - V: Eq + Hash + Clone + Encode + Decode, ->(db: &KeyValueDB, column: Option, prefix: &[u8], parent_hash: K) -> error::Result> { - let mut buf = prefix.to_vec(); - parent_hash.using_encoded(|s| buf.extend(s)); - - let raw_val_opt = match db.get(column, &buf[..]) { - Ok(raw_val_opt) => raw_val_opt, - Err(_) => return Err(error::ErrorKind::Backend("Error reading value from database".into()).into()), - }; - - let raw_val = match raw_val_opt { - Some(val) => val, - None => return Ok(Vec::new()), - }; - - let children: Vec = match Decode::decode(&mut &raw_val[..]) { - Some(children) => children, - None => return Err(error::ErrorKind::Backend("Error decoding children".into()).into()), - }; - - Ok(children) + K: Eq + Hash + Clone + Encode + Decode, + V: Eq + Hash + Clone + Encode + Decode, +>( + db: &KeyValueDB, + column: Option, + prefix: &[u8], + parent_hash: K, +) -> error::Result> { + let mut buf = prefix.to_vec(); + parent_hash.using_encoded(|s| buf.extend(s)); + + let raw_val_opt = match db.get(column, &buf[..]) { + Ok(raw_val_opt) => raw_val_opt, + Err(_) => { + return Err(error::ErrorKind::Backend("Error reading value from database".into()).into()); + } + }; + + let raw_val = match raw_val_opt { + Some(val) => val, + None => return Ok(Vec::new()), + }; + + let children: Vec = match Decode::decode(&mut &raw_val[..]) { + Some(children) => children, + None => return Err(error::ErrorKind::Backend("Error decoding children".into()).into()), + }; + + Ok(children) } /// Insert the key-value pair (`parent_hash`, `children_hashes`) in the transaction. /// Any existing value is overwritten upon write. pub fn write_children< - K: Eq + Hash + Clone + Encode + Decode, - V: Eq + Hash + Clone + Encode + Decode, + K: Eq + Hash + Clone + Encode + Decode, + V: Eq + Hash + Clone + Encode + Decode, >( - tx: &mut DBTransaction, - column: Option, - prefix: &[u8], - parent_hash: K, - children_hashes: V, + tx: &mut DBTransaction, + column: Option, + prefix: &[u8], + parent_hash: K, + children_hashes: V, ) { - let mut key = prefix.to_vec(); - parent_hash.using_encoded(|s| key.extend(s)); - tx.put_vec(column, &key[..], children_hashes.encode()); + let mut key = prefix.to_vec(); + parent_hash.using_encoded(|s| key.extend(s)); + tx.put_vec(column, &key[..], children_hashes.encode()); } /// Prepare transaction to remove the children of `parent_hash`. 
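A minimal usage sketch of the two helpers above, assuming the `kvdb-memorydb` crate as in the `children_write_read_remove` test further down (the `remove_children` hunk follows):

use kvdb::DBTransaction;

fn children_roundtrip() -> error::Result<()> {
    const PREFIX: &[u8] = b"children";
    let db = ::kvdb_memorydb::create(0);

    // Record blocks 2 and 3 as children of block 1 in a single transaction.
    let mut tx = DBTransaction::new();
    write_children(&mut tx, None, PREFIX, 1u64, vec![2u64, 3u64]);
    db.write(tx).unwrap();

    // Read them back; an absent entry decodes to an empty Vec rather than an error.
    let children: Vec<u64> = read_children(&db, None, PREFIX, 1u64)?;
    assert_eq!(children, vec![2, 3]);
    Ok(())
}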
-pub fn remove_children< - K: Eq + Hash + Clone + Encode + Decode, ->( - tx: &mut DBTransaction, - column: Option, - prefix: &[u8], - parent_hash: K, +pub fn remove_children( + tx: &mut DBTransaction, + column: Option, + prefix: &[u8], + parent_hash: K, ) { - let mut key = prefix.to_vec(); - parent_hash.using_encoded(|s| key.extend(s)); - tx.delete(column, &key[..]); + let mut key = prefix.to_vec(); + parent_hash.using_encoded(|s| key.extend(s)); + tx.delete(column, &key[..]); } - #[cfg(test)] mod tests { - use super::*; + use super::*; - #[test] - fn children_write_read_remove() { - const PREFIX: &[u8] = b"children"; - let db = ::kvdb_memorydb::create(0); + #[test] + fn children_write_read_remove() { + const PREFIX: &[u8] = b"children"; + let db = ::kvdb_memorydb::create(0); - let mut tx = DBTransaction::new(); + let mut tx = DBTransaction::new(); - let mut children1 = Vec::new(); - children1.push(1_3); - children1.push(1_5); - write_children(&mut tx, None, PREFIX, 1_1, children1); + let mut children1 = Vec::new(); + children1.push(1_3); + children1.push(1_5); + write_children(&mut tx, None, PREFIX, 1_1, children1); - let mut children2 = Vec::new(); - children2.push(1_4); - children2.push(1_6); - write_children(&mut tx, None, PREFIX, 1_2, children2); + let mut children2 = Vec::new(); + children2.push(1_4); + children2.push(1_6); + write_children(&mut tx, None, PREFIX, 1_2, children2); - db.write(tx.clone()).unwrap(); + db.write(tx.clone()).unwrap(); - let r1: Vec = read_children(&db, None, PREFIX, 1_1).unwrap(); - let r2: Vec = read_children(&db, None, PREFIX, 1_2).unwrap(); + let r1: Vec = read_children(&db, None, PREFIX, 1_1).unwrap(); + let r2: Vec = read_children(&db, None, PREFIX, 1_2).unwrap(); - assert_eq!(r1, vec![1_3, 1_5]); - assert_eq!(r2, vec![1_4, 1_6]); + assert_eq!(r1, vec![1_3, 1_5]); + assert_eq!(r2, vec![1_4, 1_6]); - remove_children(&mut tx, None, PREFIX, 1_2); - db.write(tx).unwrap(); + remove_children(&mut tx, None, PREFIX, 1_2); + db.write(tx).unwrap(); - let r1: Vec = read_children(&db, None, PREFIX, 1_1).unwrap(); - let r2: Vec = read_children(&db, None, PREFIX, 1_2).unwrap(); + let r1: Vec = read_children(&db, None, PREFIX, 1_1).unwrap(); + let r2: Vec = read_children(&db, None, PREFIX, 1_2).unwrap(); - assert_eq!(r1, vec![1_3, 1_5]); - assert_eq!(r2.len(), 0); - } -} \ No newline at end of file + assert_eq!(r1, vec![1_3, 1_5]); + assert_eq!(r2.len(), 0); + } +} diff --git a/core/client/src/cht.rs b/core/client/src/cht.rs index d8e7ffbff3..58bab468c6 100644 --- a/core/client/src/cht.rs +++ b/core/client/src/cht.rs @@ -29,11 +29,13 @@ use hash_db; use heapsize::HeapSizeOf; use trie; -use primitives::{H256, convert_hash}; -use runtime_primitives::traits::{As, Header as HeaderT, SimpleArithmetic, One}; +use primitives::{convert_hash, H256}; +use runtime_primitives::traits::{As, Header as HeaderT, One, SimpleArithmetic}; use state_machine::backend::InMemory as InMemoryState; -use state_machine::{MemoryDB, TrieBackend, Backend as StateBackend, - prove_read_on_trie_backend, read_proof_check, read_proof_check_on_proving_backend}; +use state_machine::{ + prove_read_on_trie_backend, read_proof_check, read_proof_check_on_proving_backend, + Backend as StateBackend, MemoryDB, TrieBackend, +}; use crate::error::{Error as ClientError, ErrorKind as ClientErrorKind, Result as ClientResult}; @@ -43,211 +45,236 @@ pub const SIZE: u64 = 2048; /// Returns Some(cht_number) if CHT needs to be built when the block with given number is canonized.
pub fn is_build_required(cht_size: u64, block_num: N) -> Option - where - N: Clone + SimpleArithmetic, +where + N: Clone + SimpleArithmetic, { - let block_cht_num = block_to_cht_number(cht_size, block_num.clone())?; - let two = N::one() + N::one(); - if block_cht_num < two { - return None; - } - let cht_start = start_number(cht_size, block_cht_num.clone()); - if cht_start != block_num { - return None; - } - - Some(block_cht_num - two) + let block_cht_num = block_to_cht_number(cht_size, block_num.clone())?; + let two = N::one() + N::one(); + if block_cht_num < two { + return None; + } + let cht_start = start_number(cht_size, block_cht_num.clone()); + if cht_start != block_num { + return None; + } + + Some(block_cht_num - two) } /// Compute a CHT root from an iterator of block hashes. Fails if shorter than /// SIZE items. The items are assumed to proceed sequentially from `start_number(cht_num)`. /// Discards the trie's nodes. pub fn compute_root( - cht_size: u64, - cht_num: Header::Number, - hashes: I, + cht_size: u64, + cht_num: Header::Number, + hashes: I, ) -> ClientResult - where - Header: HeaderT, - Hasher: hash_db::Hasher, - Hasher::Out: Ord, - I: IntoIterator>>, +where + Header: HeaderT, + Hasher: hash_db::Hasher, + Hasher::Out: Ord, + I: IntoIterator>>, { - Ok(trie::trie_root::( - build_pairs::(cht_size, cht_num, hashes)? - )) + Ok(trie::trie_root::( + build_pairs::(cht_size, cht_num, hashes)?, + )) } /// Build CHT-based header proof. pub fn build_proof( - cht_size: u64, - cht_num: Header::Number, - blocks: BlocksI, - hashes: HashesI + cht_size: u64, + cht_num: Header::Number, + blocks: BlocksI, + hashes: HashesI, ) -> ClientResult>> - where - Header: HeaderT, - Hasher: hash_db::Hasher, - Hasher::Out: Ord + HeapSizeOf, - BlocksI: IntoIterator, - HashesI: IntoIterator>>, +where + Header: HeaderT, + Hasher: hash_db::Hasher, + Hasher::Out: Ord + HeapSizeOf, + BlocksI: IntoIterator, + HashesI: IntoIterator>>, { - let transaction = build_pairs::(cht_size, cht_num, hashes)? - .into_iter() - .map(|(k, v)| (None, k, Some(v))) - .collect::>(); - let storage = InMemoryState::::default().update(transaction); - let trie_storage = storage.try_into_trie_backend() - .expect("InMemoryState::try_into_trie_backend always returns Some; qed"); - let mut total_proof = HashSet::new(); - for block in blocks.into_iter() { - debug_assert_eq!(block_to_cht_number(cht_size, block), Some(cht_num)); - - let (value, proof) = prove_read_on_trie_backend(&trie_storage, &encode_cht_key(block))?; - assert!(value.is_some(), "we have just built trie that includes the value for block"); - total_proof.extend(proof); - } - Ok(total_proof.into_iter().collect()) + let transaction = build_pairs::(cht_size, cht_num, hashes)? + .into_iter() + .map(|(k, v)| (None, k, Some(v))) + .collect::>(); + let storage = InMemoryState::::default().update(transaction); + let trie_storage = storage + .try_into_trie_backend() + .expect("InMemoryState::try_into_trie_backend always returns Some; qed"); + let mut total_proof = HashSet::new(); + for block in blocks.into_iter() { + debug_assert_eq!(block_to_cht_number(cht_size, block), Some(cht_num)); + + let (value, proof) = prove_read_on_trie_backend(&trie_storage, &encode_cht_key(block))?; + assert!( + value.is_some(), + "we have just built trie that includes the value for block" + ); + total_proof.extend(proof); + } + Ok(total_proof.into_iter().collect()) } /// Check CHT-based header proof. 
pub fn check_proof( - local_root: Header::Hash, - local_number: Header::Number, - remote_hash: Header::Hash, - remote_proof: Vec> + local_root: Header::Hash, + local_number: Header::Number, + remote_hash: Header::Hash, + remote_proof: Vec>, ) -> ClientResult<()> - where - Header: HeaderT, - Hasher: hash_db::Hasher, - Hasher::Out: Ord + HeapSizeOf, +where + Header: HeaderT, + Hasher: hash_db::Hasher, + Hasher::Out: Ord + HeapSizeOf, { - do_check_proof::(local_root, local_number, remote_hash, move |local_root, local_cht_key| - read_proof_check::(local_root, remote_proof, - local_cht_key).map_err(|e| ClientError::from(e))) + do_check_proof::( + local_root, + local_number, + remote_hash, + move |local_root, local_cht_key| { + read_proof_check::(local_root, remote_proof, local_cht_key) + .map_err(|e| ClientError::from(e)) + }, + ) } /// Check CHT-based header proof on pre-created proving backend. pub fn check_proof_on_proving_backend( - local_root: Header::Hash, - local_number: Header::Number, - remote_hash: Header::Hash, - proving_backend: &TrieBackend, Hasher>, + local_root: Header::Hash, + local_number: Header::Number, + remote_hash: Header::Hash, + proving_backend: &TrieBackend, Hasher>, ) -> ClientResult<()> - where - Header: HeaderT, - Hasher: hash_db::Hasher, - Hasher::Out: Ord + HeapSizeOf, +where + Header: HeaderT, + Hasher: hash_db::Hasher, + Hasher::Out: Ord + HeapSizeOf, { - do_check_proof::(local_root, local_number, remote_hash, |_, local_cht_key| - read_proof_check_on_proving_backend::( - proving_backend, local_cht_key).map_err(|e| ClientError::from(e))) + do_check_proof::( + local_root, + local_number, + remote_hash, + |_, local_cht_key| { + read_proof_check_on_proving_backend::(proving_backend, local_cht_key) + .map_err(|e| ClientError::from(e)) + }, + ) } /// Check CHT-based header proof using passed checker function. fn do_check_proof( - local_root: Header::Hash, - local_number: Header::Number, - remote_hash: Header::Hash, - checker: F, + local_root: Header::Hash, + local_number: Header::Number, + remote_hash: Header::Hash, + checker: F, ) -> ClientResult<()> - where - Header: HeaderT, - Hasher: hash_db::Hasher, - Hasher::Out: Ord + HeapSizeOf, - F: FnOnce(Hasher::Out, &[u8]) -> ClientResult>>, +where + Header: HeaderT, + Hasher: hash_db::Hasher, + Hasher::Out: Ord + HeapSizeOf, + F: FnOnce(Hasher::Out, &[u8]) -> ClientResult>>, { - let root: Hasher::Out = convert_hash(&local_root); - let local_cht_key = encode_cht_key(local_number); - let local_cht_value = checker(root, &local_cht_key)?; - let local_cht_value = local_cht_value.ok_or_else(|| ClientErrorKind::InvalidCHTProof)?; - let local_hash = decode_cht_value(&local_cht_value).ok_or_else(|| ClientErrorKind::InvalidCHTProof)?; - match &local_hash[..] == remote_hash.as_ref() { - true => Ok(()), - false => Err(ClientErrorKind::InvalidCHTProof.into()), - } - + let root: Hasher::Out = convert_hash(&local_root); + let local_cht_key = encode_cht_key(local_number); + let local_cht_value = checker(root, &local_cht_key)?; + let local_cht_value = local_cht_value.ok_or_else(|| ClientErrorKind::InvalidCHTProof)?; + let local_hash = + decode_cht_value(&local_cht_value).ok_or_else(|| ClientErrorKind::InvalidCHTProof)?; + match &local_hash[..] == remote_hash.as_ref() { + true => Ok(()), + false => Err(ClientErrorKind::InvalidCHTProof.into()), + } } /// Group ordered blocks by CHT number and call functor with blocks of each group. 
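The function defined next walks an ordered sequence of block numbers and invokes a callback once per CHT. A simplified, hypothetical sketch of that grouping idea in isolation, using plain `u64` arithmetic rather than the generic `Header::Number`:

// Partition an ascending list of non-genesis block numbers into runs that
// fall into the same CHT (mirrors `block_to_cht_number`: the CHT number of
// block N > 0 is (N - 1) / cht_size).
fn group_by_cht(cht_size: u64, blocks: &[u64]) -> Vec<(u64, Vec<u64>)> {
    let mut groups: Vec<(u64, Vec<u64>)> = Vec::new();
    for &block in blocks {
        let cht_num = (block - 1) / cht_size;
        match groups.last_mut() {
            // Still in the same CHT: extend the current group.
            Some((num, group)) if *num == cht_num => group.push(block),
            // Crossed a CHT boundary: start a new group.
            _ => groups.push((cht_num, vec![block])),
        }
    }
    groups
}

For instance, `group_by_cht(2048, &[2049, 2050, 4097])` yields `[(1, vec![2049, 2050]), (2, vec![4097])]`.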
pub fn for_each_cht_group( - cht_size: u64, - blocks: I, - mut functor: F, - mut functor_param: P, + cht_size: u64, + blocks: I, + mut functor: F, + mut functor_param: P, ) -> ClientResult<()> - where - Header: HeaderT, - I: IntoIterator, - F: FnMut(P, Header::Number, Vec) -> ClientResult<P>, +where + Header: HeaderT, + I: IntoIterator, + F: FnMut(P, Header::Number, Vec) -> ClientResult<P>, {
fn build_pairs( - cht_size: u64, - cht_num: Header::Number, - hashes: I + cht_size: u64, + cht_num: Header::Number, + hashes: I, ) -> ClientResult, Vec)>> - where - Header: HeaderT, - I: IntoIterator>>, +where + Header: HeaderT, + I: IntoIterator>>, { - let start_num = start_number(cht_size, cht_num); - let mut pairs = Vec::new(); - let mut hash_number = start_num; - for hash in hashes.into_iter().take(cht_size as usize) { - let hash = hash?.ok_or_else(|| ClientError::from( - ClientErrorKind::MissingHashRequiredForCHT(cht_num.as_(), hash_number.as_()) - ))?; - pairs.push(( - encode_cht_key(hash_number).to_vec(), - encode_cht_value(hash) - )); - hash_number += Header::Number::one(); - } - - if pairs.len() as u64 == cht_size { - Ok(pairs) - } else { - Err(ClientErrorKind::MissingHashRequiredForCHT(cht_num.as_(), hash_number.as_()).into()) - } + let start_num = start_number(cht_size, cht_num); + let mut pairs = Vec::new(); + let mut hash_number = start_num; + for hash in hashes.into_iter().take(cht_size as usize) { + let hash = hash?.ok_or_else(|| { + ClientError::from(ClientErrorKind::MissingHashRequiredForCHT( + cht_num.as_(), + hash_number.as_(), + )) + })?; + pairs.push((encode_cht_key(hash_number).to_vec(), encode_cht_value(hash))); + hash_number += Header::Number::one(); + } + + if pairs.len() as u64 == cht_size { + Ok(pairs) + } else { + Err(ClientErrorKind::MissingHashRequiredForCHT(cht_num.as_(), hash_number.as_()).into()) + } } /// Get the starting block of a given CHT. @@ -257,140 +284,177 @@ fn build_pairs( /// This is because the genesis hash is assumed to be known /// and including it would be redundant. pub fn start_number(cht_size: u64, cht_num: N) -> N { - (cht_num * As::sa(cht_size)) + N::one() + (cht_num * As::sa(cht_size)) + N::one() } /// Get the ending block of a given CHT. pub fn end_number(cht_size: u64, cht_num: N) -> N { - (cht_num + N::one()) * As::sa(cht_size) + (cht_num + N::one()) * As::sa(cht_size) } /// Convert a block number to a CHT number. /// Returns `None` for `block_num` == 0, `Some` otherwise. pub fn block_to_cht_number(cht_size: u64, block_num: N) -> Option { - if block_num == N::zero() { - None - } else { - Some((block_num - N::one()) / As::sa(cht_size)) - } + if block_num == N::zero() { + None + } else { + Some((block_num - N::one()) / As::sa(cht_size)) + } } /// Convert header number into CHT key. pub fn encode_cht_key>(number: N) -> Vec { - let number: u64 = number.as_(); - vec![ - (number >> 56) as u8, - ((number >> 48) & 0xff) as u8, - ((number >> 40) & 0xff) as u8, - ((number >> 32) & 0xff) as u8, - ((number >> 24) & 0xff) as u8, - ((number >> 16) & 0xff) as u8, - ((number >> 8) & 0xff) as u8, - (number & 0xff) as u8 - ] + let number: u64 = number.as_(); + vec![ + (number >> 56) as u8, + ((number >> 48) & 0xff) as u8, + ((number >> 40) & 0xff) as u8, + ((number >> 32) & 0xff) as u8, + ((number >> 24) & 0xff) as u8, + ((number >> 16) & 0xff) as u8, + ((number >> 8) & 0xff) as u8, + (number & 0xff) as u8, + ] } /// Convert header hash into CHT value. fn encode_cht_value>(hash: Hash) -> Vec { - hash.as_ref().to_vec() + hash.as_ref().to_vec() } /// Convert CHT value into block header hash. 
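A worked example of the numbering helpers above with `cht_size = 2048`: blocks 1..=2048 map to CHT 0, blocks 2049..=4096 to CHT 1, and `encode_cht_key` is simply the block number in big-endian bytes (equivalent to `u64::to_be_bytes`). Assertions of this kind could sit alongside the tests below:

assert_eq!(block_to_cht_number(2048, 2048u64), Some(0));
assert_eq!(block_to_cht_number(2048, 2049u64), Some(1));
assert_eq!(start_number(2048, 1u64), 2049);
assert_eq!(end_number(2048, 1u64), 4096);
assert_eq!(encode_cht_key(1u64), vec![0, 0, 0, 0, 0, 0, 0, 1]);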
pub fn decode_cht_value(value: &[u8]) -> Option { - match value.len() { - 32 => Some(H256::from_slice(&value[0..32])), - _ => None, - } - + match value.len() { + 32 => Some(H256::from_slice(&value[0..32])), + _ => None, + } } #[cfg(test)] mod tests { - use primitives::{Blake2Hasher}; - use test_client::runtime::Header; - use super::*; - - #[test] - fn is_build_required_works() { - assert_eq!(is_build_required(SIZE, 0u64), None); - assert_eq!(is_build_required(SIZE, 1u64), None); - assert_eq!(is_build_required(SIZE, SIZE), None); - assert_eq!(is_build_required(SIZE, SIZE + 1), None); - assert_eq!(is_build_required(SIZE, 2 * SIZE), None); - assert_eq!(is_build_required(SIZE, 2 * SIZE + 1), Some(0)); - assert_eq!(is_build_required(SIZE, 3 * SIZE), None); - assert_eq!(is_build_required(SIZE, 3 * SIZE + 1), Some(1)); - } - - #[test] - fn start_number_works() { - assert_eq!(start_number(SIZE, 0u64), 1u64); - assert_eq!(start_number(SIZE, 1u64), SIZE + 1); - assert_eq!(start_number(SIZE, 2u64), SIZE + SIZE + 1); - } - - #[test] - fn end_number_works() { - assert_eq!(end_number(SIZE, 0u64), SIZE); - assert_eq!(end_number(SIZE, 1u64), SIZE + SIZE); - assert_eq!(end_number(SIZE, 2u64), SIZE + SIZE + SIZE); - } - - #[test] - fn build_pairs_fails_when_no_enough_blocks() { - assert!(build_pairs::(SIZE, 0, - ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize / 2)).is_err()); - } - - #[test] - fn build_pairs_fails_when_missing_block() { - assert!(build_pairs::(SIZE, 0, ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize / 2) - .chain(::std::iter::once(Ok(None))) - .chain(::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(2)))).take(SIZE as usize / 2 - 1))).is_err()); - } - - #[test] - fn compute_root_works() { - assert!(compute_root::(SIZE, 42, - ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize)).is_ok()); - } - - #[test] - #[should_panic] - fn build_proof_panics_when_querying_wrong_block() { - assert!(build_proof::( - SIZE, 0, vec![(SIZE * 1000) as u64], - ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize)).is_err()); - } - - #[test] - fn build_proof_works() { - assert!(build_proof::( - SIZE, 0, vec![(SIZE / 2) as u64], - ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize)).is_ok()); - } - - #[test] - #[should_panic] - fn for_each_cht_group_panics() { - let _ = for_each_cht_group::(SIZE, vec![SIZE * 5, SIZE * 2], |_, _, _| Ok(()), ()); - } - - #[test] - fn for_each_cht_group_works() { - let _ = for_each_cht_group::(SIZE, vec![ - SIZE * 2 + 1, SIZE * 2 + 2, SIZE * 2 + 5, - SIZE * 4 + 1, SIZE * 4 + 7, - SIZE * 6 + 1 - ], |_, cht_num, blocks| { - match cht_num { - 2 => assert_eq!(blocks, vec![SIZE * 2 + 1, SIZE * 2 + 2, SIZE * 2 + 5]), - 4 => assert_eq!(blocks, vec![SIZE * 4 + 1, SIZE * 4 + 7]), - 6 => assert_eq!(blocks, vec![SIZE * 6 + 1]), - _ => unreachable!(), - } - - Ok(()) - }, ()); - } + use super::*; + use primitives::Blake2Hasher; + use test_client::runtime::Header; + + #[test] + fn is_build_required_works() { + assert_eq!(is_build_required(SIZE, 0u64), None); + assert_eq!(is_build_required(SIZE, 1u64), None); + assert_eq!(is_build_required(SIZE, SIZE), None); + assert_eq!(is_build_required(SIZE, SIZE + 1), None); + assert_eq!(is_build_required(SIZE, 2 * SIZE), None); + assert_eq!(is_build_required(SIZE, 2 * SIZE + 1), Some(0)); + assert_eq!(is_build_required(SIZE, 3 * SIZE), None); + assert_eq!(is_build_required(SIZE, 3 * SIZE + 
1), Some(1)); + } + + #[test] + fn start_number_works() { + assert_eq!(start_number(SIZE, 0u64), 1u64); + assert_eq!(start_number(SIZE, 1u64), SIZE + 1); + assert_eq!(start_number(SIZE, 2u64), SIZE + SIZE + 1); + } + + #[test] + fn end_number_works() { + assert_eq!(end_number(SIZE, 0u64), SIZE); + assert_eq!(end_number(SIZE, 1u64), SIZE + SIZE); + assert_eq!(end_number(SIZE, 2u64), SIZE + SIZE + SIZE); + } + + #[test] + fn build_pairs_fails_when_no_enough_blocks() { + assert!(build_pairs::( + SIZE, + 0, + ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize / 2) + ) + .is_err()); + } + + #[test] + fn build_pairs_fails_when_missing_block() { + assert!(build_pairs::( + SIZE, + 0, + ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))) + .take(SIZE as usize / 2) + .chain(::std::iter::once(Ok(None))) + .chain( + ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(2)))) + .take(SIZE as usize / 2 - 1) + ) + ) + .is_err()); + } + + #[test] + fn compute_root_works() { + assert!(compute_root::( + SIZE, + 42, + ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize) + ) + .is_ok()); + } + + #[test] + #[should_panic] + fn build_proof_panics_when_querying_wrong_block() { + assert!(build_proof::( + SIZE, + 0, + vec![(SIZE * 1000) as u64], + ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize) + ) + .is_err()); + } + + #[test] + fn build_proof_works() { + assert!(build_proof::( + SIZE, + 0, + vec![(SIZE / 2) as u64], + ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize) + ) + .is_ok()); + } + + #[test] + #[should_panic] + fn for_each_cht_group_panics() { + let _ = for_each_cht_group::( + SIZE, + vec![SIZE * 5, SIZE * 2], + |_, _, _| Ok(()), + (), + ); + } + + #[test] + fn for_each_cht_group_works() { + let _ = for_each_cht_group::( + SIZE, + vec![ + SIZE * 2 + 1, + SIZE * 2 + 2, + SIZE * 2 + 5, + SIZE * 4 + 1, + SIZE * 4 + 7, + SIZE * 6 + 1, + ], + |_, cht_num, blocks| { + match cht_num { + 2 => assert_eq!(blocks, vec![SIZE * 2 + 1, SIZE * 2 + 2, SIZE * 2 + 5]), + 4 => assert_eq!(blocks, vec![SIZE * 4 + 1, SIZE * 4 + 7]), + 6 => assert_eq!(blocks, vec![SIZE * 6 + 1]), + _ => unreachable!(), + } + + Ok(()) + }, + (), + ); + } } diff --git a/core/client/src/client.rs b/core/client/src/client.rs index 041933cfff..6303bb6290 100644 --- a/core/client/src/client.rs +++ b/core/client/src/client.rs @@ -16,56 +16,63 @@ //! 
Substrate Client -use std::{marker::PhantomData, collections::{HashSet, BTreeMap, HashMap}, sync::Arc, panic::UnwindSafe, result}; use crate::error::Error; +use crate::runtime_api::{CallRuntimeAt, ConstructRuntimeApi}; +use consensus::{ + well_known_cache_keys::Id as CacheKeyId, BlockOrigin, Error as ConsensusError, + ErrorKind as ConsensusErrorKind, ForkChoiceStrategy, ImportBlock, ImportResult, +}; use futures::sync::mpsc; +use hash_db::Hasher; +use parity_codec::{Decode, Encode}; use parking_lot::{Mutex, RwLock}; +use primitives::storage::well_known_keys; +use primitives::storage::{StorageData, StorageKey}; use primitives::NativeOrEncoded; -use runtime_primitives::{ - Justification, - generic::{BlockId, SignedBlock}, -}; -use consensus::{ - Error as ConsensusError, ErrorKind as ConsensusErrorKind, ImportBlock, ImportResult, - BlockOrigin, ForkChoiceStrategy, well_known_cache_keys::Id as CacheKeyId, +use primitives::{ + convert_hash, Blake2Hasher, ChangesTrieConfiguration, ExecutionContext, NeverNativeValue, H256, }; use runtime_primitives::traits::{ - Block as BlockT, Header as HeaderT, Zero, As, NumberFor, CurrentHeight, BlockNumberToHash, - ApiRef, ProvideRuntimeApi, Digest, DigestItem + ApiRef, As, Block as BlockT, BlockNumberToHash, CurrentHeight, Digest, DigestItem, + Header as HeaderT, NumberFor, ProvideRuntimeApi, Zero, }; use runtime_primitives::BuildStorage; -use crate::runtime_api::{CallRuntimeAt, ConstructRuntimeApi}; -use primitives::{Blake2Hasher, H256, ChangesTrieConfiguration, convert_hash, NeverNativeValue, ExecutionContext}; -use primitives::storage::{StorageKey, StorageData}; -use primitives::storage::well_known_keys; -use parity_codec::{Encode, Decode}; +use runtime_primitives::{ + generic::{BlockId, SignedBlock}, + Justification, +}; use state_machine::{ - DBValue, Backend as StateBackend, CodeExecutor, ChangesTrieAnchorBlockId, - ExecutionStrategy, ExecutionManager, prove_read, - ChangesTrieRootsStorage, ChangesTrieStorage, - key_changes, key_changes_proof, OverlayedChanges, NeverOffchainExt, + key_changes, key_changes_proof, prove_read, Backend as StateBackend, ChangesTrieAnchorBlockId, + ChangesTrieRootsStorage, ChangesTrieStorage, CodeExecutor, DBValue, ExecutionManager, + ExecutionStrategy, NeverOffchainExt, OverlayedChanges, +}; +use std::{ + collections::{BTreeMap, HashMap, HashSet}, + marker::PhantomData, + panic::UnwindSafe, + result, + sync::Arc, }; -use hash_db::Hasher; use crate::backend::{self, BlockImportOperation, PrunableStateChangesTrieStorage}; +use crate::block_builder::{self, api::BlockBuilder as BlockBuilderAPI}; use crate::blockchain::{ - self, Info as ChainInfo, Backend as ChainBackend, HeaderBackend as ChainHeaderBackend, - ProvideCache, Cache, + self, Backend as ChainBackend, Cache, HeaderBackend as ChainHeaderBackend, Info as ChainInfo, + ProvideCache, }; use crate::call_executor::{CallExecutor, LocalCallExecutor}; -use executor::{RuntimeVersion, RuntimeInfo}; -use crate::notifications::{StorageNotifications, StorageEventStream}; -use crate::light::{call_executor::prove_execution, fetcher::ChangesProof}; use crate::cht; use crate::error::{self, ErrorKind}; -use crate::in_mem; -use crate::block_builder::{self, api::BlockBuilder as BlockBuilderAPI}; use crate::genesis; +use crate::in_mem; +use crate::light::{call_executor::prove_execution, fetcher::ChangesProof}; +use crate::notifications::{StorageEventStream, StorageNotifications}; use consensus; +use executor::{RuntimeInfo, RuntimeVersion}; use substrate_telemetry::{telemetry, 
SUBSTRATE_INFO}; -use log::{info, trace, warn}; use error_chain::bail; +use log::{info, trace, warn}; /// Type that implements `futures::Stream` of block import events. pub type ImportNotifications = mpsc::UnboundedReceiver>; @@ -73,2083 +80,2713 @@ pub type ImportNotifications = mpsc::UnboundedReceiver = mpsc::UnboundedReceiver>; -type StorageUpdate = <<>::BlockImportOperation as BlockImportOperation>::State as state_machine::Backend>::Transaction; +type StorageUpdate = + <<>::BlockImportOperation as BlockImportOperation< + Block, + Blake2Hasher, + >>::State as state_machine::Backend>::Transaction; type ChangesUpdate = trie::MemoryDB; /// Execution strategies settings. #[derive(Debug, Clone)] pub struct ExecutionStrategies { - /// Execution strategy used when syncing. - pub syncing: ExecutionStrategy, - /// Execution strategy used when importing blocks. - pub importing: ExecutionStrategy, - /// Execution strategy used when constructing blocks. - pub block_construction: ExecutionStrategy, - /// Execution strategy used for offchain workers. - pub offchain_worker: ExecutionStrategy, - /// Execution strategy used in other cases. - pub other: ExecutionStrategy, + /// Execution strategy used when syncing. + pub syncing: ExecutionStrategy, + /// Execution strategy used when importing blocks. + pub importing: ExecutionStrategy, + /// Execution strategy used when constructing blocks. + pub block_construction: ExecutionStrategy, + /// Execution strategy used for offchain workers. + pub offchain_worker: ExecutionStrategy, + /// Execution strategy used in other cases. + pub other: ExecutionStrategy, } impl Default for ExecutionStrategies { - fn default() -> ExecutionStrategies { - ExecutionStrategies { - syncing: ExecutionStrategy::NativeElseWasm, - importing: ExecutionStrategy::NativeElseWasm, - block_construction: ExecutionStrategy::AlwaysWasm, - offchain_worker: ExecutionStrategy::NativeWhenPossible, - other: ExecutionStrategy::NativeElseWasm, - } - } + fn default() -> ExecutionStrategies { + ExecutionStrategies { + syncing: ExecutionStrategy::NativeElseWasm, + importing: ExecutionStrategy::NativeElseWasm, + block_construction: ExecutionStrategy::AlwaysWasm, + offchain_worker: ExecutionStrategy::NativeWhenPossible, + other: ExecutionStrategy::NativeElseWasm, + } + } } /// Substrate Client -pub struct Client where Block: BlockT { - backend: Arc, - executor: E, - storage_notifications: Mutex>, - import_notification_sinks: Mutex>>>, - finality_notification_sinks: Mutex>>>, - import_lock: Mutex<()>, - // holds the block hash currently being imported. TODO: replace this with block queue - importing_block: RwLock>, - execution_strategies: ExecutionStrategies, - _phantom: PhantomData, +pub struct Client +where + Block: BlockT, +{ + backend: Arc, + executor: E, + storage_notifications: Mutex>, + import_notification_sinks: Mutex>>>, + finality_notification_sinks: Mutex>>>, + import_lock: Mutex<()>, + // holds the block hash currently being imported. TODO: replace this with block queue + importing_block: RwLock>, + execution_strategies: ExecutionStrategies, + _phantom: PhantomData, } /// Client import operation, a wrapper for the backend. 
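Because `ExecutionStrategies` is a plain struct with the `Default` impl shown above, callers can override a single field with functional-update syntax; a hypothetical sketch that forces wasm execution during sync:

let strategies = ExecutionStrategies {
    syncing: ExecutionStrategy::AlwaysWasm,
    // Keep the defaults shown above for all other contexts.
    ..ExecutionStrategies::default()
};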
-pub struct ClientImportOperation, B: backend::Backend> { - op: B::BlockImportOperation, - notify_imported: Option<(Block::Hash, BlockOrigin, Block::Header, bool, Option, Option>)>>)>, - notify_finalized: Vec, +pub struct ClientImportOperation< + Block: BlockT, + H: Hasher, + B: backend::Backend, +> { + op: B::BlockImportOperation, + notify_imported: Option<( + Block::Hash, + BlockOrigin, + Block::Header, + bool, + Option, Option>)>>, + )>, + notify_finalized: Vec, } /// A source of blockchain events. pub trait BlockchainEvents { - /// Get block import event stream. Not guaranteed to be fired for every - /// imported block. - fn import_notification_stream(&self) -> ImportNotifications; - - /// Get a stream of finality notifications. Not guaranteed to be fired for every - /// finalized block. - fn finality_notification_stream(&self) -> FinalityNotifications; - - /// Get storage changes event stream. - /// - /// Passing `None` as `filter_keys` subscribes to all storage changes. - fn storage_changes_notification_stream(&self, filter_keys: Option<&[StorageKey]>) -> error::Result>; + /// Get block import event stream. Not guaranteed to be fired for every + /// imported block. + fn import_notification_stream(&self) -> ImportNotifications; + + /// Get a stream of finality notifications. Not guaranteed to be fired for every + /// finalized block. + fn finality_notification_stream(&self) -> FinalityNotifications; + + /// Get storage changes event stream. + /// + /// Passing `None` as `filter_keys` subscribes to all storage changes. + fn storage_changes_notification_stream( + &self, + filter_keys: Option<&[StorageKey]>, + ) -> error::Result>; } /// Chain head information. pub trait ChainHead { - /// Get best block header. - fn best_block_header(&self) -> Result<::Header, error::Error>; - /// Get all leaves of the chain: block hashes that have no children currently. - /// Leaves that can never be finalized will not be returned. - fn leaves(&self) -> Result::Hash>, error::Error>; + /// Get best block header. + fn best_block_header(&self) -> Result<::Header, error::Error>; + /// Get all leaves of the chain: block hashes that have no children currently. + /// Leaves that can never be finalized will not be returned. + fn leaves(&self) -> Result::Hash>, error::Error>; } /// Fetch block body by ID. pub trait BlockBody { - /// Get block body by ID. Returns `None` if the body is not stored. - fn block_body(&self, id: &BlockId) -> error::Result::Extrinsic>>>; + /// Get block body by ID. Returns `None` if the body is not stored. + fn block_body( + &self, + id: &BlockId, + ) -> error::Result::Extrinsic>>>; } /// Client info #[derive(Debug)] pub struct ClientInfo { - /// Best block hash. - pub chain: ChainInfo, - /// Best block number in the queue. - pub best_queued_number: Option<<::Header as HeaderT>::Number>, - /// Best queued block hash. - pub best_queued_hash: Option, + /// Best block hash. + pub chain: ChainInfo, + /// Best block number in the queue. + pub best_queued_number: Option<<::Header as HeaderT>::Number>, + /// Best queued block hash. + pub best_queued_hash: Option, } /// Block status. #[derive(Debug, PartialEq, Eq)] pub enum BlockStatus { - /// Added to the import queue. - Queued, - /// Already in the blockchain and the state is available. - InChainWithState, - /// In the blockchain, but the state is not available. - InChainPruned, - /// Block or parent is known to be bad. - KnownBad, - /// Not in the queue or the blockchain. - Unknown, + /// Added to the import queue. 
+ Queued, + /// Already in the blockchain and the state is available. + InChainWithState, + /// In the blockchain, but the state is not available. + InChainPruned, + /// Block or parent is known to be bad. + KnownBad, + /// Not in the queue or the blockchain. + Unknown, } /// Summary of an imported block #[derive(Clone, Debug)] pub struct BlockImportNotification { - /// Imported block header hash. - pub hash: Block::Hash, - /// Imported block origin. - pub origin: BlockOrigin, - /// Imported block header. - pub header: Block::Header, - /// Is this the new best block. - pub is_new_best: bool, + /// Imported block header hash. + pub hash: Block::Hash, + /// Imported block origin. + pub origin: BlockOrigin, + /// Imported block header. + pub header: Block::Header, + /// Is this the new best block. + pub is_new_best: bool, } /// Summary of a finalized block. #[derive(Clone, Debug)] pub struct FinalityNotification { - /// Imported block header hash. - pub hash: Block::Hash, - /// Imported block header. - pub header: Block::Header, + /// Imported block header hash. + pub hash: Block::Hash, + /// Imported block header. + pub header: Block::Header, } // used in importing a block, where additional changes are made after the runtime // executed. enum PrePostHeader { - // they are the same: no post-runtime digest items. - Same(H), - // different headers (pre, post). - Different(H, H), + // they are the same: no post-runtime digest items. + Same(H), + // different headers (pre, post). + Different(H, H), } impl PrePostHeader { - // get a reference to the "pre-header" -- the header as it should be just after the runtime. - fn pre(&self) -> &H { - match *self { - PrePostHeader::Same(ref h) => h, - PrePostHeader::Different(ref h, _) => h, - } - } - - // get a reference to the "post-header" -- the header as it should be after all changes are applied. - fn post(&self) -> &H { - match *self { - PrePostHeader::Same(ref h) => h, - PrePostHeader::Different(_, ref h) => h, - } - } - - // convert to the "post-header" -- the header as it should be after all changes are applied. - fn into_post(self) -> H { - match self { - PrePostHeader::Same(h) => h, - PrePostHeader::Different(_, h) => h, - } - } + // get a reference to the "pre-header" -- the header as it should be just after the runtime. + fn pre(&self) -> &H { + match *self { + PrePostHeader::Same(ref h) => h, + PrePostHeader::Different(ref h, _) => h, + } + } + + // get a reference to the "post-header" -- the header as it should be after all changes are applied. + fn post(&self) -> &H { + match *self { + PrePostHeader::Same(ref h) => h, + PrePostHeader::Different(_, ref h) => h, + } + } + + // convert to the "post-header" -- the header as it should be after all changes are applied. + fn into_post(self) -> H { + match self { + PrePostHeader::Same(h) => h, + PrePostHeader::Different(_, h) => h, + } + } } /// Create an instance of in-memory client. pub fn new_in_mem( - executor: E, - genesis_storage: S, -) -> error::Result, LocalCallExecutor, E>, Block, RA>> - where - E: CodeExecutor + RuntimeInfo, - S: BuildStorage, - Block: BlockT, + executor: E, + genesis_storage: S, +) -> error::Result< + Client< + in_mem::Backend, + LocalCallExecutor, E>, + Block, + RA, + >, +> +where + E: CodeExecutor + RuntimeInfo, + S: BuildStorage, + Block: BlockT, { - new_with_backend(Arc::new(in_mem::Backend::new()), executor, genesis_storage) + new_with_backend(Arc::new(in_mem::Backend::new()), executor, genesis_storage) } /// Create a client with the explicitly provided backend.
/// This is useful for testing backend implementations. pub fn new_with_backend( - backend: Arc, - executor: E, - build_genesis_storage: S, + backend: Arc, + executor: E, + build_genesis_storage: S, ) -> error::Result, Block, RA>> - where - E: CodeExecutor + RuntimeInfo, - S: BuildStorage, - Block: BlockT, - B: backend::LocalBackend +where + E: CodeExecutor + RuntimeInfo, + S: BuildStorage, + Block: BlockT, + B: backend::LocalBackend, { - let call_executor = LocalCallExecutor::new(backend.clone(), executor); - Client::new(backend, call_executor, build_genesis_storage, Default::default()) + let call_executor = LocalCallExecutor::new(backend.clone(), executor); + Client::new( + backend, + call_executor, + build_genesis_storage, + Default::default(), + ) } -impl Client where - B: backend::Backend, - E: CallExecutor, - Block: BlockT, +impl Client +where + B: backend::Backend, + E: CallExecutor, + Block: BlockT, { - /// Creates new Substrate Client with given blockchain and code executor. - pub fn new( - backend: Arc, - executor: E, - build_genesis_storage: S, - execution_strategies: ExecutionStrategies - ) -> error::Result { - if backend.blockchain().header(BlockId::Number(Zero::zero()))?.is_none() { - let (genesis_storage, children_genesis_storage) = build_genesis_storage.build_storage()?; - let mut op = backend.begin_operation()?; - backend.begin_state_operation(&mut op, BlockId::Hash(Default::default()))?; - let state_root = op.reset_storage(genesis_storage, children_genesis_storage)?; - let genesis_block = genesis::construct_genesis_block::(state_root.into()); - info!("Initializing Genesis block/state (state: {}, header-hash: {})", genesis_block.header().state_root(), genesis_block.header().hash()); - op.set_block_data( - genesis_block.deconstruct().0, - Some(vec![]), - None, - crate::backend::NewBlockState::Final - )?; - backend.commit_operation(op)?; - } - - Ok(Client { - backend, - executor, - storage_notifications: Default::default(), - import_notification_sinks: Default::default(), - finality_notification_sinks: Default::default(), - import_lock: Default::default(), - importing_block: Default::default(), - execution_strategies, - _phantom: Default::default(), - }) - } - - /// Get a reference to the execution strategies. - pub fn execution_strategies(&self) -> &ExecutionStrategies { - &self.execution_strategies - } - - /// Get a reference to the state at a given block. - pub fn state_at(&self, block: &BlockId) -> error::Result { - self.backend.state_at(*block) - } - - /// Expose backend reference. To be used in tests only - pub fn backend(&self) -> &Arc { - &self.backend - } - - /// Return storage entry keys in state in a block of given hash with given prefix. - pub fn storage_keys(&self, id: &BlockId, key_prefix: &StorageKey) -> error::Result> { - let keys = self.state_at(id)?.keys(&key_prefix.0).into_iter().map(StorageKey).collect(); - Ok(keys) - } - - /// Return single storage entry of contract under given address in state in a block of given hash. - pub fn storage(&self, id: &BlockId, key: &StorageKey) -> error::Result> { - Ok(self.state_at(id)? - .storage(&key.0).map_err(|e| error::Error::from_state(Box::new(e)))? - .map(StorageData)) - } - - /// Get the code at a given block. - pub fn code_at(&self, id: &BlockId) -> error::Result> { - Ok(self.storage(id, &StorageKey(well_known_keys::CODE.to_vec()))? + /// Creates new Substrate Client with given blockchain and code executor. 
+ pub fn new( + backend: Arc, + executor: E, + build_genesis_storage: S, + execution_strategies: ExecutionStrategies, + ) -> error::Result { + if backend + .blockchain() + .header(BlockId::Number(Zero::zero()))? + .is_none() + { + let (genesis_storage, children_genesis_storage) = + build_genesis_storage.build_storage()?; + let mut op = backend.begin_operation()?; + backend.begin_state_operation(&mut op, BlockId::Hash(Default::default()))?; + let state_root = op.reset_storage(genesis_storage, children_genesis_storage)?; + let genesis_block = genesis::construct_genesis_block::(state_root.into()); + info!( + "Initializing Genesis block/state (state: {}, header-hash: {})", + genesis_block.header().state_root(), + genesis_block.header().hash() + ); + op.set_block_data( + genesis_block.deconstruct().0, + Some(vec![]), + None, + crate::backend::NewBlockState::Final, + )?; + backend.commit_operation(op)?; + } + + Ok(Client { + backend, + executor, + storage_notifications: Default::default(), + import_notification_sinks: Default::default(), + finality_notification_sinks: Default::default(), + import_lock: Default::default(), + importing_block: Default::default(), + execution_strategies, + _phantom: Default::default(), + }) + } + + /// Get a reference to the execution strategies. + pub fn execution_strategies(&self) -> &ExecutionStrategies { + &self.execution_strategies + } + + /// Get a reference to the state at a given block. + pub fn state_at(&self, block: &BlockId) -> error::Result { + self.backend.state_at(*block) + } + + /// Expose backend reference. To be used in tests only + pub fn backend(&self) -> &Arc { + &self.backend + } + + /// Return storage entry keys in state in a block of given hash with given prefix. + pub fn storage_keys( + &self, + id: &BlockId, + key_prefix: &StorageKey, + ) -> error::Result> { + let keys = self + .state_at(id)? + .keys(&key_prefix.0) + .into_iter() + .map(StorageKey) + .collect(); + Ok(keys) + } + + /// Return single storage entry of contract under given address in state in a block of given hash. + pub fn storage( + &self, + id: &BlockId, + key: &StorageKey, + ) -> error::Result> { + Ok(self + .state_at(id)? + .storage(&key.0) + .map_err(|e| error::Error::from_state(Box::new(e)))? + .map(StorageData)) + } + + /// Get the code at a given block. + pub fn code_at(&self, id: &BlockId) -> error::Result> { + Ok(self.storage(id, &StorageKey(well_known_keys::CODE.to_vec()))? .expect("None is returned if there's no value stored for the given key; ':code' key is always defined; qed").0) - } - - /// Get the RuntimeVersion at a given block. - pub fn runtime_version_at(&self, id: &BlockId) -> error::Result { - self.executor.runtime_version(id) - } - - /// Get call executor reference. - pub fn executor(&self) -> &E { - &self.executor - } - - /// Reads storage value at a given block + key, returning read proof. - pub fn read_proof(&self, id: &BlockId, key: &[u8]) -> error::Result>> { - self.state_at(id) - .and_then(|state| prove_read(state, key) - .map(|(_, proof)| proof) - .map_err(Into::into)) - } - - /// Execute a call to a contract on top of state in a block of given hash - /// AND returning execution proof. - /// - /// No changes are made. - pub fn execution_proof(&self, id: &BlockId, method: &str, call_data: &[u8]) -> error::Result<(Vec, Vec>)> { - let state = self.state_at(id)?; - let header = self.prepare_environment_block(id)?; - prove_execution(state, header, &self.executor, method, call_data) - } - - /// Reads given header and generates CHT-based header proof. 
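A hypothetical usage sketch of the storage accessors above, fetching a value and a read proof for the same key at one block; `client`, `best_hash`, and `key: StorageKey` are assumed to be in scope:

let at = BlockId::Hash(best_hash);
// Value of `key` in the state at `at`, if any (Option<StorageData>).
let value = client.storage(&at, &key)?;
// Merkle proof that the state at `at` holds `value` for `key` (Vec<Vec<u8>>).
let proof = client.read_proof(&at, &key.0)?;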
- pub fn header_proof(&self, id: &BlockId) -> error::Result<(Block::Header, Vec>)> { - self.header_proof_with_cht_size(id, cht::SIZE) - } - - /// Get block hash by number. - pub fn block_hash(&self, block_number: <::Header as HeaderT>::Number) -> error::Result> { - self.backend.blockchain().hash(block_number) - } - - /// Reads given header and generates CHT-based header proof for CHT of given size. - pub fn header_proof_with_cht_size(&self, id: &BlockId, cht_size: u64) -> error::Result<(Block::Header, Vec>)> { - let proof_error = || error::ErrorKind::Backend(format!("Failed to generate header proof for {:?}", id)); - let header = self.backend.blockchain().expect_header(*id)?; - let block_num = *header.number(); - let cht_num = cht::block_to_cht_number(cht_size, block_num).ok_or_else(proof_error)?; - let cht_start = cht::start_number(cht_size, cht_num); - let headers = (cht_start.as_()..).map(|num| self.block_hash(As::sa(num))); - let proof = cht::build_proof::(cht_size, cht_num, ::std::iter::once(block_num), headers)?; - Ok((header, proof)) - } - - /// Get longest range within [first; last] that is possible to use in `key_changes` - /// and `key_changes_proof` calls. - /// Range could be shortened from the beginning if some changes tries have been pruned. - /// Returns Ok(None) if changes tries are not supported. - pub fn max_key_changes_range( - &self, - first: NumberFor, - last: BlockId, - ) -> error::Result, BlockId)>> { - let (config, storage) = match self.require_changes_trie().ok() { - Some((config, storage)) => (config, storage), - None => return Ok(None), - }; - let first = first.as_(); - let last_num = self.backend.blockchain().expect_block_number_from_id(&last)?.as_(); - if first > last_num { - return Err(error::ErrorKind::ChangesTrieAccessFailed("Invalid changes trie range".into()).into()); - } - let finalized_number = self.backend.blockchain().info()?.finalized_number; - let oldest = storage.oldest_changes_trie_block(&config, finalized_number.as_()); - let first = As::sa(::std::cmp::max(first, oldest)); - Ok(Some((first, last))) - } - - /// Get pairs of (block, extrinsic) where key has been changed in the given block range. - /// Works only for runtimes that support changes tries. - pub fn key_changes( - &self, - first: NumberFor, - last: BlockId, - key: &StorageKey - ) -> error::Result, u32)>> { - let (config, storage) = self.require_changes_trie()?; - let last_number = self.backend.blockchain().expect_block_number_from_id(&last)?.as_(); - let last_hash = self.backend.blockchain().expect_block_hash_from_id(&last)?; - - key_changes::<_, Blake2Hasher>( - &config, - &*storage, - first.as_(), - &ChangesTrieAnchorBlockId { - hash: convert_hash(&last_hash), - number: last_number, - }, - self.backend.blockchain().info()?.best_number.as_(), - &key.0) - .and_then(|r| r.map(|r| r.map(|(block, tx)| (As::sa(block), tx))).collect::>()) - .map_err(|err| error::ErrorKind::ChangesTrieAccessFailed(err).into()) - } - - /// Get proof for computation of (block, extrinsic) pairs where key has been changed in the given block range. - /// `min` is the hash of the first block, whose changes trie root is known to the requester - when we're using - /// changes tries from ascendants of this block, we should provide proofs for changes tries roots - /// `max` is the hash of the last block known to the requester - we can't use changes tries from descendants - /// of this block. - /// Works only for runtimes that support changes tries.
- pub fn key_changes_proof( - &self, - first: Block::Hash, - last: Block::Hash, - min: Block::Hash, - max: Block::Hash, - key: &StorageKey - ) -> error::Result> { - self.key_changes_proof_with_cht_size( - first, - last, - min, - max, - key, - cht::SIZE, - ) - } - - /// Does the same work as `key_changes_proof`, but assumes that CHTs are of passed size. - pub fn key_changes_proof_with_cht_size( - &self, - first: Block::Hash, - last: Block::Hash, - min: Block::Hash, - max: Block::Hash, - key: &StorageKey, - cht_size: u64, - ) -> error::Result> { - struct AccessedRootsRecorder<'a, Block: BlockT> { - storage: &'a ChangesTrieStorage, - min: u64, - required_roots_proofs: Mutex, H256>>, - }; - - impl<'a, Block: BlockT> ChangesTrieRootsStorage for AccessedRootsRecorder<'a, Block> { - fn root(&self, anchor: &ChangesTrieAnchorBlockId, block: u64) -> Result, String> { - let root = self.storage.root(anchor, block)?; - if block < self.min { - if let Some(ref root) = root { - self.required_roots_proofs.lock().insert( - As::sa(block), - root.clone() - ); - } - } - Ok(root) - } - } - - impl<'a, Block: BlockT> ChangesTrieStorage for AccessedRootsRecorder<'a, Block> { - fn get(&self, key: &H256, prefix: &[u8]) -> Result, String> { - self.storage.get(key, prefix) - } - } - - let (config, storage) = self.require_changes_trie()?; - let min_number = self.backend.blockchain().expect_block_number_from_id(&BlockId::Hash(min))?; - - let recording_storage = AccessedRootsRecorder:: { - storage, - min: min_number.as_(), - required_roots_proofs: Mutex::new(BTreeMap::new()), - }; - - let max_number = ::std::cmp::min( - self.backend.blockchain().info()?.best_number, - self.backend.blockchain().expect_block_number_from_id(&BlockId::Hash(max))?, - ); - - // fetch key changes proof - let first_number = self.backend.blockchain().expect_block_number_from_id(&BlockId::Hash(first))?.as_(); - let last_number = self.backend.blockchain().expect_block_number_from_id(&BlockId::Hash(last))?.as_(); - let key_changes_proof = key_changes_proof::<_, Blake2Hasher>( - &config, - &recording_storage, - first_number, - &ChangesTrieAnchorBlockId { - hash: convert_hash(&last), - number: last_number, - }, - max_number.as_(), - &key.0 - ) - .map_err(|err| error::Error::from(error::ErrorKind::ChangesTrieAccessFailed(err)))?; - - // now gather proofs for all changes tries roots that were touched during key_changes_proof - // execution AND are unknown (i.e. replaced with CHT) to the requester - let roots = recording_storage.required_roots_proofs.into_inner(); - let roots_proof = self.changes_trie_roots_proof(cht_size, roots.keys().cloned())?; - - Ok(ChangesProof { - max_block: max_number, - proof: key_changes_proof, - roots: roots.into_iter().map(|(n, h)| (n, convert_hash(&h))).collect(), - roots_proof, - }) - } - - /// Generate CHT-based proof for roots of changes tries at given blocks. - fn changes_trie_roots_proof>>( - &self, - cht_size: u64, - blocks: I - ) -> error::Result>> { - // most probably we have touched several changes tries that are parts of the single CHT - // => GroupBy changes tries by CHT number and then gather proof for the whole group at once - let mut proof = HashSet::new(); - - cht::for_each_cht_group::(cht_size, blocks, |_, cht_num, cht_blocks| { - let cht_proof = self.changes_trie_roots_proof_at_cht(cht_size, cht_num, cht_blocks)?; - proof.extend(cht_proof); - Ok(()) - }, ())?; - - Ok(proof.into_iter().collect()) - } - - /// Generates CHT-based proof for roots of changes tries at given blocks (that are part of single CHT). 
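A hypothetical sketch of querying key changes with the two methods above, for a chain whose block number type is `u64` (`client` and `key` assumed in scope); the range is clamped first because older changes tries may have been pruned:

if let Some((first, last)) = client.max_key_changes_range(1, BlockId::Number(100))? {
    // Each entry is a (block number, extrinsic index) pair at which `key` changed.
    for (block, extrinsic) in client.key_changes(first, last, &key)? {
        println!("key changed in block {} at extrinsic index {}", block, extrinsic);
    }
}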
- fn changes_trie_roots_proof_at_cht( - &self, - cht_size: u64, - cht_num: NumberFor, - blocks: Vec> - ) -> error::Result>> { - let cht_start = cht::start_number(cht_size, cht_num); - let roots = (cht_start.as_()..).map(|num| self.header(&BlockId::Number(As::sa(num))) - .map(|block| block.and_then(|block| block.digest().log(DigestItem::as_changes_trie_root).cloned()))); - let proof = cht::build_proof::(cht_size, cht_num, blocks, roots)?; - Ok(proof) - } - - /// Returns changes trie configuration and storage or an error if it is not supported. - fn require_changes_trie(&self) -> error::Result<(ChangesTrieConfiguration, &B::ChangesTrieStorage)> { - let config = self.changes_trie_config()?; - let storage = self.backend.changes_trie_storage(); - match (config, storage) { - (Some(config), Some(storage)) => Ok((config, storage)), - _ => Err(error::ErrorKind::ChangesTriesNotSupported.into()), - } - } - - /// Create a new block, built on the head of the chain. - pub fn new_block( - &self - ) -> error::Result> where - E: Clone + Send + Sync, - RA: Send + Sync, - Self: ProvideRuntimeApi, - ::Api: BlockBuilderAPI - { - block_builder::BlockBuilder::new(self) - } - - /// Create a new block, built on top of `parent`. - pub fn new_block_at( - &self, parent: &BlockId - ) -> error::Result> where - E: Clone + Send + Sync, - RA: Send + Sync, - Self: ProvideRuntimeApi, - ::Api: BlockBuilderAPI - { - block_builder::BlockBuilder::at_block(parent, &self) - } - - /// Lock the import lock, and run operations inside. - pub fn lock_import_and_run(&self, f: F) -> Result where - F: FnOnce(&mut ClientImportOperation) -> Result, - Err: From, - { - let inner = || { - let _import_lock = self.import_lock.lock(); - - let mut op = ClientImportOperation { - op: self.backend.begin_operation()?, - notify_imported: None, - notify_finalized: Vec::new(), - }; - - let r = f(&mut op)?; - - let ClientImportOperation { op, notify_imported, notify_finalized } = op; - self.backend.commit_operation(op)?; - self.notify_finalized(notify_finalized)?; - - if let Some(notify_imported) = notify_imported { - self.notify_imported(notify_imported)?; - } - - Ok(r) - }; - - let result = inner(); - *self.importing_block.write() = None; - - result - } - - /// Set a block as best block. - pub fn set_head( - &self, - id: BlockId - ) -> error::Result<()> { - self.lock_import_and_run(|operation| { - self.apply_head(operation, id) - }) - } - - /// Set a block as best block, and apply it to an operation. - pub fn apply_head( - &self, - operation: &mut ClientImportOperation, - id: BlockId, - ) -> error::Result<()> { - operation.op.mark_head(id) - } - - /// Apply a checked and validated block to an operation. If a justification is provided - /// then `finalized` *must* be true. - pub fn apply_block( - &self, - operation: &mut ClientImportOperation, - import_block: ImportBlock, - new_cache: HashMap>, - ) -> error::Result where - E: CallExecutor + Send + Sync + Clone, - { - use runtime_primitives::traits::Digest; - - let ImportBlock { - origin, - header, - justification, - post_digests, - body, - finalized, - auxiliary, - fork_choice, - } = import_block; - - assert!(justification.is_some() && finalized || justification.is_none()); - - let parent_hash = header.parent_hash().clone(); - - match self.backend.blockchain().status(BlockId::Hash(parent_hash))? 
{ - blockchain::BlockStatus::InChain => {}, - blockchain::BlockStatus::Unknown => return Ok(ImportResult::UnknownParent), - } - - let import_headers = if post_digests.is_empty() { - PrePostHeader::Same(header) - } else { - let mut post_header = header.clone(); - for item in post_digests { - post_header.digest_mut().push(item); - } - PrePostHeader::Different(header, post_header) - }; - - let hash = import_headers.post().hash(); - let height: u64 = import_headers.post().number().as_(); - - *self.importing_block.write() = Some(hash); - - let result = self.execute_and_import_block( - operation, - origin, - hash, - import_headers, - justification, - body, - new_cache, - finalized, - auxiliary, - fork_choice, - ); - - telemetry!(SUBSTRATE_INFO; "block.import"; - "height" => height, - "best" => ?hash, - "origin" => ?origin - ); - - result - } - - fn execute_and_import_block( - &self, - operation: &mut ClientImportOperation, - origin: BlockOrigin, - hash: Block::Hash, - import_headers: PrePostHeader, - justification: Option, - body: Option>, - new_cache: HashMap>, - finalized: bool, - aux: Vec<(Vec, Option>)>, - fork_choice: ForkChoiceStrategy, - ) -> error::Result where - E: CallExecutor + Send + Sync + Clone, - { - let parent_hash = import_headers.post().parent_hash().clone(); - match self.backend.blockchain().status(BlockId::Hash(hash))? { - blockchain::BlockStatus::InChain => return Ok(ImportResult::AlreadyInChain), - blockchain::BlockStatus::Unknown => {}, - } - - let (last_best, last_best_number) = { - let info = self.backend.blockchain().info()?; - (info.best_hash, info.best_number) - }; - - // this is a fairly arbitrary choice of where to draw the line on making notifications, - // but the general goal is to only make notifications when we are already fully synced - // and get a new chain head. - let make_notifications = match origin { - BlockOrigin::NetworkBroadcast | BlockOrigin::Own | BlockOrigin::ConsensusBroadcast => true, - BlockOrigin::Genesis | BlockOrigin::NetworkInitialSync | BlockOrigin::File => false, - }; - - self.backend.begin_state_operation(&mut operation.op, BlockId::Hash(parent_hash))?; - - // ensure parent block is finalized to maintain invariant that - // finality is called sequentially. 
- if finalized { - self.apply_finality_with_block_hash(operation, parent_hash, None, last_best, make_notifications)?; - } - - // FIXME #1232: correct path logic for when to execute this function - let (storage_update,changes_update,storage_changes) = self.block_execution(&operation.op, &import_headers, origin, hash, body.clone())?; - - let is_new_best = finalized || match fork_choice { - ForkChoiceStrategy::LongestChain => import_headers.post().number() > &last_best_number, - ForkChoiceStrategy::Custom(v) => v, - }; - let leaf_state = if finalized { - crate::backend::NewBlockState::Final - } else if is_new_best { - crate::backend::NewBlockState::Best - } else { - crate::backend::NewBlockState::Normal - }; - - trace!("Imported {}, (#{}), best={}, origin={:?}", hash, import_headers.post().number(), is_new_best, origin); - - operation.op.set_block_data( - import_headers.post().clone(), - body, - justification, - leaf_state, - )?; - - operation.op.update_cache(new_cache); - if let Some(storage_update) = storage_update { - operation.op.update_db_storage(storage_update)?; - } - if let Some(storage_changes) = storage_changes.clone() { - operation.op.update_storage(storage_changes)?; - } - if let Some(Some(changes_update)) = changes_update { - operation.op.update_changes_trie(changes_update)?; - } - - operation.op.insert_aux(aux)?; - - if make_notifications { - if finalized { - operation.notify_finalized.push(hash); - } - - operation.notify_imported = Some((hash, origin, import_headers.into_post(), is_new_best, storage_changes)); - } - - Ok(ImportResult::imported()) - } - - fn block_execution( - &self, - transaction: &B::BlockImportOperation, - import_headers: &PrePostHeader, - origin: BlockOrigin, - hash: Block::Hash, - body: Option>, - ) -> error::Result<( - Option>, - Option>, - Option, Option>)>>, - )> - where - E: CallExecutor + Send + Sync + Clone, - { - match transaction.state()? 
{ - Some(transaction_state) => { - let mut overlay = Default::default(); - let get_execution_manager = |execution_strategy: ExecutionStrategy| { - match execution_strategy { - ExecutionStrategy::NativeElseWasm => ExecutionManager::NativeElseWasm, - ExecutionStrategy::AlwaysWasm => ExecutionManager::AlwaysWasm, - ExecutionStrategy::NativeWhenPossible => ExecutionManager::NativeWhenPossible, - ExecutionStrategy::Both => ExecutionManager::Both(|wasm_result, native_result| { - let header = import_headers.post(); - warn!("Consensus error between wasm and native block execution at block {}", hash); - warn!(" Header {:?}", header); - warn!(" Native result {:?}", native_result); - warn!(" Wasm result {:?}", wasm_result); - telemetry!(SUBSTRATE_INFO; "block.execute.consensus_failure"; - "hash" => ?hash, - "origin" => ?origin, - "header" => ?header - ); - wasm_result - }), - } - }; - let (_, storage_update, changes_update) = self.executor.call_at_state::<_, _, _, NeverNativeValue, fn() -> _>( - transaction_state, - &mut overlay, - "Core_execute_block", - &::new(import_headers.pre().clone(), body.unwrap_or_default()).encode(), - match origin { - BlockOrigin::NetworkInitialSync => get_execution_manager(self.execution_strategies().syncing), - _ => get_execution_manager(self.execution_strategies().importing), - }, - None, - NeverOffchainExt::new(), - )?; - - overlay.commit_prospective(); - - Ok((Some(storage_update), Some(changes_update), Some(overlay.into_committed().collect()))) - }, - None => Ok((None, None, None)) - } - } - - fn apply_finality_with_block_hash( - &self, - operation: &mut ClientImportOperation, - block: Block::Hash, - justification: Option, - best_block: Block::Hash, - notify: bool, - ) -> error::Result<()> { - // find tree route from last finalized to given block. - let last_finalized = self.backend.blockchain().last_finalized()?; - - if block == last_finalized { - warn!("Possible safety violation: attempted to re-finalize last finalized block {:?} ", last_finalized); - return Ok(()); - } - - let route_from_finalized = crate::blockchain::tree_route( - self.backend.blockchain(), - BlockId::Hash(last_finalized), - BlockId::Hash(block), - )?; - - if let Some(retracted) = route_from_finalized.retracted().get(0) { - warn!("Safety violation: attempted to revert finalized block {:?} which is not in the \ - same chain as last finalized {:?}", retracted, last_finalized); - - bail!(error::ErrorKind::NotInFinalizedChain); - } - - let route_from_best = crate::blockchain::tree_route( - self.backend.blockchain(), - BlockId::Hash(best_block), - BlockId::Hash(block), - )?; - - // if the block is not a direct ancestor of the current best chain, - // then some other block is the common ancestor. - if route_from_best.common_block().hash != block { - // FIXME: #1442 reorganize best block to be the best chain containing - // `block`. - } - - let enacted = route_from_finalized.enacted(); - assert!(enacted.len() > 0); - for finalize_new in &enacted[..enacted.len() - 1] { - operation.op.mark_finalized(BlockId::Hash(finalize_new.hash), None)?; - } - - assert_eq!(enacted.last().map(|e| e.hash), Some(block)); - operation.op.mark_finalized(BlockId::Hash(block), justification)?; - - if notify { - // sometimes when syncing, tons of blocks can be finalized at once. - // we'll send notifications spuriously in that case. - const MAX_TO_NOTIFY: usize = 256; - let enacted = route_from_finalized.enacted(); - let start = enacted.len() - ::std::cmp::min(enacted.len(), MAX_TO_NOTIFY); - for finalized in &enacted[start..] 
{ - operation.notify_finalized.push(finalized.hash); - } - } - - Ok(()) - } - - fn notify_finalized( - &self, - notify_finalized: Vec, - ) -> error::Result<()> { - let mut sinks = self.finality_notification_sinks.lock(); - - for finalized_hash in notify_finalized { - let header = self.header(&BlockId::Hash(finalized_hash))? + } + + /// Get the RuntimeVersion at a given block. + pub fn runtime_version_at(&self, id: &BlockId) -> error::Result { + self.executor.runtime_version(id) + } + + /// Get call executor reference. + pub fn executor(&self) -> &E { + &self.executor + } + + /// Reads storage value at a given block + key, returning read proof. + pub fn read_proof(&self, id: &BlockId, key: &[u8]) -> error::Result>> { + self.state_at(id).and_then(|state| { + prove_read(state, key) + .map(|(_, proof)| proof) + .map_err(Into::into) + }) + } + + /// Execute a call to a contract on top of state in a block of given hash + /// AND returning execution proof. + /// + /// No changes are made. + pub fn execution_proof( + &self, + id: &BlockId, + method: &str, + call_data: &[u8], + ) -> error::Result<(Vec, Vec>)> { + let state = self.state_at(id)?; + let header = self.prepare_environment_block(id)?; + prove_execution(state, header, &self.executor, method, call_data) + } + + /// Reads given header and generates CHT-based header proof. + pub fn header_proof( + &self, + id: &BlockId, + ) -> error::Result<(Block::Header, Vec>)> { + self.header_proof_with_cht_size(id, cht::SIZE) + } + + /// Get block hash by number. + pub fn block_hash( + &self, + block_number: <::Header as HeaderT>::Number, + ) -> error::Result> { + self.backend.blockchain().hash(block_number) + } + + /// Reads given header and generates CHT-based header proof for CHT of given size. + pub fn header_proof_with_cht_size( + &self, + id: &BlockId, + cht_size: u64, + ) -> error::Result<(Block::Header, Vec>)> { + let proof_error = + || error::ErrorKind::Backend(format!("Failed to generate header proof for {:?}", id)); + let header = self.backend.blockchain().expect_header(*id)?; + let block_num = *header.number(); + let cht_num = cht::block_to_cht_number(cht_size, block_num).ok_or_else(proof_error)?; + let cht_start = cht::start_number(cht_size, cht_num); + let headers = (cht_start.as_()..).map(|num| self.block_hash(As::sa(num))); + let proof = cht::build_proof::( + cht_size, + cht_num, + ::std::iter::once(block_num), + headers, + )?; + Ok((header, proof)) + } + + /// Get the longest range within [first; last] that is possible to use in `key_changes` + /// and `key_changes_proof` calls. + /// Range could be shortened from the beginning if some changes tries have been pruned. + /// Returns Ok(None) if changes tries are not supported. + pub fn max_key_changes_range( + &self, + first: NumberFor, + last: BlockId, + ) -> error::Result, BlockId)>> { + let (config, storage) = match self.require_changes_trie().ok() { + Some((config, storage)) => (config, storage), + None => return Ok(None), + }; + let first = first.as_(); + let last_num = self + .backend + .blockchain() + .expect_block_number_from_id(&last)?
+ .as_(); + if first > last_num { + return Err(error::ErrorKind::ChangesTrieAccessFailed( + "Invalid changes trie range".into(), + ) + .into()); + } + let finalized_number = self.backend.blockchain().info()?.finalized_number; + let oldest = storage.oldest_changes_trie_block(&config, finalized_number.as_()); + let first = As::sa(::std::cmp::max(first, oldest)); + Ok(Some((first, last))) + } + + /// Get pairs of (block, extrinsic) where key has been changed at given blocks range. + /// Works only for runtimes that support changes tries. + pub fn key_changes( + &self, + first: NumberFor, + last: BlockId, + key: &StorageKey, + ) -> error::Result, u32)>> { + let (config, storage) = self.require_changes_trie()?; + let last_number = self + .backend + .blockchain() + .expect_block_number_from_id(&last)? + .as_(); + let last_hash = self.backend.blockchain().expect_block_hash_from_id(&last)?; + + key_changes::<_, Blake2Hasher>( + &config, + &*storage, + first.as_(), + &ChangesTrieAnchorBlockId { + hash: convert_hash(&last_hash), + number: last_number, + }, + self.backend.blockchain().info()?.best_number.as_(), + &key.0, + ) + .and_then(|r| { + r.map(|r| r.map(|(block, tx)| (As::sa(block), tx))) + .collect::>() + }) + .map_err(|err| error::ErrorKind::ChangesTrieAccessFailed(err).into()) + } + + /// Get proof for computation of (block, extrinsic) pairs where key has been changed at given blocks range. + /// `min` is the hash of the first block, whose changes trie root is known to the requester - when we're using + /// changes tries from ascendants of this block, we should provide proofs for changes tries roots. + /// `max` is the hash of the last block known to the requester - we can't use changes tries from descendants + /// of this block. + /// Works only for runtimes that support changes tries. + pub fn key_changes_proof( + &self, + first: Block::Hash, + last: Block::Hash, + min: Block::Hash, + max: Block::Hash, + key: &StorageKey, + ) -> error::Result> { + self.key_changes_proof_with_cht_size(first, last, min, max, key, cht::SIZE) + } + + /// Does the same work as `key_changes_proof`, but assumes that CHTs are of the passed size.
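Aside: `max_key_changes_range` above only ever tightens the lower bound of the requested range, because changes tries are pruned from the oldest block upwards. A minimal standalone sketch of that clamping rule, using plain `u64` block numbers in place of `NumberFor<Block>` (names are illustrative, not the real API):

```rust
// Sketch of the range-clamping rule in `max_key_changes_range`: the range
// [first; last] may only be shortened from the beginning, up to the oldest
// block whose changes trie is still stored.
fn clamp_changes_trie_range(
    first: u64,
    last: u64,
    oldest_unpruned: u64,
) -> Result<(u64, u64), String> {
    if first > last {
        return Err("Invalid changes trie range".into());
    }
    // Only the lower bound moves; the upper bound stays at the caller's `last`.
    Ok((first.max(oldest_unpruned), last))
}

fn main() {
    // Tries below block 100 were pruned, so the range start is lifted to 100.
    assert_eq!(clamp_changes_trie_range(40, 250, 100), Ok((100, 250)));
    // Fully available ranges are returned unchanged.
    assert_eq!(clamp_changes_trie_range(150, 250, 100), Ok((150, 250)));
}
```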
+ pub fn key_changes_proof_with_cht_size( + &self, + first: Block::Hash, + last: Block::Hash, + min: Block::Hash, + max: Block::Hash, + key: &StorageKey, + cht_size: u64, + ) -> error::Result> { + struct AccessedRootsRecorder<'a, Block: BlockT> { + storage: &'a ChangesTrieStorage, + min: u64, + required_roots_proofs: Mutex, H256>>, + }; + + impl<'a, Block: BlockT> ChangesTrieRootsStorage for AccessedRootsRecorder<'a, Block> { + fn root( + &self, + anchor: &ChangesTrieAnchorBlockId, + block: u64, + ) -> Result, String> { + let root = self.storage.root(anchor, block)?; + if block < self.min { + if let Some(ref root) = root { + self.required_roots_proofs + .lock() + .insert(As::sa(block), root.clone()); + } + } + Ok(root) + } + } + + impl<'a, Block: BlockT> ChangesTrieStorage for AccessedRootsRecorder<'a, Block> { + fn get(&self, key: &H256, prefix: &[u8]) -> Result, String> { + self.storage.get(key, prefix) + } + } + + let (config, storage) = self.require_changes_trie()?; + let min_number = self + .backend + .blockchain() + .expect_block_number_from_id(&BlockId::Hash(min))?; + + let recording_storage = AccessedRootsRecorder:: { + storage, + min: min_number.as_(), + required_roots_proofs: Mutex::new(BTreeMap::new()), + }; + + let max_number = ::std::cmp::min( + self.backend.blockchain().info()?.best_number, + self.backend + .blockchain() + .expect_block_number_from_id(&BlockId::Hash(max))?, + ); + + // fetch key changes proof + let first_number = self + .backend + .blockchain() + .expect_block_number_from_id(&BlockId::Hash(first))? + .as_(); + let last_number = self + .backend + .blockchain() + .expect_block_number_from_id(&BlockId::Hash(last))? + .as_(); + let key_changes_proof = key_changes_proof::<_, Blake2Hasher>( + &config, + &recording_storage, + first_number, + &ChangesTrieAnchorBlockId { + hash: convert_hash(&last), + number: last_number, + }, + max_number.as_(), + &key.0, + ) + .map_err(|err| error::Error::from(error::ErrorKind::ChangesTrieAccessFailed(err)))?; + + // now gather proofs for all changes tries roots that were touched during key_changes_proof + // execution AND are unknown (i.e. replaced with CHT) to the requester + let roots = recording_storage.required_roots_proofs.into_inner(); + let roots_proof = self.changes_trie_roots_proof(cht_size, roots.keys().cloned())?; + + Ok(ChangesProof { + max_block: max_number, + proof: key_changes_proof, + roots: roots + .into_iter() + .map(|(n, h)| (n, convert_hash(&h))) + .collect(), + roots_proof, + }) + } + + /// Generate CHT-based proof for roots of changes tries at given blocks. + fn changes_trie_roots_proof>>( + &self, + cht_size: u64, + blocks: I, + ) -> error::Result>> { + // most probably we have touched several changes tries that are parts of the single CHT + // => GroupBy changes tries by CHT number and then gather proof for the whole group at once + let mut proof = HashSet::new(); + + cht::for_each_cht_group::( + cht_size, + blocks, + |_, cht_num, cht_blocks| { + let cht_proof = + self.changes_trie_roots_proof_at_cht(cht_size, cht_num, cht_blocks)?; + proof.extend(cht_proof); + Ok(()) + }, + (), + )?; + + Ok(proof.into_iter().collect()) + } + + /// Generates CHT-based proof for roots of changes tries at given blocks (that are part of single CHT). 
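Aside: the `AccessedRootsRecorder` defined inside `key_changes_proof_with_cht_size` above is a decorator over the changes-trie storage: it forwards every root lookup and remembers the roots served for blocks below `min`, so proofs for them can be gathered afterwards. A self-contained sketch of the same pattern, with a toy one-method trait and `u64` block numbers (the trait and store here are stand-ins, not the real types):

```rust
use std::collections::BTreeMap;
use std::sync::Mutex;

// What a roots store can do, reduced to one method for the sketch.
trait RootsStorage {
    fn root(&self, block: u64) -> Option<[u8; 32]>;
}

// Forwards to the inner storage, but records every root it served for a
// block below `min` - those are the roots the requester cannot verify alone.
struct AccessedRootsRecorder<'a> {
    storage: &'a dyn RootsStorage,
    min: u64,
    required_roots: Mutex<BTreeMap<u64, [u8; 32]>>,
}

impl<'a> RootsStorage for AccessedRootsRecorder<'a> {
    fn root(&self, block: u64) -> Option<[u8; 32]> {
        let root = self.storage.root(block);
        if block < self.min {
            if let Some(root) = root {
                self.required_roots.lock().unwrap().insert(block, root);
            }
        }
        root
    }
}

// A map-backed store for demonstration.
struct MapStore(BTreeMap<u64, [u8; 32]>);

impl RootsStorage for MapStore {
    fn root(&self, block: u64) -> Option<[u8; 32]> {
        self.0.get(&block).copied()
    }
}

fn main() {
    let mut roots = BTreeMap::new();
    roots.insert(5, [1u8; 32]);
    roots.insert(50, [2u8; 32]);
    let store = MapStore(roots);

    let recorder = AccessedRootsRecorder {
        storage: &store,
        min: 10,
        required_roots: Mutex::new(BTreeMap::new()),
    };
    recorder.root(5); // below `min`: recorded, must be proven to the requester
    recorder.root(50); // already known to the requester: not recorded
    assert_eq!(recorder.required_roots.lock().unwrap().len(), 1);
}
```

Interior mutability (`Mutex`) is what lets the wrapper record through `&self`, matching how the real recorder is used behind a shared reference.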
+ fn changes_trie_roots_proof_at_cht( + &self, + cht_size: u64, + cht_num: NumberFor, + blocks: Vec>, + ) -> error::Result>> { + let cht_start = cht::start_number(cht_size, cht_num); + let roots = (cht_start.as_()..).map(|num| { + self.header(&BlockId::Number(As::sa(num))).map(|block| { + block.and_then(|block| { + block + .digest() + .log(DigestItem::as_changes_trie_root) + .cloned() + }) + }) + }); + let proof = cht::build_proof::( + cht_size, cht_num, blocks, roots, + )?; + Ok(proof) + } + + /// Returns changes trie configuration and storage or an error if it is not supported. + fn require_changes_trie( + &self, + ) -> error::Result<(ChangesTrieConfiguration, &B::ChangesTrieStorage)> { + let config = self.changes_trie_config()?; + let storage = self.backend.changes_trie_storage(); + match (config, storage) { + (Some(config), Some(storage)) => Ok((config, storage)), + _ => Err(error::ErrorKind::ChangesTriesNotSupported.into()), + } + } + + /// Create a new block, built on the head of the chain. + pub fn new_block(&self) -> error::Result> + where + E: Clone + Send + Sync, + RA: Send + Sync, + Self: ProvideRuntimeApi, + ::Api: BlockBuilderAPI, + { + block_builder::BlockBuilder::new(self) + } + + /// Create a new block, built on top of `parent`. + pub fn new_block_at( + &self, + parent: &BlockId, + ) -> error::Result> + where + E: Clone + Send + Sync, + RA: Send + Sync, + Self: ProvideRuntimeApi, + ::Api: BlockBuilderAPI, + { + block_builder::BlockBuilder::at_block(parent, &self) + } + + /// Lock the import lock, and run operations inside. + pub fn lock_import_and_run(&self, f: F) -> Result + where + F: FnOnce(&mut ClientImportOperation) -> Result, + Err: From, + { + let inner = || { + let _import_lock = self.import_lock.lock(); + + let mut op = ClientImportOperation { + op: self.backend.begin_operation()?, + notify_imported: None, + notify_finalized: Vec::new(), + }; + + let r = f(&mut op)?; + + let ClientImportOperation { + op, + notify_imported, + notify_finalized, + } = op; + self.backend.commit_operation(op)?; + self.notify_finalized(notify_finalized)?; + + if let Some(notify_imported) = notify_imported { + self.notify_imported(notify_imported)?; + } + + Ok(r) + }; + + let result = inner(); + *self.importing_block.write() = None; + + result + } + + /// Set a block as best block. + pub fn set_head(&self, id: BlockId) -> error::Result<()> { + self.lock_import_and_run(|operation| self.apply_head(operation, id)) + } + + /// Set a block as best block, and apply it to an operation. + pub fn apply_head( + &self, + operation: &mut ClientImportOperation, + id: BlockId, + ) -> error::Result<()> { + operation.op.mark_head(id) + } + + /// Apply a checked and validated block to an operation. If a justification is provided + /// then `finalized` *must* be true. + pub fn apply_block( + &self, + operation: &mut ClientImportOperation, + import_block: ImportBlock, + new_cache: HashMap>, + ) -> error::Result + where + E: CallExecutor + Send + Sync + Clone, + { + use runtime_primitives::traits::Digest; + + let ImportBlock { + origin, + header, + justification, + post_digests, + body, + finalized, + auxiliary, + fork_choice, + } = import_block; + + assert!(justification.is_some() && finalized || justification.is_none()); + + let parent_hash = header.parent_hash().clone(); + + match self + .backend + .blockchain() + .status(BlockId::Hash(parent_hash))? 
+ { + blockchain::BlockStatus::InChain => {} + blockchain::BlockStatus::Unknown => return Ok(ImportResult::UnknownParent), + } + + let import_headers = if post_digests.is_empty() { + PrePostHeader::Same(header) + } else { + let mut post_header = header.clone(); + for item in post_digests { + post_header.digest_mut().push(item); + } + PrePostHeader::Different(header, post_header) + }; + + let hash = import_headers.post().hash(); + let height: u64 = import_headers.post().number().as_(); + + *self.importing_block.write() = Some(hash); + + let result = self.execute_and_import_block( + operation, + origin, + hash, + import_headers, + justification, + body, + new_cache, + finalized, + auxiliary, + fork_choice, + ); + + telemetry!(SUBSTRATE_INFO; "block.import"; + "height" => height, + "best" => ?hash, + "origin" => ?origin + ); + + result + } + + fn execute_and_import_block( + &self, + operation: &mut ClientImportOperation, + origin: BlockOrigin, + hash: Block::Hash, + import_headers: PrePostHeader, + justification: Option, + body: Option>, + new_cache: HashMap>, + finalized: bool, + aux: Vec<(Vec, Option>)>, + fork_choice: ForkChoiceStrategy, + ) -> error::Result + where + E: CallExecutor + Send + Sync + Clone, + { + let parent_hash = import_headers.post().parent_hash().clone(); + match self.backend.blockchain().status(BlockId::Hash(hash))? { + blockchain::BlockStatus::InChain => return Ok(ImportResult::AlreadyInChain), + blockchain::BlockStatus::Unknown => {} + } + + let (last_best, last_best_number) = { + let info = self.backend.blockchain().info()?; + (info.best_hash, info.best_number) + }; + + // this is a fairly arbitrary choice of where to draw the line on making notifications, + // but the general goal is to only make notifications when we are already fully synced + // and get a new chain head. + let make_notifications = match origin { + BlockOrigin::NetworkBroadcast | BlockOrigin::Own | BlockOrigin::ConsensusBroadcast => { + true + } + BlockOrigin::Genesis | BlockOrigin::NetworkInitialSync | BlockOrigin::File => false, + }; + + self.backend + .begin_state_operation(&mut operation.op, BlockId::Hash(parent_hash))?; + + // ensure parent block is finalized to maintain invariant that + // finality is called sequentially. 
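Aside: `apply_block` above keeps the header as authored (`pre`) separate from the header with `post_digests` pushed onto it (`post`): execution runs against the pre-header, while the post-header names the imported block. A standalone sketch of the `PrePostHeader` helper with a toy header type (the real enum, defined elsewhere in this file, is generic over the block's header); the `if finalized` branch just below then continues the import path:

```rust
// Toy stand-in; the real code is generic over the block's header type.
#[derive(Debug)]
struct Header {
    digests: Vec<u8>,
}

// Either the pre- and post-digest headers are the same object, or both are
// kept so `pre()` can drive execution while `post()` names the imported block.
enum PrePostHeader {
    Same(Header),
    Different(Header, Header),
}

impl PrePostHeader {
    fn pre(&self) -> &Header {
        match self {
            PrePostHeader::Same(h) => h,
            PrePostHeader::Different(pre, _) => pre,
        }
    }
    fn post(&self) -> &Header {
        match self {
            PrePostHeader::Same(h) => h,
            PrePostHeader::Different(_, post) => post,
        }
    }
    fn into_post(self) -> Header {
        match self {
            PrePostHeader::Same(h) => h,
            PrePostHeader::Different(_, post) => post,
        }
    }
}

fn main() {
    // No post-digests: one header plays both roles.
    let same = PrePostHeader::Same(Header { digests: vec![] });
    assert!(same.pre().digests.is_empty() && same.post().digests.is_empty());

    // With post-digests, `post()` sees them and `pre()` does not.
    let diff = PrePostHeader::Different(Header { digests: vec![] }, Header { digests: vec![9] });
    assert!(diff.pre().digests.is_empty());
    // Consuming conversion, used when the import notification needs the final header.
    assert_eq!(diff.into_post().digests, vec![9]);
}
```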
+ if finalized { + self.apply_finality_with_block_hash( + operation, + parent_hash, + None, + last_best, + make_notifications, + )?; + } + + // FIXME #1232: correct path logic for when to execute this function + let (storage_update, changes_update, storage_changes) = + self.block_execution(&operation.op, &import_headers, origin, hash, body.clone())?; + + let is_new_best = finalized + || match fork_choice { + ForkChoiceStrategy::LongestChain => { + import_headers.post().number() > &last_best_number + } + ForkChoiceStrategy::Custom(v) => v, + }; + let leaf_state = if finalized { + crate::backend::NewBlockState::Final + } else if is_new_best { + crate::backend::NewBlockState::Best + } else { + crate::backend::NewBlockState::Normal + }; + + trace!( + "Imported {}, (#{}), best={}, origin={:?}", + hash, + import_headers.post().number(), + is_new_best, + origin + ); + + operation.op.set_block_data( + import_headers.post().clone(), + body, + justification, + leaf_state, + )?; + + operation.op.update_cache(new_cache); + if let Some(storage_update) = storage_update { + operation.op.update_db_storage(storage_update)?; + } + if let Some(storage_changes) = storage_changes.clone() { + operation.op.update_storage(storage_changes)?; + } + if let Some(Some(changes_update)) = changes_update { + operation.op.update_changes_trie(changes_update)?; + } + + operation.op.insert_aux(aux)?; + + if make_notifications { + if finalized { + operation.notify_finalized.push(hash); + } + + operation.notify_imported = Some(( + hash, + origin, + import_headers.into_post(), + is_new_best, + storage_changes, + )); + } + + Ok(ImportResult::imported()) + } + + fn block_execution( + &self, + transaction: &B::BlockImportOperation, + import_headers: &PrePostHeader, + origin: BlockOrigin, + hash: Block::Hash, + body: Option>, + ) -> error::Result<( + Option>, + Option>, + Option, Option>)>>, + )> + where + E: CallExecutor + Send + Sync + Clone, + { + match transaction.state()? 
{ + Some(transaction_state) => { + let mut overlay = Default::default(); + let get_execution_manager = |execution_strategy: ExecutionStrategy| { + match execution_strategy { + ExecutionStrategy::NativeElseWasm => ExecutionManager::NativeElseWasm, + ExecutionStrategy::AlwaysWasm => ExecutionManager::AlwaysWasm, + ExecutionStrategy::NativeWhenPossible => { + ExecutionManager::NativeWhenPossible + } + ExecutionStrategy::Both => { + ExecutionManager::Both(|wasm_result, native_result| { + let header = import_headers.post(); + warn!("Consensus error between wasm and native block execution at block {}", hash); + warn!(" Header {:?}", header); + warn!(" Native result {:?}", native_result); + warn!(" Wasm result {:?}", wasm_result); + telemetry!(SUBSTRATE_INFO; "block.execute.consensus_failure"; + "hash" => ?hash, + "origin" => ?origin, + "header" => ?header + ); + wasm_result + }) + } + } + }; + let (_, storage_update, changes_update) = self + .executor + .call_at_state::<_, _, _, NeverNativeValue, fn() -> _>( + transaction_state, + &mut overlay, + "Core_execute_block", + &::new( + import_headers.pre().clone(), + body.unwrap_or_default(), + ) + .encode(), + match origin { + BlockOrigin::NetworkInitialSync => { + get_execution_manager(self.execution_strategies().syncing) + } + _ => get_execution_manager(self.execution_strategies().importing), + }, + None, + NeverOffchainExt::new(), + )?; + + overlay.commit_prospective(); + + Ok(( + Some(storage_update), + Some(changes_update), + Some(overlay.into_committed().collect()), + )) + } + None => Ok((None, None, None)), + } + } + + fn apply_finality_with_block_hash( + &self, + operation: &mut ClientImportOperation, + block: Block::Hash, + justification: Option, + best_block: Block::Hash, + notify: bool, + ) -> error::Result<()> { + // find tree route from last finalized to given block. + let last_finalized = self.backend.blockchain().last_finalized()?; + + if block == last_finalized { + warn!( + "Possible safety violation: attempted to re-finalize last finalized block {:?} ", + last_finalized + ); + return Ok(()); + } + + let route_from_finalized = crate::blockchain::tree_route( + self.backend.blockchain(), + BlockId::Hash(last_finalized), + BlockId::Hash(block), + )?; + + if let Some(retracted) = route_from_finalized.retracted().get(0) { + warn!( + "Safety violation: attempted to revert finalized block {:?} which is not in the \ + same chain as last finalized {:?}", + retracted, last_finalized + ); + + bail!(error::ErrorKind::NotInFinalizedChain); + } + + let route_from_best = crate::blockchain::tree_route( + self.backend.blockchain(), + BlockId::Hash(best_block), + BlockId::Hash(block), + )?; + + // if the block is not a direct ancestor of the current best chain, + // then some other block is the common ancestor. + if route_from_best.common_block().hash != block { + // FIXME: #1442 reorganize best block to be the best chain containing + // `block`. + } + + let enacted = route_from_finalized.enacted(); + assert!(enacted.len() > 0); + for finalize_new in &enacted[..enacted.len() - 1] { + operation + .op + .mark_finalized(BlockId::Hash(finalize_new.hash), None)?; + } + + assert_eq!(enacted.last().map(|e| e.hash), Some(block)); + operation + .op + .mark_finalized(BlockId::Hash(block), justification)?; + + if notify { + // sometimes when syncing, tons of blocks can be finalized at once. + // we'll send notifications spuriously in that case. 
+ const MAX_TO_NOTIFY: usize = 256; + let enacted = route_from_finalized.enacted(); + let start = enacted.len() - ::std::cmp::min(enacted.len(), MAX_TO_NOTIFY); + for finalized in &enacted[start..] { + operation.notify_finalized.push(finalized.hash); + } + } + + Ok(()) + } + + fn notify_finalized(&self, notify_finalized: Vec) -> error::Result<()> { + let mut sinks = self.finality_notification_sinks.lock(); + + for finalized_hash in notify_finalized { + let header = self.header(&BlockId::Hash(finalized_hash))? .expect("header already known to exist in DB because it is indicated in the tree route; qed"); - let notification = FinalityNotification { - header, - hash: finalized_hash, - }; - - sinks.retain(|sink| sink.unbounded_send(notification.clone()).is_ok()); - } - - Ok(()) - } - - fn notify_imported( - &self, - notify_import: (Block::Hash, BlockOrigin, Block::Header, bool, Option, Option>)>>), - ) -> error::Result<()> { - let (hash, origin, header, is_new_best, storage_changes) = notify_import; - - if let Some(storage_changes) = storage_changes { - // TODO [ToDr] How to handle re-orgs? Should we re-emit all storage changes? - self.storage_notifications.lock() - .trigger(&hash, storage_changes.into_iter()); - } - - let notification = BlockImportNotification:: { - hash, - origin, - header, - is_new_best, - }; - - self.import_notification_sinks.lock() - .retain(|sink| sink.unbounded_send(notification.clone()).is_ok()); - - Ok(()) - } - - /// Apply auxiliary data insertion into an operation. - pub fn apply_aux< - 'a, - 'b: 'a, - 'c: 'a, - I: IntoIterator, - D: IntoIterator, - >( - &self, - operation: &mut ClientImportOperation, - insert: I, - delete: D - ) -> error::Result<()> { - operation.op.insert_aux( - insert.into_iter() - .map(|(k, v)| (k.to_vec(), Some(v.to_vec()))) - .chain(delete.into_iter().map(|k| (k.to_vec(), None))) - ) - } - - /// Mark all blocks up to given as finalized in operation. If a - /// justification is provided it is stored with the given finalized - /// block (any other finalized blocks are left unjustified). - pub fn apply_finality( - &self, - operation: &mut ClientImportOperation, - id: BlockId, - justification: Option, - notify: bool, - ) -> error::Result<()> { - let last_best = self.backend.blockchain().info()?.best_hash; - let to_finalize_hash = self.backend.blockchain().expect_block_hash_from_id(&id)?; - self.apply_finality_with_block_hash(operation, to_finalize_hash, justification, last_best, notify) - } - - /// Finalize a block. This will implicitly finalize all blocks up to it and - /// fire finality notifications. - /// - /// Pass a flag to indicate whether finality notifications should be propagated. - /// This is usually tied to some synchronization state, where we don't send notifications - /// while performing major synchronization work. - pub fn finalize_block(&self, id: BlockId, justification: Option, notify: bool) -> error::Result<()> { - self.lock_import_and_run(|operation| { - let last_best = self.backend.blockchain().info()?.best_hash; - let to_finalize_hash = self.backend.blockchain().expect_block_hash_from_id(&id)?; - self.apply_finality_with_block_hash(operation, to_finalize_hash, justification, last_best, notify) - }) - } - - /// Attempts to revert the chain by `n` blocks. Returns the number of blocks that were - /// successfully reverted. - pub fn revert(&self, n: NumberFor) -> error::Result> { - Ok(self.backend.revert(n)?) - } - - /// Get blockchain info. 
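Aside: when many blocks finalize at once during sync, `apply_finality_with_block_hash` above only queues the newest `MAX_TO_NOTIFY` enacted blocks for notification. The tail-slice arithmetic, sketched standalone with `u64` stand-ins for hashes:

```rust
// Keep only the tail of `enacted`, at most `max_to_notify` entries, mirroring
// `start = len - min(len, MAX_TO_NOTIFY)` in the hunk above.
fn notifications_to_send(enacted: &[u64], max_to_notify: usize) -> &[u64] {
    let start = enacted.len() - enacted.len().min(max_to_notify);
    &enacted[start..]
}

fn main() {
    let enacted: Vec<u64> = (1..=1000).collect();
    // During a large sync only the newest 256 finalized blocks are announced.
    assert_eq!(notifications_to_send(&enacted, 256).len(), 256);
    assert_eq!(notifications_to_send(&enacted, 256)[0], 745);
    // Shorter ranges are sent in full.
    assert_eq!(notifications_to_send(&enacted[..3], 256).len(), 3);
}
```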
- pub fn info(&self) -> error::Result> { - let info = self.backend.blockchain().info().map_err(|e| error::Error::from_blockchain(Box::new(e)))?; - Ok(ClientInfo { - chain: info, - best_queued_hash: None, - best_queued_number: None, - }) - } - - /// Get block status. - pub fn block_status(&self, id: &BlockId) -> error::Result { - // this can probably be implemented more efficiently - if let BlockId::Hash(ref h) = id { - if self.importing_block.read().as_ref().map_or(false, |importing| h == importing) { - return Ok(BlockStatus::Queued); - } - } - let hash_and_number = match id.clone() { - BlockId::Hash(hash) => self.backend.blockchain().number(hash)?.map(|n| (hash, n)), - BlockId::Number(n) => self.backend.blockchain().hash(n)?.map(|hash| (hash, n)), - }; - match hash_and_number { - Some((hash, number)) => { - if self.backend().have_state_at(&hash, number) { - Ok(BlockStatus::InChainWithState) - } else { - Ok(BlockStatus::InChainPruned) - } - } - None => Ok(BlockStatus::Unknown), - } - } - - /// Get block header by id. - pub fn header(&self, id: &BlockId) -> error::Result::Header>> { - self.backend.blockchain().header(*id) - } - - /// Get block body by id. - pub fn body(&self, id: &BlockId) -> error::Result::Extrinsic>>> { - self.backend.blockchain().body(*id) - } - - /// Get block justification set by id. - pub fn justification(&self, id: &BlockId) -> error::Result> { - self.backend.blockchain().justification(*id) - } - - /// Get full block by id. - pub fn block(&self, id: &BlockId) - -> error::Result>> - { - Ok(match (self.header(id)?, self.body(id)?, self.justification(id)?) { - (Some(header), Some(extrinsics), justification) => - Some(SignedBlock { block: Block::new(header, extrinsics), justification }), - _ => None, - }) - } - - /// Get best block header. - pub fn best_block_header(&self) -> error::Result<::Header> { - let info = self.backend.blockchain().info().map_err(|e| error::Error::from_blockchain(Box::new(e)))?; - Ok(self.header(&BlockId::Hash(info.best_hash))?.expect("Best block header must always exist")) - } - - /// Get the most recent block hash of the best (longest) chains - /// that contain block with the given `target_hash`. - /// - /// The search space is always limited to blocks which are in the finalized - /// chain or descendents of it. - /// - /// If `maybe_max_block_number` is `Some(max_block_number)` - /// the search is limited to block `numbers <= max_block_number`. - /// in other words as if there were no blocks greater `max_block_number`. - /// TODO : we want to move this implement to `blockchain::Backend`, see [#1443](https://github.com/paritytech/substrate/issues/1443) - /// Returns `Ok(None)` if `target_hash` is not found in search space. - /// TODO: document time complexity of this, see [#1444](https://github.com/paritytech/substrate/issues/1444) - pub fn best_containing(&self, target_hash: Block::Hash, maybe_max_number: Option>) - -> error::Result> - { - let target_header = { - match self.backend.blockchain().header(BlockId::Hash(target_hash))? { - Some(x) => x, - // target not in blockchain - None => { return Ok(None); }, - } - }; - - if let Some(max_number) = maybe_max_number { - // target outside search range - if target_header.number() > &max_number { - return Ok(None); - } - } - - let (leaves, best_already_checked) = { - // ensure no blocks are imported during this code block. - // an import could trigger a reorg which could change the canonical chain. - // we depend on the canonical chain staying the same during this code block. 
- let _import_lock = self.import_lock.lock(); - - let info = self.backend.blockchain().info()?; - - let canon_hash = self.backend.blockchain().hash(*target_header.number())? - .ok_or_else(|| error::Error::from(format!("failed to get hash for block number {}", target_header.number())))?; - - if canon_hash == target_hash { - // if no block at the given max depth exists fallback to the best block - if let Some(max_number) = maybe_max_number { - if let Some(header) = self.backend.blockchain().hash(max_number)? { - return Ok(Some(header)); - } - } - - return Ok(Some(info.best_hash)); - } else if info.finalized_number >= *target_header.number() { - // header is on a dead fork. - return Ok(None); - } - - (self.backend.blockchain().leaves()?, info.best_hash) - }; - - // for each chain. longest chain first. shortest last - for leaf_hash in leaves { - // ignore canonical chain which we already checked above - if leaf_hash == best_already_checked { - continue; - } - - // start at the leaf - let mut current_hash = leaf_hash; - - // if search is not restricted then the leaf is the best - let mut best_hash = leaf_hash; - - // go backwards entering the search space - // waiting until we are <= max_number - if let Some(max_number) = maybe_max_number { - loop { - let current_header = self.backend.blockchain().header(BlockId::Hash(current_hash.clone()))? - .ok_or_else(|| error::Error::from(format!("failed to get header for hash {}", current_hash)))?; - - if current_header.number() <= &max_number { - best_hash = current_header.hash(); - break; - } - - current_hash = *current_header.parent_hash(); - } - } - - // go backwards through the chain (via parent links) - loop { - // until we find target - if current_hash == target_hash { - return Ok(Some(best_hash)); - } - - let current_header = self.backend.blockchain().header(BlockId::Hash(current_hash.clone()))? - .ok_or_else(|| error::Error::from(format!("failed to get header for hash {}", current_hash)))?; - - // stop search in this chain once we go below the target's block number - if current_header.number() < target_header.number() { - break; - } - - current_hash = *current_header.parent_hash(); - } - } - - // header may be on a dead fork -- the only leaves that are considered are - // those which can still be finalized. - // - // FIXME #1558 only issue this warning when not on a dead fork - warn!( - "Block {:?} exists in chain but not found when following all \ - leaves backwards. Number limit = {:?}", - target_hash, - maybe_max_number, - ); - - Ok(None) - } - - /// Gets the uncles of the block with `target_hash` going back `max_generation` ancestors. - pub fn uncles(&self, target_hash: Block::Hash, max_generation: NumberFor) -> error::Result> { - let load_header = |id: Block::Hash| -> error::Result { - match self.backend.blockchain().header(BlockId::Hash(id))? 
{ - Some(hdr) => Ok(hdr), - None => Err(ErrorKind::UnknownBlock(format!("Unknown block {:?}", id)).into()), - } - }; - - let genesis_hash = self.backend.blockchain().info()?.genesis_hash; - if genesis_hash == target_hash { return Ok(Vec::new()); } - - let mut current_hash = target_hash; - let mut current = load_header(current_hash)?; - let mut ancestor_hash = *current.parent_hash(); - let mut ancestor = load_header(ancestor_hash)?; - let mut uncles = Vec::new(); - - for _generation in 0..max_generation.as_() { - let children = self.backend.blockchain().children(ancestor_hash)?; - uncles.extend(children.into_iter().filter(|h| h != &current_hash)); - current_hash = ancestor_hash; - if genesis_hash == current_hash { break; } - current = ancestor; - ancestor_hash = *current.parent_hash(); - ancestor = load_header(ancestor_hash)?; - } - - Ok(uncles) - } - - fn changes_trie_config(&self) -> Result, Error> { - Ok(self.backend.state_at(BlockId::Number(self.backend.blockchain().info()?.best_number))? - .storage(well_known_keys::CHANGES_TRIE_CONFIG) - .map_err(|e| error::Error::from_state(Box::new(e)))? - .and_then(|c| Decode::decode(&mut &*c))) - } - - /// Prepare in-memory header that is used in execution environment. - fn prepare_environment_block(&self, parent: &BlockId) -> error::Result { - Ok(<::Header as HeaderT>::new( - self.backend.blockchain().expect_block_number_from_id(parent)? + As::sa(1), - Default::default(), - Default::default(), - self.backend.blockchain().expect_block_hash_from_id(&parent)?, - Default::default(), - )) - } + let notification = FinalityNotification { + header, + hash: finalized_hash, + }; + + sinks.retain(|sink| sink.unbounded_send(notification.clone()).is_ok()); + } + + Ok(()) + } + + fn notify_imported( + &self, + notify_import: ( + Block::Hash, + BlockOrigin, + Block::Header, + bool, + Option, Option>)>>, + ), + ) -> error::Result<()> { + let (hash, origin, header, is_new_best, storage_changes) = notify_import; + + if let Some(storage_changes) = storage_changes { + // TODO [ToDr] How to handle re-orgs? Should we re-emit all storage changes? + self.storage_notifications + .lock() + .trigger(&hash, storage_changes.into_iter()); + } + + let notification = BlockImportNotification:: { + hash, + origin, + header, + is_new_best, + }; + + self.import_notification_sinks + .lock() + .retain(|sink| sink.unbounded_send(notification.clone()).is_ok()); + + Ok(()) + } + + /// Apply auxiliary data insertion into an operation. + pub fn apply_aux< + 'a, + 'b: 'a, + 'c: 'a, + I: IntoIterator, + D: IntoIterator, + >( + &self, + operation: &mut ClientImportOperation, + insert: I, + delete: D, + ) -> error::Result<()> { + operation.op.insert_aux( + insert + .into_iter() + .map(|(k, v)| (k.to_vec(), Some(v.to_vec()))) + .chain(delete.into_iter().map(|k| (k.to_vec(), None))), + ) + } + + /// Mark all blocks up to the given one as finalized in operation. If a + /// justification is provided it is stored with the given finalized + /// block (any other finalized blocks are left unjustified). + pub fn apply_finality( + &self, + operation: &mut ClientImportOperation, + id: BlockId, + justification: Option, + notify: bool, + ) -> error::Result<()> { + let last_best = self.backend.blockchain().info()?.best_hash; + let to_finalize_hash = self.backend.blockchain().expect_block_hash_from_id(&id)?; + self.apply_finality_with_block_hash( + operation, + to_finalize_hash, + justification, + last_best, + notify, + ) + } + + /// Finalize a block.
This will implicitly finalize all blocks up to it and + /// fire finality notifications. + /// + /// Pass a flag to indicate whether finality notifications should be propagated. + /// This is usually tied to some synchronization state, where we don't send notifications + /// while performing major synchronization work. + pub fn finalize_block( + &self, + id: BlockId, + justification: Option, + notify: bool, + ) -> error::Result<()> { + self.lock_import_and_run(|operation| { + let last_best = self.backend.blockchain().info()?.best_hash; + let to_finalize_hash = self.backend.blockchain().expect_block_hash_from_id(&id)?; + self.apply_finality_with_block_hash( + operation, + to_finalize_hash, + justification, + last_best, + notify, + ) + }) + } + + /// Attempts to revert the chain by `n` blocks. Returns the number of blocks that were + /// successfully reverted. + pub fn revert(&self, n: NumberFor) -> error::Result> { + Ok(self.backend.revert(n)?) + } + + /// Get blockchain info. + pub fn info(&self) -> error::Result> { + let info = self + .backend + .blockchain() + .info() + .map_err(|e| error::Error::from_blockchain(Box::new(e)))?; + Ok(ClientInfo { + chain: info, + best_queued_hash: None, + best_queued_number: None, + }) + } + + /// Get block status. + pub fn block_status(&self, id: &BlockId) -> error::Result { + // this can probably be implemented more efficiently + if let BlockId::Hash(ref h) = id { + if self + .importing_block + .read() + .as_ref() + .map_or(false, |importing| h == importing) + { + return Ok(BlockStatus::Queued); + } + } + let hash_and_number = match id.clone() { + BlockId::Hash(hash) => self.backend.blockchain().number(hash)?.map(|n| (hash, n)), + BlockId::Number(n) => self.backend.blockchain().hash(n)?.map(|hash| (hash, n)), + }; + match hash_and_number { + Some((hash, number)) => { + if self.backend().have_state_at(&hash, number) { + Ok(BlockStatus::InChainWithState) + } else { + Ok(BlockStatus::InChainPruned) + } + } + None => Ok(BlockStatus::Unknown), + } + } + + /// Get block header by id. + pub fn header(&self, id: &BlockId) -> error::Result::Header>> { + self.backend.blockchain().header(*id) + } + + /// Get block body by id. + pub fn body( + &self, + id: &BlockId, + ) -> error::Result::Extrinsic>>> { + self.backend.blockchain().body(*id) + } + + /// Get block justification set by id. + pub fn justification(&self, id: &BlockId) -> error::Result> { + self.backend.blockchain().justification(*id) + } + + /// Get full block by id. + pub fn block(&self, id: &BlockId) -> error::Result>> { + Ok( + match (self.header(id)?, self.body(id)?, self.justification(id)?) { + (Some(header), Some(extrinsics), justification) => Some(SignedBlock { + block: Block::new(header, extrinsics), + justification, + }), + _ => None, + }, + ) + } + + /// Get best block header. + pub fn best_block_header(&self) -> error::Result<::Header> { + let info = self + .backend + .blockchain() + .info() + .map_err(|e| error::Error::from_blockchain(Box::new(e)))?; + Ok(self + .header(&BlockId::Hash(info.best_hash))? + .expect("Best block header must always exist")) + } + + /// Get the most recent block hash of the best (longest) chains + /// that contain the block with the given `target_hash`. + /// + /// The search space is always limited to blocks which are in the finalized + /// chain or descendants of it. + /// + /// If `maybe_max_block_number` is `Some(max_block_number)` + /// the search is limited to block `numbers <= max_block_number`.
+ /// in other words as if there were no blocks greater than `max_block_number`. + /// TODO: we want to move this implementation to `blockchain::Backend`, see [#1443](https://github.com/paritytech/substrate/issues/1443) + /// Returns `Ok(None)` if `target_hash` is not found in search space. + /// TODO: document time complexity of this, see [#1444](https://github.com/paritytech/substrate/issues/1444) + pub fn best_containing( + &self, + target_hash: Block::Hash, + maybe_max_number: Option>, + ) -> error::Result> { + let target_header = { + match self + .backend + .blockchain() + .header(BlockId::Hash(target_hash))? + { + Some(x) => x, + // target not in blockchain + None => { + return Ok(None); + } + } + }; + + if let Some(max_number) = maybe_max_number { + // target outside search range + if target_header.number() > &max_number { + return Ok(None); + } + } + + let (leaves, best_already_checked) = { + // ensure no blocks are imported during this code block. + // an import could trigger a reorg which could change the canonical chain. + // we depend on the canonical chain staying the same during this code block. + let _import_lock = self.import_lock.lock(); + + let info = self.backend.blockchain().info()?; + + let canon_hash = self + .backend + .blockchain() + .hash(*target_header.number())? + .ok_or_else(|| { + error::Error::from(format!( + "failed to get hash for block number {}", + target_header.number() + )) + })?; + + if canon_hash == target_hash { + // if no block at the given max depth exists, fall back to the best block + if let Some(max_number) = maybe_max_number { + if let Some(header) = self.backend.blockchain().hash(max_number)? { + return Ok(Some(header)); + } + } + + return Ok(Some(info.best_hash)); + } else if info.finalized_number >= *target_header.number() { + // header is on a dead fork. + return Ok(None); + } + + (self.backend.blockchain().leaves()?, info.best_hash) + }; + + // for each chain. longest chain first. shortest last + for leaf_hash in leaves { + // ignore canonical chain which we already checked above + if leaf_hash == best_already_checked { + continue; + } + + // start at the leaf + let mut current_hash = leaf_hash; + + // if search is not restricted then the leaf is the best + let mut best_hash = leaf_hash; + + // go backwards entering the search space + // waiting until we are <= max_number + if let Some(max_number) = maybe_max_number { + loop { + let current_header = self + .backend + .blockchain() + .header(BlockId::Hash(current_hash.clone()))? + .ok_or_else(|| { + error::Error::from(format!( + "failed to get header for hash {}", + current_hash + )) + })?; + + if current_header.number() <= &max_number { + best_hash = current_header.hash(); + break; + } + + current_hash = *current_header.parent_hash(); + } + } + + // go backwards through the chain (via parent links) + loop { + // until we find target + if current_hash == target_hash { + return Ok(Some(best_hash)); + } + + let current_header = self + .backend + .blockchain() + .header(BlockId::Hash(current_hash.clone()))? + .ok_or_else(|| { + error::Error::from(format!( + "failed to get header for hash {}", + current_hash + )) + })?; + + // stop search in this chain once we go below the target's block number + if current_header.number() < target_header.number() { + break; + } + + current_hash = *current_header.parent_hash(); + } + } + + // header may be on a dead fork -- the only leaves that are considered are + // those which can still be finalized.
+ // + // FIXME #1558 only issue this warning when not on a dead fork + warn!( + "Block {:?} exists in chain but not found when following all \ + leaves backwards. Number limit = {:?}", + target_hash, maybe_max_number, + ); + + Ok(None) + } + + /// Gets the uncles of the block with `target_hash` going back `max_generation` ancestors. + pub fn uncles( + &self, + target_hash: Block::Hash, + max_generation: NumberFor, + ) -> error::Result> { + let load_header = |id: Block::Hash| -> error::Result { + match self.backend.blockchain().header(BlockId::Hash(id))? { + Some(hdr) => Ok(hdr), + None => Err(ErrorKind::UnknownBlock(format!("Unknown block {:?}", id)).into()), + } + }; + + let genesis_hash = self.backend.blockchain().info()?.genesis_hash; + if genesis_hash == target_hash { + return Ok(Vec::new()); + } + + let mut current_hash = target_hash; + let mut current = load_header(current_hash)?; + let mut ancestor_hash = *current.parent_hash(); + let mut ancestor = load_header(ancestor_hash)?; + let mut uncles = Vec::new(); + + for _generation in 0..max_generation.as_() { + let children = self.backend.blockchain().children(ancestor_hash)?; + uncles.extend(children.into_iter().filter(|h| h != &current_hash)); + current_hash = ancestor_hash; + if genesis_hash == current_hash { + break; + } + current = ancestor; + ancestor_hash = *current.parent_hash(); + ancestor = load_header(ancestor_hash)?; + } + + Ok(uncles) + } + + fn changes_trie_config(&self) -> Result, Error> { + Ok(self + .backend + .state_at(BlockId::Number( + self.backend.blockchain().info()?.best_number, + ))? + .storage(well_known_keys::CHANGES_TRIE_CONFIG) + .map_err(|e| error::Error::from_state(Box::new(e)))? + .and_then(|c| Decode::decode(&mut &*c))) + } + + /// Prepare in-memory header that is used in execution environment. + fn prepare_environment_block(&self, parent: &BlockId) -> error::Result { + Ok(<::Header as HeaderT>::new( + self.backend + .blockchain() + .expect_block_number_from_id(parent)?
+ + As::sa(1), + Default::default(), + Default::default(), + self.backend + .blockchain() + .expect_block_hash_from_id(&parent)?, + Default::default(), + )) + } } -impl ChainHeaderBackend for Client where - B: backend::Backend, - E: CallExecutor + Send + Sync, - Block: BlockT, - RA: Send + Sync +impl ChainHeaderBackend for Client +where + B: backend::Backend, + E: CallExecutor + Send + Sync, + Block: BlockT, + RA: Send + Sync, { - fn header(&self, id: BlockId) -> error::Result> { - self.backend.blockchain().header(id) - } - - fn info(&self) -> error::Result> { - self.backend.blockchain().info() - } - - fn status(&self, id: BlockId) -> error::Result { - self.backend.blockchain().status(id) - } - - fn number(&self, hash: Block::Hash) -> error::Result::Header as HeaderT>::Number>> { - self.backend.blockchain().number(hash) - } - - fn hash(&self, number: NumberFor) -> error::Result> { - self.backend.blockchain().hash(number) - } + fn header(&self, id: BlockId) -> error::Result> { + self.backend.blockchain().header(id) + } + + fn info(&self) -> error::Result> { + self.backend.blockchain().info() + } + + fn status(&self, id: BlockId) -> error::Result { + self.backend.blockchain().status(id) + } + + fn number( + &self, + hash: Block::Hash, + ) -> error::Result::Header as HeaderT>::Number>> { + self.backend.blockchain().number(hash) + } + + fn hash(&self, number: NumberFor) -> error::Result> { + self.backend.blockchain().hash(number) + } } -impl ProvideCache for Client where - B: backend::Backend, - Block: BlockT, +impl ProvideCache for Client +where + B: backend::Backend, + Block: BlockT, { - fn cache(&self) -> Option>> { - self.backend.blockchain().cache() - } + fn cache(&self) -> Option>> { + self.backend.blockchain().cache() + } } -impl ProvideRuntimeApi for Client where - B: backend::Backend, - E: CallExecutor + Clone + Send + Sync, - Block: BlockT, - RA: ConstructRuntimeApi +impl ProvideRuntimeApi for Client +where + B: backend::Backend, + E: CallExecutor + Clone + Send + Sync, + Block: BlockT, + RA: ConstructRuntimeApi, { - type Api = >::RuntimeApi; + type Api = >::RuntimeApi; - fn runtime_api<'a>(&'a self) -> ApiRef<'a, Self::Api> { - RA::construct_runtime_api(self) - } + fn runtime_api<'a>(&'a self) -> ApiRef<'a, Self::Api> { + RA::construct_runtime_api(self) + } } -impl CallRuntimeAt for Client where - B: backend::Backend, - E: CallExecutor + Clone + Send + Sync, - Block: BlockT +impl CallRuntimeAt for Client +where + B: backend::Backend, + E: CallExecutor + Clone + Send + Sync, + Block: BlockT, { - fn call_api_at< - R: Encode + Decode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, - >( - &self, - at: &BlockId, - function: &'static str, - args: Vec, - changes: &mut OverlayedChanges, - initialized_block: &mut Option>, - native_call: Option, - context: ExecutionContext, - ) -> error::Result> { - let manager = match context { - ExecutionContext::BlockConstruction => self.execution_strategies.block_construction.get_manager(), - ExecutionContext::Syncing => self.execution_strategies.syncing.get_manager(), - ExecutionContext::Importing => self.execution_strategies.importing.get_manager(), - ExecutionContext::OffchainWorker(_) => self.execution_strategies.offchain_worker.get_manager(), - ExecutionContext::Other => self.execution_strategies.other.get_manager(), - }; - - let mut offchain_extensions = match context { - ExecutionContext::OffchainWorker(ext) => Some(ext), - _ => None, - }; - - self.executor.contextual_call::<_, _, fn(_,_) -> _,_,_>( - at, - function, - &args, - changes, 
- initialized_block, - || self.prepare_environment_block(at), - manager, - native_call, - offchain_extensions.as_mut(), - ) - } - - fn runtime_version_at(&self, at: &BlockId) -> error::Result { - self.runtime_version_at(at) - } + fn call_api_at< + R: Encode + Decode + PartialEq, + NC: FnOnce() -> result::Result + UnwindSafe, + >( + &self, + at: &BlockId, + function: &'static str, + args: Vec, + changes: &mut OverlayedChanges, + initialized_block: &mut Option>, + native_call: Option, + context: ExecutionContext, + ) -> error::Result> { + let manager = match context { + ExecutionContext::BlockConstruction => { + self.execution_strategies.block_construction.get_manager() + } + ExecutionContext::Syncing => self.execution_strategies.syncing.get_manager(), + ExecutionContext::Importing => self.execution_strategies.importing.get_manager(), + ExecutionContext::OffchainWorker(_) => { + self.execution_strategies.offchain_worker.get_manager() + } + ExecutionContext::Other => self.execution_strategies.other.get_manager(), + }; + + let mut offchain_extensions = match context { + ExecutionContext::OffchainWorker(ext) => Some(ext), + _ => None, + }; + + self.executor.contextual_call::<_, _, fn(_, _) -> _, _, _>( + at, + function, + &args, + changes, + initialized_block, + || self.prepare_environment_block(at), + manager, + native_call, + offchain_extensions.as_mut(), + ) + } + + fn runtime_version_at(&self, at: &BlockId) -> error::Result { + self.runtime_version_at(at) + } } -impl consensus::BlockImport for Client where - B: backend::Backend, - E: CallExecutor + Clone + Send + Sync, - Block: BlockT, +impl consensus::BlockImport for Client +where + B: backend::Backend, + E: CallExecutor + Clone + Send + Sync, + Block: BlockT, { - type Error = ConsensusError; - - /// Import a checked and validated block. If a justification is provided in - /// `ImportBlock` then `finalized` *must* be true. - fn import_block( - &self, - import_block: ImportBlock, - new_cache: HashMap>, - ) -> Result { - self.lock_import_and_run(|operation| { - self.apply_block(operation, import_block, new_cache) - }).map_err(|e| ConsensusErrorKind::ClientImport(e.to_string()).into()) - } - - /// Check block preconditions. - fn check_block( - &self, - hash: Block::Hash, - parent_hash: Block::Hash, - ) -> Result { - match self.block_status(&BlockId::Hash(parent_hash)) - .map_err(|e| ConsensusError::from(ConsensusErrorKind::ClientImport(e.to_string())))? - { - BlockStatus::InChainWithState | BlockStatus::Queued => {}, - BlockStatus::Unknown | BlockStatus::InChainPruned => return Ok(ImportResult::UnknownParent), - BlockStatus::KnownBad => return Ok(ImportResult::KnownBad), - } - - match self.block_status(&BlockId::Hash(hash)) - .map_err(|e| ConsensusError::from(ConsensusErrorKind::ClientImport(e.to_string())))? - { - BlockStatus::InChainWithState | BlockStatus::Queued => return Ok(ImportResult::AlreadyInChain), - BlockStatus::Unknown | BlockStatus::InChainPruned => {}, - BlockStatus::KnownBad => return Ok(ImportResult::KnownBad), - } - - Ok(ImportResult::imported()) - } + type Error = ConsensusError; + + /// Import a checked and validated block. If a justification is provided in + /// `ImportBlock` then `finalized` *must* be true. + fn import_block( + &self, + import_block: ImportBlock, + new_cache: HashMap>, + ) -> Result { + self.lock_import_and_run(|operation| self.apply_block(operation, import_block, new_cache)) + .map_err(|e| ConsensusErrorKind::ClientImport(e.to_string()).into()) + } + + /// Check block preconditions. 
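Aside: `call_api_at` above selects one of five configured execution strategies based on the `ExecutionContext` it is called in. A reduced standalone sketch of that dispatch (both enums trimmed to bare variants; the real `OffchainWorker` variant also carries the offchain extensions):

```rust
#[derive(Debug, PartialEq)]
enum Strategy {
    NativeElseWasm,
    AlwaysWasm,
}

enum Context {
    BlockConstruction,
    Syncing,
    Importing,
    OffchainWorker,
    Other,
}

// Per-context strategy table, standing in for `self.execution_strategies`.
struct Strategies {
    block_construction: Strategy,
    syncing: Strategy,
    importing: Strategy,
    offchain_worker: Strategy,
    other: Strategy,
}

impl Strategies {
    // Mirrors the match in `call_api_at`: every context selects its own entry.
    fn for_context(&self, context: &Context) -> &Strategy {
        match context {
            Context::BlockConstruction => &self.block_construction,
            Context::Syncing => &self.syncing,
            Context::Importing => &self.importing,
            Context::OffchainWorker => &self.offchain_worker,
            Context::Other => &self.other,
        }
    }
}

fn main() {
    let strategies = Strategies {
        block_construction: Strategy::AlwaysWasm,
        syncing: Strategy::NativeElseWasm,
        importing: Strategy::NativeElseWasm,
        offchain_worker: Strategy::NativeElseWasm,
        other: Strategy::NativeElseWasm,
    };
    assert_eq!(
        *strategies.for_context(&Context::BlockConstruction),
        Strategy::AlwaysWasm
    );
}
```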
+ fn check_block( + &self, + hash: Block::Hash, + parent_hash: Block::Hash, + ) -> Result { + match self + .block_status(&BlockId::Hash(parent_hash)) + .map_err(|e| ConsensusError::from(ConsensusErrorKind::ClientImport(e.to_string())))? + { + BlockStatus::InChainWithState | BlockStatus::Queued => {} + BlockStatus::Unknown | BlockStatus::InChainPruned => { + return Ok(ImportResult::UnknownParent); + } + BlockStatus::KnownBad => return Ok(ImportResult::KnownBad), + } + + match self + .block_status(&BlockId::Hash(hash)) + .map_err(|e| ConsensusError::from(ConsensusErrorKind::ClientImport(e.to_string())))? + { + BlockStatus::InChainWithState | BlockStatus::Queued => { + return Ok(ImportResult::AlreadyInChain); + } + BlockStatus::Unknown | BlockStatus::InChainPruned => {} + BlockStatus::KnownBad => return Ok(ImportResult::KnownBad), + } + + Ok(ImportResult::imported()) + } } -impl CurrentHeight for Client where - B: backend::Backend, - E: CallExecutor, - Block: BlockT, +impl CurrentHeight for Client +where + B: backend::Backend, + E: CallExecutor, + Block: BlockT, { - type BlockNumber = ::Number; - fn current_height(&self) -> Self::BlockNumber { - self.backend.blockchain().info().map(|i| i.best_number).unwrap_or_else(|_| Zero::zero()) - } + type BlockNumber = ::Number; + fn current_height(&self) -> Self::BlockNumber { + self.backend + .blockchain() + .info() + .map(|i| i.best_number) + .unwrap_or_else(|_| Zero::zero()) + } } -impl BlockNumberToHash for Client where - B: backend::Backend, - E: CallExecutor, - Block: BlockT, +impl BlockNumberToHash for Client +where + B: backend::Backend, + E: CallExecutor, + Block: BlockT, { - type BlockNumber = ::Number; - type Hash = Block::Hash; - fn block_number_to_hash(&self, n: Self::BlockNumber) -> Option { - self.block_hash(n).unwrap_or(None) - } + type BlockNumber = ::Number; + type Hash = Block::Hash; + fn block_number_to_hash(&self, n: Self::BlockNumber) -> Option { + self.block_hash(n).unwrap_or(None) + } } - impl BlockchainEvents for Client where - E: CallExecutor, - Block: BlockT, + E: CallExecutor, + Block: BlockT, { - /// Get block import event stream. - fn import_notification_stream(&self) -> ImportNotifications { - let (sink, stream) = mpsc::unbounded(); - self.import_notification_sinks.lock().push(sink); - stream - } - - fn finality_notification_stream(&self) -> FinalityNotifications { - let (sink, stream) = mpsc::unbounded(); - self.finality_notification_sinks.lock().push(sink); - stream - } - - /// Get storage changes event stream. - fn storage_changes_notification_stream(&self, filter_keys: Option<&[StorageKey]>) -> error::Result> { - Ok(self.storage_notifications.lock().listen(filter_keys)) - } + /// Get block import event stream. + fn import_notification_stream(&self) -> ImportNotifications { + let (sink, stream) = mpsc::unbounded(); + self.import_notification_sinks.lock().push(sink); + stream + } + + fn finality_notification_stream(&self) -> FinalityNotifications { + let (sink, stream) = mpsc::unbounded(); + self.finality_notification_sinks.lock().push(sink); + stream + } + + /// Get storage changes event stream. 
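Aside: all three streams in the `BlockchainEvents` impl above follow one pattern: hand out the receiving half of an unbounded channel, keep the sender in a shared `Vec`, and prune closed senders on the next broadcast via `retain`. A standalone sketch of that pattern using `std::sync::mpsc` in place of the futures channels and parking_lot mutex used here:

```rust
use std::sync::mpsc::{channel, Receiver, Sender};
use std::sync::Mutex;

// Registry of listeners; senders whose receiver was dropped are pruned lazily
// on the next broadcast, exactly like the `sinks.retain(...)` calls above.
struct Notifier {
    sinks: Mutex<Vec<Sender<u64>>>,
}

impl Notifier {
    fn new() -> Self {
        Notifier { sinks: Mutex::new(Vec::new()) }
    }

    // Counterpart of `import_notification_stream` and friends.
    fn subscribe(&self) -> Receiver<u64> {
        let (sink, stream) = channel();
        self.sinks.lock().unwrap().push(sink);
        stream
    }

    // Broadcast, dropping any sink whose subscriber has gone away.
    fn notify(&self, value: u64) {
        self.sinks
            .lock()
            .unwrap()
            .retain(|sink| sink.send(value).is_ok());
    }
}

fn main() {
    let notifier = Notifier::new();
    let alive = notifier.subscribe();
    drop(notifier.subscribe()); // a subscriber that went away
    notifier.notify(7);
    assert_eq!(alive.recv().unwrap(), 7);
    // The dead sink was pruned during `notify`.
    assert_eq!(notifier.sinks.lock().unwrap().len(), 1);
}
```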
+ fn storage_changes_notification_stream(
+ &self,
+ filter_keys: Option<&[StorageKey]>,
+ ) -> error::Result> {
+ Ok(self.storage_notifications.lock().listen(filter_keys))
+ }
}

impl ChainHead for Client where
- B: backend::Backend,
- E: CallExecutor,
- Block: BlockT,
+ B: backend::Backend,
+ E: CallExecutor,
+ Block: BlockT,
{
- fn best_block_header(&self) -> error::Result<::Header> {
- Client::best_block_header(self)
- }
+ fn best_block_header(&self) -> error::Result<::Header> {
+ Client::best_block_header(self)
+ }

- fn leaves(&self) -> Result::Hash>, error::Error> {
- self.backend.blockchain().leaves()
- }
+ fn leaves(&self) -> Result::Hash>, error::Error> {
+ self.backend.blockchain().leaves()
+ }
}

impl BlockBody for Client
- where
- B: backend::Backend,
- E: CallExecutor,
- Block: BlockT,
+where
+ B: backend::Backend,
+ E: CallExecutor,
+ Block: BlockT,
{
- fn block_body(&self, id: &BlockId) -> error::Result::Extrinsic>>> {
- self.body(id)
- }
+ fn block_body(
+ &self,
+ id: &BlockId,
+ ) -> error::Result::Extrinsic>>> {
+ self.body(id)
+ }
}

impl backend::AuxStore for Client
- where
- B: backend::Backend,
- E: CallExecutor,
- Block: BlockT,
+where
+ B: backend::Backend,
+ E: CallExecutor,
+ Block: BlockT,
{
- /// Insert auxiliary data into key-value store.
- fn insert_aux<
- 'a,
- 'b: 'a,
- 'c: 'a,
- I: IntoIterator,
- D: IntoIterator,
- >(&self, insert: I, delete: D) -> error::Result<()> {
- // Import is locked here because we may have other block import
- // operations that tries to set aux data. Note that for consensus
- // layer, one can always use atomic operations to make sure
- // import is only locked once.
- self.lock_import_and_run(|operation| {
- self.apply_aux(operation, insert, delete)
- })
- }
- /// Query auxiliary data from key-value store.
- fn get_aux(&self, key: &[u8]) -> error::Result>> {
- crate::backend::AuxStore::get_aux(&*self.backend, key)
- }
+ /// Insert auxiliary data into key-value store.
+ fn insert_aux<
+ 'a,
+ 'b: 'a,
+ 'c: 'a,
+ I: IntoIterator,
+ D: IntoIterator,
+ >(
+ &self,
+ insert: I,
+ delete: D,
+ ) -> error::Result<()> {
+ // Import is locked here because we may have other block import
+ // operations that try to set aux data. Note that for the consensus
+ // layer, one can always use atomic operations to make sure
+ // import is only locked once.
+ self.lock_import_and_run(|operation| self.apply_aux(operation, insert, delete))
+ }
+ /// Query auxiliary data from key-value store.
+ fn get_aux(&self, key: &[u8]) -> error::Result>> { + crate::backend::AuxStore::get_aux(&*self.backend, key) + } } #[cfg(test)] pub(crate) mod tests { - use std::collections::HashMap; - use super::*; - use primitives::twox_128; - use runtime_primitives::traits::DigestItem as DigestItemT; - use runtime_primitives::generic::DigestItem; - use test_client::{self, TestClient, AccountKeyring}; - use consensus::BlockOrigin; - use test_client::client::backend::Backend as TestBackend; - use test_client::BlockBuilderExt; - use test_client::runtime::{self, Block, Transfer, RuntimeApi, TestAPI}; - - /// Returns tuple, consisting of: - /// 1) test client pre-filled with blocks changing balances; - /// 2) roots of changes tries for these blocks - /// 3) test cases in form (begin, end, key, vec![(block, extrinsic)]) that are required to pass - pub fn prepare_client_with_key_changes() -> ( - test_client::client::Client, - Vec, - Vec<(u64, u64, Vec, Vec<(u64, u32)>)>, - ) { - // prepare block structure - let blocks_transfers = vec![ - vec![(AccountKeyring::Alice, AccountKeyring::Dave), (AccountKeyring::Bob, AccountKeyring::Dave)], - vec![(AccountKeyring::Charlie, AccountKeyring::Eve)], - vec![], - vec![(AccountKeyring::Alice, AccountKeyring::Dave)], - ]; - - // prepare client ang import blocks - let mut local_roots = Vec::new(); - let remote_client = test_client::new_with_changes_trie(); - let mut nonces: HashMap<_, u64> = Default::default(); - for (i, block_transfers) in blocks_transfers.into_iter().enumerate() { - let mut builder = remote_client.new_block().unwrap(); - for (from, to) in block_transfers { - builder.push_transfer(Transfer { - from: from.into(), - to: to.into(), - amount: 1, - nonce: *nonces.entry(from).and_modify(|n| { *n = *n + 1 }).or_default(), - }).unwrap(); - } - remote_client.import(BlockOrigin::Own, builder.bake().unwrap()).unwrap(); - - let header = remote_client.header(&BlockId::Number(i as u64 + 1)).unwrap().unwrap(); - let trie_root = header.digest().log(DigestItem::as_changes_trie_root) - .map(|root| H256::from_slice(root.as_ref())) - .unwrap(); - local_roots.push(trie_root); - } - - // prepare test cases - let alice = twox_128(&runtime::system::balance_of_key(AccountKeyring::Alice.into())).to_vec(); - let bob = twox_128(&runtime::system::balance_of_key(AccountKeyring::Bob.into())).to_vec(); - let charlie = twox_128(&runtime::system::balance_of_key(AccountKeyring::Charlie.into())).to_vec(); - let dave = twox_128(&runtime::system::balance_of_key(AccountKeyring::Dave.into())).to_vec(); - let eve = twox_128(&runtime::system::balance_of_key(AccountKeyring::Eve.into())).to_vec(); - let ferdie = twox_128(&runtime::system::balance_of_key(AccountKeyring::Ferdie.into())).to_vec(); - let test_cases = vec![ - (1, 4, alice.clone(), vec![(4, 0), (1, 0)]), - (1, 3, alice.clone(), vec![(1, 0)]), - (2, 4, alice.clone(), vec![(4, 0)]), - (2, 3, alice.clone(), vec![]), - - (1, 4, bob.clone(), vec![(1, 1)]), - (1, 1, bob.clone(), vec![(1, 1)]), - (2, 4, bob.clone(), vec![]), - - (1, 4, charlie.clone(), vec![(2, 0)]), - - (1, 4, dave.clone(), vec![(4, 0), (1, 1), (1, 0)]), - (1, 1, dave.clone(), vec![(1, 1), (1, 0)]), - (3, 4, dave.clone(), vec![(4, 0)]), - - (1, 4, eve.clone(), vec![(2, 0)]), - (1, 1, eve.clone(), vec![]), - (3, 4, eve.clone(), vec![]), - - (1, 4, ferdie.clone(), vec![]), - ]; - - (remote_client, local_roots, test_cases) - } - - #[test] - fn client_initializes_from_genesis_ok() { - let client = test_client::new(); - - assert_eq!( - client.runtime_api().balance_of( - 
&BlockId::Number(client.info().unwrap().chain.best_number), - AccountKeyring::Alice.into() - ).unwrap(), - 1000 - ); - assert_eq!( - client.runtime_api().balance_of( - &BlockId::Number(client.info().unwrap().chain.best_number), - AccountKeyring::Ferdie.into() - ).unwrap(), - 0 - ); - } - - #[test] - fn block_builder_works_with_no_transactions() { - let client = test_client::new(); - - let builder = client.new_block().unwrap(); - - client.import(BlockOrigin::Own, builder.bake().unwrap()).unwrap(); - - assert_eq!(client.info().unwrap().chain.best_number, 1); - } - - #[test] - fn block_builder_works_with_transactions() { - let client = test_client::new(); - - let mut builder = client.new_block().unwrap(); - - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 42, - nonce: 0, - }).unwrap(); - - client.import(BlockOrigin::Own, builder.bake().unwrap()).unwrap(); - - assert_eq!(client.info().unwrap().chain.best_number, 1); - assert!(client.state_at(&BlockId::Number(1)).unwrap().pairs() != client.state_at(&BlockId::Number(0)).unwrap().pairs()); - assert_eq!( - client.runtime_api().balance_of( - &BlockId::Number(client.info().unwrap().chain.best_number), - AccountKeyring::Alice.into() - ).unwrap(), - 958 - ); - assert_eq!( - client.runtime_api().balance_of( - &BlockId::Number(client.info().unwrap().chain.best_number), - AccountKeyring::Ferdie.into() - ).unwrap(), - 42 - ); - } - - #[test] - fn block_builder_does_not_include_invalid() { - let client = test_client::new(); - - let mut builder = client.new_block().unwrap(); - - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 42, - nonce: 0, - }).unwrap(); - - assert!(builder.push_transfer(Transfer { - from: AccountKeyring::Eve.into(), - to: AccountKeyring::Alice.into(), - amount: 42, - nonce: 0, - }).is_err()); - - client.import(BlockOrigin::Own, builder.bake().unwrap()).unwrap(); - - assert_eq!(client.info().unwrap().chain.best_number, 1); - assert!(client.state_at(&BlockId::Number(1)).unwrap().pairs() != client.state_at(&BlockId::Number(0)).unwrap().pairs()); - assert_eq!(client.body(&BlockId::Number(1)).unwrap().unwrap().len(), 1) - } - - #[test] - fn best_containing_with_genesis_block() { - // block tree: - // G - - let client = test_client::new(); - - let genesis_hash = client.info().unwrap().chain.genesis_hash; - - assert_eq!(genesis_hash.clone(), client.best_containing(genesis_hash.clone(), None).unwrap().unwrap()); - } - - #[test] - fn best_containing_with_hash_not_found() { - // block tree: - // G - - let client = test_client::new(); - - let uninserted_block = client.new_block().unwrap().bake().unwrap(); - - assert_eq!(None, client.best_containing(uninserted_block.hash().clone(), None).unwrap()); - } - - #[test] - fn uncles_with_only_ancestors() { - // block tree: - // G -> A1 -> A2 - let client = test_client::new(); - - // G -> A1 - let a1 = client.new_block().unwrap().bake().unwrap(); - client.import(BlockOrigin::Own, a1.clone()).unwrap(); - - // A1 -> A2 - let a2 = client.new_block().unwrap().bake().unwrap(); - client.import(BlockOrigin::Own, a2.clone()).unwrap(); - let v: Vec = Vec::new(); - assert_eq!(v, client.uncles(a2.hash(), 3).unwrap()); - } - - #[test] - fn uncles_with_multiple_forks() { - // block tree: - // G -> A1 -> A2 -> A3 -> A4 -> A5 - // A1 -> B2 -> B3 -> B4 - // B2 -> C3 - // A1 -> D2 - let client = test_client::new(); - - // G -> A1 - let a1 = client.new_block().unwrap().bake().unwrap(); 
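// The fork trees in these tests are all built with the same three-step
// pattern: derive a builder from a parent, bake the block, import it.
// An illustrative helper (a sketch against the client API used in these
// tests; no such helper exists in this file, and H256 is assumed to be the
// test block's hash type as elsewhere in this module):
let build_on = |parent_hash: H256| {
    let block = client
        .new_block_at(&BlockId::Hash(parent_hash))
        .unwrap()
        .bake()
        .unwrap();
    client.import(BlockOrigin::Own, block.clone()).unwrap();
    block
};
// With it, each fork extension becomes one call, e.g. `let a2 = build_on(a1.hash());`.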
- client.import(BlockOrigin::Own, a1.clone()).unwrap(); - - // A1 -> A2 - let a2 = client.new_block_at(&BlockId::Hash(a1.hash())).unwrap().bake().unwrap(); - client.import(BlockOrigin::Own, a2.clone()).unwrap(); - - // A2 -> A3 - let a3 = client.new_block_at(&BlockId::Hash(a2.hash())).unwrap().bake().unwrap(); - client.import(BlockOrigin::Own, a3.clone()).unwrap(); - - // A3 -> A4 - let a4 = client.new_block_at(&BlockId::Hash(a3.hash())).unwrap().bake().unwrap(); - client.import(BlockOrigin::Own, a4.clone()).unwrap(); - - // A4 -> A5 - let a5 = client.new_block_at(&BlockId::Hash(a4.hash())).unwrap().bake().unwrap(); - client.import(BlockOrigin::Own, a5.clone()).unwrap(); - - // A1 -> B2 - let mut builder = client.new_block_at(&BlockId::Hash(a1.hash())).unwrap(); - // this push is required as otherwise B2 has the same hash as A2 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 41, - nonce: 0, - }).unwrap(); - let b2 = builder.bake().unwrap(); - client.import(BlockOrigin::Own, b2.clone()).unwrap(); - - // B2 -> B3 - let b3 = client.new_block_at(&BlockId::Hash(b2.hash())).unwrap().bake().unwrap(); - client.import(BlockOrigin::Own, b3.clone()).unwrap(); - - // B3 -> B4 - let b4 = client.new_block_at(&BlockId::Hash(b3.hash())).unwrap().bake().unwrap(); - client.import(BlockOrigin::Own, b4.clone()).unwrap(); - - // // B2 -> C3 - let mut builder = client.new_block_at(&BlockId::Hash(b2.hash())).unwrap(); - // this push is required as otherwise C3 has the same hash as B3 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 1, - nonce: 1, - }).unwrap(); - let c3 = builder.bake().unwrap(); - client.import(BlockOrigin::Own, c3.clone()).unwrap(); - - // A1 -> D2 - let mut builder = client.new_block_at(&BlockId::Hash(a1.hash())).unwrap(); - // this push is required as otherwise D2 has the same hash as B2 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 1, - nonce: 0, - }).unwrap(); - let d2 = builder.bake().unwrap(); - client.import(BlockOrigin::Own, d2.clone()).unwrap(); - - let genesis_hash = client.info().unwrap().chain.genesis_hash; - - let uncles1 = client.uncles(a4.hash(), 10).unwrap(); - assert_eq!(vec![b2.hash(), d2.hash()], uncles1); - - let uncles2 = client.uncles(a4.hash(), 0).unwrap(); - assert_eq!(0, uncles2.len()); - - let uncles3 = client.uncles(a1.hash(), 10).unwrap(); - assert_eq!(0, uncles3.len()); - - let uncles4 = client.uncles(genesis_hash, 10).unwrap(); - assert_eq!(0, uncles4.len()); - - let uncles5 = client.uncles(d2.hash(), 10).unwrap(); - assert_eq!(vec![a2.hash(), b2.hash()], uncles5); - - let uncles6 = client.uncles(b3.hash(), 1).unwrap(); - assert_eq!(vec![c3.hash()], uncles6); - } - - #[test] - fn best_containing_with_single_chain_3_blocks() { - // block tree: - // G -> A1 -> A2 - - let client = test_client::new(); - - // G -> A1 - let a1 = client.new_block().unwrap().bake().unwrap(); - client.import(BlockOrigin::Own, a1.clone()).unwrap(); - - // A1 -> A2 - let a2 = client.new_block().unwrap().bake().unwrap(); - client.import(BlockOrigin::Own, a2.clone()).unwrap(); - - let genesis_hash = client.info().unwrap().chain.genesis_hash; - - assert_eq!(a2.hash(), client.best_containing(genesis_hash, None).unwrap().unwrap()); - assert_eq!(a2.hash(), client.best_containing(a1.hash(), None).unwrap().unwrap()); - 
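// What these assertions pin down: `best_containing(target, None)` answers
// "among all leaves, which best block still has `target` in its chain?".
// A self-contained reference sketch of that rule (toy ids and a plain
// parent map; illustrative only, the real client also breaks ties between
// equally good leaves via the current best chain):
fn best_containing_ref(
    parent: &std::collections::HashMap<u64, (u64, u64)>, // id -> (parent id, number); genesis has no entry
    leaves: &[u64],
    target: u64,
) -> Option<u64> {
    let number = |id: u64| parent.get(&id).map(|&(_, n)| n).unwrap_or(0);
    // walk a leaf's ancestry back towards genesis, looking for `target`
    let contains = |mut id: u64| loop {
        if id == target {
            return true;
        }
        match parent.get(&id) {
            Some(&(p, _)) => id = p,
            None => return false, // reached genesis without seeing `target`
        }
    };
    leaves.iter().copied().filter(|&l| contains(l)).max_by_key(|&l| number(l))
}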
assert_eq!(a2.hash(), client.best_containing(a2.hash(), None).unwrap().unwrap()); - } - - #[test] - fn best_containing_with_multiple_forks() { - // NOTE: we use the version of the trait from `test_client` - // because that is actually different than the version linked to - // in the test facade crate. - use test_client::blockchain::Backend as BlockchainBackendT; - - // block tree: - // G -> A1 -> A2 -> A3 -> A4 -> A5 - // A1 -> B2 -> B3 -> B4 - // B2 -> C3 - // A1 -> D2 - let client = test_client::new(); - - // G -> A1 - let a1 = client.new_block().unwrap().bake().unwrap(); - client.import(BlockOrigin::Own, a1.clone()).unwrap(); - - // A1 -> A2 - let a2 = client.new_block_at(&BlockId::Hash(a1.hash())).unwrap().bake().unwrap(); - client.import(BlockOrigin::Own, a2.clone()).unwrap(); - - // A2 -> A3 - let a3 = client.new_block_at(&BlockId::Hash(a2.hash())).unwrap().bake().unwrap(); - client.import(BlockOrigin::Own, a3.clone()).unwrap(); - - // A3 -> A4 - let a4 = client.new_block_at(&BlockId::Hash(a3.hash())).unwrap().bake().unwrap(); - client.import(BlockOrigin::Own, a4.clone()).unwrap(); - - // A4 -> A5 - let a5 = client.new_block_at(&BlockId::Hash(a4.hash())).unwrap().bake().unwrap(); - client.import(BlockOrigin::Own, a5.clone()).unwrap(); - - // A1 -> B2 - let mut builder = client.new_block_at(&BlockId::Hash(a1.hash())).unwrap(); - // this push is required as otherwise B2 has the same hash as A2 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 41, - nonce: 0, - }).unwrap(); - let b2 = builder.bake().unwrap(); - client.import(BlockOrigin::Own, b2.clone()).unwrap(); - - // B2 -> B3 - let b3 = client.new_block_at(&BlockId::Hash(b2.hash())).unwrap().bake().unwrap(); - client.import(BlockOrigin::Own, b3.clone()).unwrap(); - - // B3 -> B4 - let b4 = client.new_block_at(&BlockId::Hash(b3.hash())).unwrap().bake().unwrap(); - client.import(BlockOrigin::Own, b4.clone()).unwrap(); - - // // B2 -> C3 - let mut builder = client.new_block_at(&BlockId::Hash(b2.hash())).unwrap(); - // this push is required as otherwise C3 has the same hash as B3 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 1, - nonce: 1, - }).unwrap(); - let c3 = builder.bake().unwrap(); - client.import(BlockOrigin::Own, c3.clone()).unwrap(); - - // A1 -> D2 - let mut builder = client.new_block_at(&BlockId::Hash(a1.hash())).unwrap(); - // this push is required as otherwise D2 has the same hash as B2 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 1, - nonce: 0, - }).unwrap(); - let d2 = builder.bake().unwrap(); - client.import(BlockOrigin::Own, d2.clone()).unwrap(); - - assert_eq!(client.info().unwrap().chain.best_hash, a5.hash()); - - let genesis_hash = client.info().unwrap().chain.genesis_hash; - let leaves = BlockchainBackendT::leaves(client.backend().blockchain()).unwrap(); - - assert!(leaves.contains(&a5.hash())); - assert!(leaves.contains(&b4.hash())); - assert!(leaves.contains(&c3.hash())); - assert!(leaves.contains(&d2.hash())); - assert_eq!(leaves.len(), 4); - - // search without restriction - - assert_eq!(a5.hash(), client.best_containing(genesis_hash, None).unwrap().unwrap()); - assert_eq!(a5.hash(), client.best_containing(a1.hash(), None).unwrap().unwrap()); - assert_eq!(a5.hash(), client.best_containing(a2.hash(), None).unwrap().unwrap()); - 
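// The `leaves()` call asserted on above returns exactly the blocks that no
// other imported block names as parent (a5, b4, c3 and d2 here). Minimal
// sketch of that definition over the same toy parent-map shape as
// `best_containing_ref` (illustrative; ignores the genesis-only corner case):
fn leaves_ref(parent: &std::collections::HashMap<u64, (u64, u64)>) -> Vec<u64> {
    use std::collections::HashSet;
    let parents: HashSet<u64> = parent.values().map(|&(p, _)| p).collect();
    parent.keys().copied().filter(|id| !parents.contains(id)).collect()
}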
assert_eq!(a5.hash(), client.best_containing(a3.hash(), None).unwrap().unwrap()); - assert_eq!(a5.hash(), client.best_containing(a4.hash(), None).unwrap().unwrap()); - assert_eq!(a5.hash(), client.best_containing(a5.hash(), None).unwrap().unwrap()); - - assert_eq!(b4.hash(), client.best_containing(b2.hash(), None).unwrap().unwrap()); - assert_eq!(b4.hash(), client.best_containing(b3.hash(), None).unwrap().unwrap()); - assert_eq!(b4.hash(), client.best_containing(b4.hash(), None).unwrap().unwrap()); - - assert_eq!(c3.hash(), client.best_containing(c3.hash(), None).unwrap().unwrap()); - - assert_eq!(d2.hash(), client.best_containing(d2.hash(), None).unwrap().unwrap()); - - - // search only blocks with number <= 5. equivalent to without restriction for this scenario - - assert_eq!(a5.hash(), client.best_containing(genesis_hash, Some(5)).unwrap().unwrap()); - assert_eq!(a5.hash(), client.best_containing(a1.hash(), Some(5)).unwrap().unwrap()); - assert_eq!(a5.hash(), client.best_containing(a2.hash(), Some(5)).unwrap().unwrap()); - assert_eq!(a5.hash(), client.best_containing(a3.hash(), Some(5)).unwrap().unwrap()); - assert_eq!(a5.hash(), client.best_containing(a4.hash(), Some(5)).unwrap().unwrap()); - assert_eq!(a5.hash(), client.best_containing(a5.hash(), Some(5)).unwrap().unwrap()); - - assert_eq!(b4.hash(), client.best_containing(b2.hash(), Some(5)).unwrap().unwrap()); - assert_eq!(b4.hash(), client.best_containing(b3.hash(), Some(5)).unwrap().unwrap()); - assert_eq!(b4.hash(), client.best_containing(b4.hash(), Some(5)).unwrap().unwrap()); - - assert_eq!(c3.hash(), client.best_containing(c3.hash(), Some(5)).unwrap().unwrap()); - - assert_eq!(d2.hash(), client.best_containing(d2.hash(), Some(5)).unwrap().unwrap()); - - - // search only blocks with number <= 4 - - assert_eq!(a4.hash(), client.best_containing(genesis_hash, Some(4)).unwrap().unwrap()); - assert_eq!(a4.hash(), client.best_containing(a1.hash(), Some(4)).unwrap().unwrap()); - assert_eq!(a4.hash(), client.best_containing(a2.hash(), Some(4)).unwrap().unwrap()); - assert_eq!(a4.hash(), client.best_containing(a3.hash(), Some(4)).unwrap().unwrap()); - assert_eq!(a4.hash(), client.best_containing(a4.hash(), Some(4)).unwrap().unwrap()); - assert_eq!(None, client.best_containing(a5.hash(), Some(4)).unwrap()); - - assert_eq!(b4.hash(), client.best_containing(b2.hash(), Some(4)).unwrap().unwrap()); - assert_eq!(b4.hash(), client.best_containing(b3.hash(), Some(4)).unwrap().unwrap()); - assert_eq!(b4.hash(), client.best_containing(b4.hash(), Some(4)).unwrap().unwrap()); - - assert_eq!(c3.hash(), client.best_containing(c3.hash(), Some(4)).unwrap().unwrap()); - - assert_eq!(d2.hash(), client.best_containing(d2.hash(), Some(4)).unwrap().unwrap()); - - - // search only blocks with number <= 3 - - assert_eq!(a3.hash(), client.best_containing(genesis_hash, Some(3)).unwrap().unwrap()); - assert_eq!(a3.hash(), client.best_containing(a1.hash(), Some(3)).unwrap().unwrap()); - assert_eq!(a3.hash(), client.best_containing(a2.hash(), Some(3)).unwrap().unwrap()); - assert_eq!(a3.hash(), client.best_containing(a3.hash(), Some(3)).unwrap().unwrap()); - assert_eq!(None, client.best_containing(a4.hash(), Some(3)).unwrap()); - assert_eq!(None, client.best_containing(a5.hash(), Some(3)).unwrap()); - - assert_eq!(b3.hash(), client.best_containing(b2.hash(), Some(3)).unwrap().unwrap()); - assert_eq!(b3.hash(), client.best_containing(b3.hash(), Some(3)).unwrap().unwrap()); - assert_eq!(None, client.best_containing(b4.hash(), Some(3)).unwrap()); - - 
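// The `Some(max)` variant first walks each leaf back to its newest ancestor
// with number <= max and only then applies the same containment test. That
// is why `best_containing(a5.hash(), Some(4))` is `None` above: a5's leaf is
// capped back to a4, and a4's chain no longer contains a5. Sketch extending
// `best_containing_ref` (same toy shape; tie-breaking between equally high
// candidates is order-dependent here, unlike the real client):
fn best_containing_capped_ref(
    parent: &std::collections::HashMap<u64, (u64, u64)>,
    leaves: &[u64],
    target: u64,
    max: u64,
) -> Option<u64> {
    let number = |id: u64| parent.get(&id).map(|&(_, n)| n).unwrap_or(0);
    let step = |id: u64| parent.get(&id).map(|&(p, _)| p);
    let mut best = None;
    for &leaf in leaves {
        // cap the candidate at block number <= max
        let mut id = leaf;
        while number(id) > max {
            match step(id) {
                Some(p) => id = p,
                None => break,
            }
        }
        // the capped candidate must still have `target` in its chain
        let mut cur = id;
        let contains = loop {
            if cur == target {
                break true;
            }
            match step(cur) {
                Some(p) => cur = p,
                None => break false,
            }
        };
        if contains && best.map_or(true, |b| number(id) >= number(b)) {
            best = Some(id);
        }
    }
    best
}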
assert_eq!(c3.hash(), client.best_containing(c3.hash(), Some(3)).unwrap().unwrap()); - - assert_eq!(d2.hash(), client.best_containing(d2.hash(), Some(3)).unwrap().unwrap()); - - - // search only blocks with number <= 2 - - assert_eq!(a2.hash(), client.best_containing(genesis_hash, Some(2)).unwrap().unwrap()); - assert_eq!(a2.hash(), client.best_containing(a1.hash(), Some(2)).unwrap().unwrap()); - assert_eq!(a2.hash(), client.best_containing(a2.hash(), Some(2)).unwrap().unwrap()); - assert_eq!(None, client.best_containing(a3.hash(), Some(2)).unwrap()); - assert_eq!(None, client.best_containing(a4.hash(), Some(2)).unwrap()); - assert_eq!(None, client.best_containing(a5.hash(), Some(2)).unwrap()); - - assert_eq!(b2.hash(), client.best_containing(b2.hash(), Some(2)).unwrap().unwrap()); - assert_eq!(None, client.best_containing(b3.hash(), Some(2)).unwrap()); - assert_eq!(None, client.best_containing(b4.hash(), Some(2)).unwrap()); - - assert_eq!(None, client.best_containing(c3.hash(), Some(2)).unwrap()); - - assert_eq!(d2.hash(), client.best_containing(d2.hash(), Some(2)).unwrap().unwrap()); - - - // search only blocks with number <= 1 - - assert_eq!(a1.hash(), client.best_containing(genesis_hash, Some(1)).unwrap().unwrap()); - assert_eq!(a1.hash(), client.best_containing(a1.hash(), Some(1)).unwrap().unwrap()); - assert_eq!(None, client.best_containing(a2.hash(), Some(1)).unwrap()); - assert_eq!(None, client.best_containing(a3.hash(), Some(1)).unwrap()); - assert_eq!(None, client.best_containing(a4.hash(), Some(1)).unwrap()); - assert_eq!(None, client.best_containing(a5.hash(), Some(1)).unwrap()); - - assert_eq!(None, client.best_containing(b2.hash(), Some(1)).unwrap()); - assert_eq!(None, client.best_containing(b3.hash(), Some(1)).unwrap()); - assert_eq!(None, client.best_containing(b4.hash(), Some(1)).unwrap()); - - assert_eq!(None, client.best_containing(c3.hash(), Some(1)).unwrap()); - - assert_eq!(None, client.best_containing(d2.hash(), Some(1)).unwrap()); - - // search only blocks with number <= 0 - - assert_eq!(genesis_hash, client.best_containing(genesis_hash, Some(0)).unwrap().unwrap()); - assert_eq!(None, client.best_containing(a1.hash(), Some(0)).unwrap()); - assert_eq!(None, client.best_containing(a2.hash(), Some(0)).unwrap()); - assert_eq!(None, client.best_containing(a3.hash(), Some(0)).unwrap()); - assert_eq!(None, client.best_containing(a4.hash(), Some(0)).unwrap()); - assert_eq!(None, client.best_containing(a5.hash(), Some(0)).unwrap()); - - assert_eq!(None, client.best_containing(b2.hash(), Some(0)).unwrap()); - assert_eq!(None, client.best_containing(b3.hash(), Some(0)).unwrap()); - assert_eq!(None, client.best_containing(b4.hash(), Some(0)).unwrap()); - - assert_eq!(None, client.best_containing(c3.hash().clone(), Some(0)).unwrap()); - - assert_eq!(None, client.best_containing(d2.hash().clone(), Some(0)).unwrap()); - } - - #[test] - fn best_containing_with_max_depth_higher_than_best() { - // block tree: - // G -> A1 -> A2 - - let client = test_client::new(); - - // G -> A1 - let a1 = client.new_block().unwrap().bake().unwrap(); - client.import(BlockOrigin::Own, a1.clone()).unwrap(); - - // A1 -> A2 - let a2 = client.new_block().unwrap().bake().unwrap(); - client.import(BlockOrigin::Own, a2.clone()).unwrap(); - - let genesis_hash = client.info().unwrap().chain.genesis_hash; - - assert_eq!(a2.hash(), client.best_containing(genesis_hash, Some(10)).unwrap().unwrap()); - } - - #[test] - fn key_changes_works() { - let (client, _, test_cases) = prepare_client_with_key_changes(); - - 
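// Each case unpacked below is (begin, end, key, expected), where `expected`
// lists the (block number, extrinsic index) pairs at which `key` changed,
// newest first. A brute-force oracle over an explicit change log states the
// rule the changes-trie query must reproduce (illustrative data shape only,
// not the client's trie-based implementation):
fn key_changes_oracle(
    log: &[(u64, u32, Vec<u8>)], // (block, extrinsic, key), ascending
    begin: u64,
    end: u64,
    key: &[u8],
) -> Vec<(u64, u32)> {
    log.iter()
        .filter(|&&(b, _, ref k)| begin <= b && b <= end && k.as_slice() == key)
        .map(|&(b, e, _)| (b, e))
        .rev() // newest first, matching the expected vectors in these cases
        .collect()
}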
for (index, (begin, end, key, expected_result)) in test_cases.into_iter().enumerate() {
- let end = client.block_hash(end).unwrap().unwrap();
- let actual_result = client.key_changes(begin, BlockId::Hash(end), &StorageKey(key)).unwrap();
- match actual_result == expected_result {
- true => (),
- false => panic!(format!("Failed test {}: actual = {:?}, expected = {:?}",
- index, actual_result, expected_result)),
- }
- }
- }
-
- #[test]
- fn import_with_justification() {
- use test_client::blockchain::Backend;
-
- let client = test_client::new();
-
- // G -> A1
- let a1 = client.new_block().unwrap().bake().unwrap();
- client.import(BlockOrigin::Own, a1.clone()).unwrap();
-
- // A1 -> A2
- let a2 = client.new_block_at(&BlockId::Hash(a1.hash())).unwrap().bake().unwrap();
- client.import(BlockOrigin::Own, a2.clone()).unwrap();
-
- // A2 -> A3
- let justification = vec![1, 2, 3];
- let a3 = client.new_block_at(&BlockId::Hash(a2.hash())).unwrap().bake().unwrap();
- client.import_justified(BlockOrigin::Own, a3.clone(), justification.clone()).unwrap();
-
- assert_eq!(
- client.backend().blockchain().last_finalized().unwrap(),
- a3.hash(),
- );
-
- assert_eq!(
- client.backend().blockchain().justification(BlockId::Hash(a3.hash())).unwrap(),
- Some(justification),
- );
-
- assert_eq!(
- client.backend().blockchain().justification(BlockId::Hash(a1.hash())).unwrap(),
- None,
- );
-
- assert_eq!(
- client.backend().blockchain().justification(BlockId::Hash(a2.hash())).unwrap(),
- None,
- );
- }
+ use super::*;
+ use consensus::BlockOrigin;
+ use primitives::twox_128;
+ use runtime_primitives::generic::DigestItem;
+ use runtime_primitives::traits::DigestItem as DigestItemT;
+ use std::collections::HashMap;
+ use test_client::client::backend::Backend as TestBackend;
+ use test_client::runtime::{self, Block, RuntimeApi, TestAPI, Transfer};
+ use test_client::BlockBuilderExt;
+ use test_client::{self, AccountKeyring, TestClient};
+
+ /// Returns a tuple consisting of:
+ /// 1) test client pre-filled with blocks changing balances;
+ /// 2) roots of changes tries for these blocks
+ /// 3) test cases in the form (begin, end, key, vec![(block, extrinsic)]) that are required to pass
+ pub fn prepare_client_with_key_changes() -> (
+ test_client::client::Client,
+ Vec,
+ Vec<(u64, u64, Vec, Vec<(u64, u32)>)>,
+ ) {
+ // prepare block structure
+ let blocks_transfers = vec![
+ vec![
+ (AccountKeyring::Alice, AccountKeyring::Dave),
+ (AccountKeyring::Bob, AccountKeyring::Dave),
+ ],
+ vec![(AccountKeyring::Charlie, AccountKeyring::Eve)],
+ vec![],
+ vec![(AccountKeyring::Alice, AccountKeyring::Dave)],
+ ];
+
+ // prepare client and import blocks
+ let mut local_roots = Vec::new();
+ let remote_client = test_client::new_with_changes_trie();
+ let mut nonces: HashMap<_, u64> = Default::default();
+ for (i, block_transfers) in blocks_transfers.into_iter().enumerate() {
+ let mut builder = remote_client.new_block().unwrap();
+ for (from, to) in block_transfers {
+ builder
+ .push_transfer(Transfer {
+ from: from.into(),
+ to: to.into(),
+ amount: 1,
+ nonce: *nonces.entry(from).and_modify(|n| *n = *n + 1).or_default(),
+ })
+ .unwrap();
+ }
+ remote_client
+ .import(BlockOrigin::Own, builder.bake().unwrap())
+ .unwrap();
+
+ let header = remote_client
+ .header(&BlockId::Number(i as u64 + 1))
+ .unwrap()
+ .unwrap();
+ let trie_root = header
+ .digest()
+ .log(DigestItem::as_changes_trie_root)
+ .map(|root| H256::from_slice(root.as_ref()))
+ .unwrap();
+ local_roots.push(trie_root);
+ }
+
+ // prepare test cases
+ let alice =
twox_128(&runtime::system::balance_of_key( + AccountKeyring::Alice.into(), + )) + .to_vec(); + let bob = twox_128(&runtime::system::balance_of_key(AccountKeyring::Bob.into())).to_vec(); + let charlie = twox_128(&runtime::system::balance_of_key( + AccountKeyring::Charlie.into(), + )) + .to_vec(); + let dave = twox_128(&runtime::system::balance_of_key( + AccountKeyring::Dave.into(), + )) + .to_vec(); + let eve = twox_128(&runtime::system::balance_of_key(AccountKeyring::Eve.into())).to_vec(); + let ferdie = twox_128(&runtime::system::balance_of_key( + AccountKeyring::Ferdie.into(), + )) + .to_vec(); + let test_cases = vec![ + (1, 4, alice.clone(), vec![(4, 0), (1, 0)]), + (1, 3, alice.clone(), vec![(1, 0)]), + (2, 4, alice.clone(), vec![(4, 0)]), + (2, 3, alice.clone(), vec![]), + (1, 4, bob.clone(), vec![(1, 1)]), + (1, 1, bob.clone(), vec![(1, 1)]), + (2, 4, bob.clone(), vec![]), + (1, 4, charlie.clone(), vec![(2, 0)]), + (1, 4, dave.clone(), vec![(4, 0), (1, 1), (1, 0)]), + (1, 1, dave.clone(), vec![(1, 1), (1, 0)]), + (3, 4, dave.clone(), vec![(4, 0)]), + (1, 4, eve.clone(), vec![(2, 0)]), + (1, 1, eve.clone(), vec![]), + (3, 4, eve.clone(), vec![]), + (1, 4, ferdie.clone(), vec![]), + ]; + + (remote_client, local_roots, test_cases) + } + + #[test] + fn client_initializes_from_genesis_ok() { + let client = test_client::new(); + + assert_eq!( + client + .runtime_api() + .balance_of( + &BlockId::Number(client.info().unwrap().chain.best_number), + AccountKeyring::Alice.into() + ) + .unwrap(), + 1000 + ); + assert_eq!( + client + .runtime_api() + .balance_of( + &BlockId::Number(client.info().unwrap().chain.best_number), + AccountKeyring::Ferdie.into() + ) + .unwrap(), + 0 + ); + } + + #[test] + fn block_builder_works_with_no_transactions() { + let client = test_client::new(); + + let builder = client.new_block().unwrap(); + + client + .import(BlockOrigin::Own, builder.bake().unwrap()) + .unwrap(); + + assert_eq!(client.info().unwrap().chain.best_number, 1); + } + + #[test] + fn block_builder_works_with_transactions() { + let client = test_client::new(); + + let mut builder = client.new_block().unwrap(); + + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 42, + nonce: 0, + }) + .unwrap(); + + client + .import(BlockOrigin::Own, builder.bake().unwrap()) + .unwrap(); + + assert_eq!(client.info().unwrap().chain.best_number, 1); + assert!( + client.state_at(&BlockId::Number(1)).unwrap().pairs() + != client.state_at(&BlockId::Number(0)).unwrap().pairs() + ); + assert_eq!( + client + .runtime_api() + .balance_of( + &BlockId::Number(client.info().unwrap().chain.best_number), + AccountKeyring::Alice.into() + ) + .unwrap(), + 958 + ); + assert_eq!( + client + .runtime_api() + .balance_of( + &BlockId::Number(client.info().unwrap().chain.best_number), + AccountKeyring::Ferdie.into() + ) + .unwrap(), + 42 + ); + } + + #[test] + fn block_builder_does_not_include_invalid() { + let client = test_client::new(); + + let mut builder = client.new_block().unwrap(); + + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 42, + nonce: 0, + }) + .unwrap(); + + assert!(builder + .push_transfer(Transfer { + from: AccountKeyring::Eve.into(), + to: AccountKeyring::Alice.into(), + amount: 42, + nonce: 0, + }) + .is_err()); + + client + .import(BlockOrigin::Own, builder.bake().unwrap()) + .unwrap(); + + assert_eq!(client.info().unwrap().chain.best_number, 1); + assert!( + 
client.state_at(&BlockId::Number(1)).unwrap().pairs() + != client.state_at(&BlockId::Number(0)).unwrap().pairs() + ); + assert_eq!(client.body(&BlockId::Number(1)).unwrap().unwrap().len(), 1) + } + + #[test] + fn best_containing_with_genesis_block() { + // block tree: + // G + + let client = test_client::new(); + + let genesis_hash = client.info().unwrap().chain.genesis_hash; + + assert_eq!( + genesis_hash.clone(), + client + .best_containing(genesis_hash.clone(), None) + .unwrap() + .unwrap() + ); + } + + #[test] + fn best_containing_with_hash_not_found() { + // block tree: + // G + + let client = test_client::new(); + + let uninserted_block = client.new_block().unwrap().bake().unwrap(); + + assert_eq!( + None, + client + .best_containing(uninserted_block.hash().clone(), None) + .unwrap() + ); + } + + #[test] + fn uncles_with_only_ancestors() { + // block tree: + // G -> A1 -> A2 + let client = test_client::new(); + + // G -> A1 + let a1 = client.new_block().unwrap().bake().unwrap(); + client.import(BlockOrigin::Own, a1.clone()).unwrap(); + + // A1 -> A2 + let a2 = client.new_block().unwrap().bake().unwrap(); + client.import(BlockOrigin::Own, a2.clone()).unwrap(); + let v: Vec = Vec::new(); + assert_eq!(v, client.uncles(a2.hash(), 3).unwrap()); + } + + #[test] + fn uncles_with_multiple_forks() { + // block tree: + // G -> A1 -> A2 -> A3 -> A4 -> A5 + // A1 -> B2 -> B3 -> B4 + // B2 -> C3 + // A1 -> D2 + let client = test_client::new(); + + // G -> A1 + let a1 = client.new_block().unwrap().bake().unwrap(); + client.import(BlockOrigin::Own, a1.clone()).unwrap(); + + // A1 -> A2 + let a2 = client + .new_block_at(&BlockId::Hash(a1.hash())) + .unwrap() + .bake() + .unwrap(); + client.import(BlockOrigin::Own, a2.clone()).unwrap(); + + // A2 -> A3 + let a3 = client + .new_block_at(&BlockId::Hash(a2.hash())) + .unwrap() + .bake() + .unwrap(); + client.import(BlockOrigin::Own, a3.clone()).unwrap(); + + // A3 -> A4 + let a4 = client + .new_block_at(&BlockId::Hash(a3.hash())) + .unwrap() + .bake() + .unwrap(); + client.import(BlockOrigin::Own, a4.clone()).unwrap(); + + // A4 -> A5 + let a5 = client + .new_block_at(&BlockId::Hash(a4.hash())) + .unwrap() + .bake() + .unwrap(); + client.import(BlockOrigin::Own, a5.clone()).unwrap(); + + // A1 -> B2 + let mut builder = client.new_block_at(&BlockId::Hash(a1.hash())).unwrap(); + // this push is required as otherwise B2 has the same hash as A2 and won't get imported + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 41, + nonce: 0, + }) + .unwrap(); + let b2 = builder.bake().unwrap(); + client.import(BlockOrigin::Own, b2.clone()).unwrap(); + + // B2 -> B3 + let b3 = client + .new_block_at(&BlockId::Hash(b2.hash())) + .unwrap() + .bake() + .unwrap(); + client.import(BlockOrigin::Own, b3.clone()).unwrap(); + + // B3 -> B4 + let b4 = client + .new_block_at(&BlockId::Hash(b3.hash())) + .unwrap() + .bake() + .unwrap(); + client.import(BlockOrigin::Own, b4.clone()).unwrap(); + + // // B2 -> C3 + let mut builder = client.new_block_at(&BlockId::Hash(b2.hash())).unwrap(); + // this push is required as otherwise C3 has the same hash as B3 and won't get imported + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 1, + }) + .unwrap(); + let c3 = builder.bake().unwrap(); + client.import(BlockOrigin::Own, c3.clone()).unwrap(); + + // A1 -> D2 + let mut builder = client.new_block_at(&BlockId::Hash(a1.hash())).unwrap(); 
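// Why the transfer pushed just below is needed: a block's hash is its header
// hash, and the header commits only to the parent hash, number, state root,
// extrinsics root and digest. Two empty children of the same parent would be
// literally the same block, so the second import would be a no-op. Toy
// illustration of that identity (hypothetical types and field values):
#[derive(PartialEq, Debug)]
struct ToyHeader {
    parent: u64,
    number: u64,
    state_root: u64,
    extrinsics_root: u64,
}
fn identical_empty_siblings() {
    let b2 = ToyHeader { parent: 1, number: 2, state_root: 7, extrinsics_root: 9 };
    let d2 = ToyHeader { parent: 1, number: 2, state_root: 7, extrinsics_root: 9 };
    assert_eq!(b2, d2); // same commitments, same hash, same block
}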
+ // this push is required as otherwise D2 has the same hash as B2 and won't get imported + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 0, + }) + .unwrap(); + let d2 = builder.bake().unwrap(); + client.import(BlockOrigin::Own, d2.clone()).unwrap(); + + let genesis_hash = client.info().unwrap().chain.genesis_hash; + + let uncles1 = client.uncles(a4.hash(), 10).unwrap(); + assert_eq!(vec![b2.hash(), d2.hash()], uncles1); + + let uncles2 = client.uncles(a4.hash(), 0).unwrap(); + assert_eq!(0, uncles2.len()); + + let uncles3 = client.uncles(a1.hash(), 10).unwrap(); + assert_eq!(0, uncles3.len()); + + let uncles4 = client.uncles(genesis_hash, 10).unwrap(); + assert_eq!(0, uncles4.len()); + + let uncles5 = client.uncles(d2.hash(), 10).unwrap(); + assert_eq!(vec![a2.hash(), b2.hash()], uncles5); + + let uncles6 = client.uncles(b3.hash(), 1).unwrap(); + assert_eq!(vec![c3.hash()], uncles6); + } + + #[test] + fn best_containing_with_single_chain_3_blocks() { + // block tree: + // G -> A1 -> A2 + + let client = test_client::new(); + + // G -> A1 + let a1 = client.new_block().unwrap().bake().unwrap(); + client.import(BlockOrigin::Own, a1.clone()).unwrap(); + + // A1 -> A2 + let a2 = client.new_block().unwrap().bake().unwrap(); + client.import(BlockOrigin::Own, a2.clone()).unwrap(); + + let genesis_hash = client.info().unwrap().chain.genesis_hash; + + assert_eq!( + a2.hash(), + client.best_containing(genesis_hash, None).unwrap().unwrap() + ); + assert_eq!( + a2.hash(), + client.best_containing(a1.hash(), None).unwrap().unwrap() + ); + assert_eq!( + a2.hash(), + client.best_containing(a2.hash(), None).unwrap().unwrap() + ); + } + + #[test] + fn best_containing_with_multiple_forks() { + // NOTE: we use the version of the trait from `test_client` + // because that is actually different than the version linked to + // in the test facade crate. 
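// The fully qualified `BlockchainBackendT::leaves(...)` call below is the
// standard way to pin a call to one specific trait when two in-scope traits
// (or two versions of the same trait, as the note above describes) expose
// the same method name. Minimal sketch of the disambiguation:
trait OldApi {
    fn leaves(&self) -> usize;
}
trait NewApi {
    fn leaves(&self) -> usize;
}
struct Chain;
impl OldApi for Chain {
    fn leaves(&self) -> usize {
        1
    }
}
impl NewApi for Chain {
    fn leaves(&self) -> usize {
        2
    }
}
fn disambiguate(c: &Chain) {
    // `c.leaves()` would be ambiguous here; name the trait at the call site:
    assert_eq!(OldApi::leaves(c), 1);
    assert_eq!(<Chain as NewApi>::leaves(c), 2);
}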
+ use test_client::blockchain::Backend as BlockchainBackendT; + + // block tree: + // G -> A1 -> A2 -> A3 -> A4 -> A5 + // A1 -> B2 -> B3 -> B4 + // B2 -> C3 + // A1 -> D2 + let client = test_client::new(); + + // G -> A1 + let a1 = client.new_block().unwrap().bake().unwrap(); + client.import(BlockOrigin::Own, a1.clone()).unwrap(); + + // A1 -> A2 + let a2 = client + .new_block_at(&BlockId::Hash(a1.hash())) + .unwrap() + .bake() + .unwrap(); + client.import(BlockOrigin::Own, a2.clone()).unwrap(); + + // A2 -> A3 + let a3 = client + .new_block_at(&BlockId::Hash(a2.hash())) + .unwrap() + .bake() + .unwrap(); + client.import(BlockOrigin::Own, a3.clone()).unwrap(); + + // A3 -> A4 + let a4 = client + .new_block_at(&BlockId::Hash(a3.hash())) + .unwrap() + .bake() + .unwrap(); + client.import(BlockOrigin::Own, a4.clone()).unwrap(); + + // A4 -> A5 + let a5 = client + .new_block_at(&BlockId::Hash(a4.hash())) + .unwrap() + .bake() + .unwrap(); + client.import(BlockOrigin::Own, a5.clone()).unwrap(); + + // A1 -> B2 + let mut builder = client.new_block_at(&BlockId::Hash(a1.hash())).unwrap(); + // this push is required as otherwise B2 has the same hash as A2 and won't get imported + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 41, + nonce: 0, + }) + .unwrap(); + let b2 = builder.bake().unwrap(); + client.import(BlockOrigin::Own, b2.clone()).unwrap(); + + // B2 -> B3 + let b3 = client + .new_block_at(&BlockId::Hash(b2.hash())) + .unwrap() + .bake() + .unwrap(); + client.import(BlockOrigin::Own, b3.clone()).unwrap(); + + // B3 -> B4 + let b4 = client + .new_block_at(&BlockId::Hash(b3.hash())) + .unwrap() + .bake() + .unwrap(); + client.import(BlockOrigin::Own, b4.clone()).unwrap(); + + // // B2 -> C3 + let mut builder = client.new_block_at(&BlockId::Hash(b2.hash())).unwrap(); + // this push is required as otherwise C3 has the same hash as B3 and won't get imported + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 1, + }) + .unwrap(); + let c3 = builder.bake().unwrap(); + client.import(BlockOrigin::Own, c3.clone()).unwrap(); + + // A1 -> D2 + let mut builder = client.new_block_at(&BlockId::Hash(a1.hash())).unwrap(); + // this push is required as otherwise D2 has the same hash as B2 and won't get imported + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 0, + }) + .unwrap(); + let d2 = builder.bake().unwrap(); + client.import(BlockOrigin::Own, d2.clone()).unwrap(); + + assert_eq!(client.info().unwrap().chain.best_hash, a5.hash()); + + let genesis_hash = client.info().unwrap().chain.genesis_hash; + let leaves = BlockchainBackendT::leaves(client.backend().blockchain()).unwrap(); + + assert!(leaves.contains(&a5.hash())); + assert!(leaves.contains(&b4.hash())); + assert!(leaves.contains(&c3.hash())); + assert!(leaves.contains(&d2.hash())); + assert_eq!(leaves.len(), 4); + + // search without restriction + + assert_eq!( + a5.hash(), + client.best_containing(genesis_hash, None).unwrap().unwrap() + ); + assert_eq!( + a5.hash(), + client.best_containing(a1.hash(), None).unwrap().unwrap() + ); + assert_eq!( + a5.hash(), + client.best_containing(a2.hash(), None).unwrap().unwrap() + ); + assert_eq!( + a5.hash(), + client.best_containing(a3.hash(), None).unwrap().unwrap() + ); + assert_eq!( + a5.hash(), + client.best_containing(a4.hash(), None).unwrap().unwrap() + ); + 
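// `best_containing` returns a Result wrapping an Option, which is why every
// assertion in this test unwraps twice: the outer layer is a client/backend
// error, while an inner `None` means "no leaf has that block in its chain".
// Non-test callers would match the two layers separately; sketch with a
// hypothetical error type:
fn describe_best(res: Result<Option<u64>, String>) -> String {
    match res {
        Err(e) => format!("lookup failed: {}", e),
        Ok(None) => "no leaf contains the requested block".to_string(),
        Ok(Some(best)) => format!("best containing block: {}", best),
    }
}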
assert_eq!( + a5.hash(), + client.best_containing(a5.hash(), None).unwrap().unwrap() + ); + + assert_eq!( + b4.hash(), + client.best_containing(b2.hash(), None).unwrap().unwrap() + ); + assert_eq!( + b4.hash(), + client.best_containing(b3.hash(), None).unwrap().unwrap() + ); + assert_eq!( + b4.hash(), + client.best_containing(b4.hash(), None).unwrap().unwrap() + ); + + assert_eq!( + c3.hash(), + client.best_containing(c3.hash(), None).unwrap().unwrap() + ); + + assert_eq!( + d2.hash(), + client.best_containing(d2.hash(), None).unwrap().unwrap() + ); + + // search only blocks with number <= 5. equivalent to without restriction for this scenario + + assert_eq!( + a5.hash(), + client + .best_containing(genesis_hash, Some(5)) + .unwrap() + .unwrap() + ); + assert_eq!( + a5.hash(), + client.best_containing(a1.hash(), Some(5)).unwrap().unwrap() + ); + assert_eq!( + a5.hash(), + client.best_containing(a2.hash(), Some(5)).unwrap().unwrap() + ); + assert_eq!( + a5.hash(), + client.best_containing(a3.hash(), Some(5)).unwrap().unwrap() + ); + assert_eq!( + a5.hash(), + client.best_containing(a4.hash(), Some(5)).unwrap().unwrap() + ); + assert_eq!( + a5.hash(), + client.best_containing(a5.hash(), Some(5)).unwrap().unwrap() + ); + + assert_eq!( + b4.hash(), + client.best_containing(b2.hash(), Some(5)).unwrap().unwrap() + ); + assert_eq!( + b4.hash(), + client.best_containing(b3.hash(), Some(5)).unwrap().unwrap() + ); + assert_eq!( + b4.hash(), + client.best_containing(b4.hash(), Some(5)).unwrap().unwrap() + ); + + assert_eq!( + c3.hash(), + client.best_containing(c3.hash(), Some(5)).unwrap().unwrap() + ); + + assert_eq!( + d2.hash(), + client.best_containing(d2.hash(), Some(5)).unwrap().unwrap() + ); + + // search only blocks with number <= 4 + + assert_eq!( + a4.hash(), + client + .best_containing(genesis_hash, Some(4)) + .unwrap() + .unwrap() + ); + assert_eq!( + a4.hash(), + client.best_containing(a1.hash(), Some(4)).unwrap().unwrap() + ); + assert_eq!( + a4.hash(), + client.best_containing(a2.hash(), Some(4)).unwrap().unwrap() + ); + assert_eq!( + a4.hash(), + client.best_containing(a3.hash(), Some(4)).unwrap().unwrap() + ); + assert_eq!( + a4.hash(), + client.best_containing(a4.hash(), Some(4)).unwrap().unwrap() + ); + assert_eq!(None, client.best_containing(a5.hash(), Some(4)).unwrap()); + + assert_eq!( + b4.hash(), + client.best_containing(b2.hash(), Some(4)).unwrap().unwrap() + ); + assert_eq!( + b4.hash(), + client.best_containing(b3.hash(), Some(4)).unwrap().unwrap() + ); + assert_eq!( + b4.hash(), + client.best_containing(b4.hash(), Some(4)).unwrap().unwrap() + ); + + assert_eq!( + c3.hash(), + client.best_containing(c3.hash(), Some(4)).unwrap().unwrap() + ); + + assert_eq!( + d2.hash(), + client.best_containing(d2.hash(), Some(4)).unwrap().unwrap() + ); + + // search only blocks with number <= 3 + + assert_eq!( + a3.hash(), + client + .best_containing(genesis_hash, Some(3)) + .unwrap() + .unwrap() + ); + assert_eq!( + a3.hash(), + client.best_containing(a1.hash(), Some(3)).unwrap().unwrap() + ); + assert_eq!( + a3.hash(), + client.best_containing(a2.hash(), Some(3)).unwrap().unwrap() + ); + assert_eq!( + a3.hash(), + client.best_containing(a3.hash(), Some(3)).unwrap().unwrap() + ); + assert_eq!(None, client.best_containing(a4.hash(), Some(3)).unwrap()); + assert_eq!(None, client.best_containing(a5.hash(), Some(3)).unwrap()); + + assert_eq!( + b3.hash(), + client.best_containing(b2.hash(), Some(3)).unwrap().unwrap() + ); + assert_eq!( + b3.hash(), + client.best_containing(b3.hash(), 
Some(3)).unwrap().unwrap() + ); + assert_eq!(None, client.best_containing(b4.hash(), Some(3)).unwrap()); + + assert_eq!( + c3.hash(), + client.best_containing(c3.hash(), Some(3)).unwrap().unwrap() + ); + + assert_eq!( + d2.hash(), + client.best_containing(d2.hash(), Some(3)).unwrap().unwrap() + ); + + // search only blocks with number <= 2 + + assert_eq!( + a2.hash(), + client + .best_containing(genesis_hash, Some(2)) + .unwrap() + .unwrap() + ); + assert_eq!( + a2.hash(), + client.best_containing(a1.hash(), Some(2)).unwrap().unwrap() + ); + assert_eq!( + a2.hash(), + client.best_containing(a2.hash(), Some(2)).unwrap().unwrap() + ); + assert_eq!(None, client.best_containing(a3.hash(), Some(2)).unwrap()); + assert_eq!(None, client.best_containing(a4.hash(), Some(2)).unwrap()); + assert_eq!(None, client.best_containing(a5.hash(), Some(2)).unwrap()); + + assert_eq!( + b2.hash(), + client.best_containing(b2.hash(), Some(2)).unwrap().unwrap() + ); + assert_eq!(None, client.best_containing(b3.hash(), Some(2)).unwrap()); + assert_eq!(None, client.best_containing(b4.hash(), Some(2)).unwrap()); + + assert_eq!(None, client.best_containing(c3.hash(), Some(2)).unwrap()); + + assert_eq!( + d2.hash(), + client.best_containing(d2.hash(), Some(2)).unwrap().unwrap() + ); + + // search only blocks with number <= 1 + + assert_eq!( + a1.hash(), + client + .best_containing(genesis_hash, Some(1)) + .unwrap() + .unwrap() + ); + assert_eq!( + a1.hash(), + client.best_containing(a1.hash(), Some(1)).unwrap().unwrap() + ); + assert_eq!(None, client.best_containing(a2.hash(), Some(1)).unwrap()); + assert_eq!(None, client.best_containing(a3.hash(), Some(1)).unwrap()); + assert_eq!(None, client.best_containing(a4.hash(), Some(1)).unwrap()); + assert_eq!(None, client.best_containing(a5.hash(), Some(1)).unwrap()); + + assert_eq!(None, client.best_containing(b2.hash(), Some(1)).unwrap()); + assert_eq!(None, client.best_containing(b3.hash(), Some(1)).unwrap()); + assert_eq!(None, client.best_containing(b4.hash(), Some(1)).unwrap()); + + assert_eq!(None, client.best_containing(c3.hash(), Some(1)).unwrap()); + + assert_eq!(None, client.best_containing(d2.hash(), Some(1)).unwrap()); + + // search only blocks with number <= 0 + + assert_eq!( + genesis_hash, + client + .best_containing(genesis_hash, Some(0)) + .unwrap() + .unwrap() + ); + assert_eq!(None, client.best_containing(a1.hash(), Some(0)).unwrap()); + assert_eq!(None, client.best_containing(a2.hash(), Some(0)).unwrap()); + assert_eq!(None, client.best_containing(a3.hash(), Some(0)).unwrap()); + assert_eq!(None, client.best_containing(a4.hash(), Some(0)).unwrap()); + assert_eq!(None, client.best_containing(a5.hash(), Some(0)).unwrap()); + + assert_eq!(None, client.best_containing(b2.hash(), Some(0)).unwrap()); + assert_eq!(None, client.best_containing(b3.hash(), Some(0)).unwrap()); + assert_eq!(None, client.best_containing(b4.hash(), Some(0)).unwrap()); + + assert_eq!( + None, + client.best_containing(c3.hash().clone(), Some(0)).unwrap() + ); + + assert_eq!( + None, + client.best_containing(d2.hash().clone(), Some(0)).unwrap() + ); + } + + #[test] + fn best_containing_with_max_depth_higher_than_best() { + // block tree: + // G -> A1 -> A2 + + let client = test_client::new(); + + // G -> A1 + let a1 = client.new_block().unwrap().bake().unwrap(); + client.import(BlockOrigin::Own, a1.clone()).unwrap(); + + // A1 -> A2 + let a2 = client.new_block().unwrap().bake().unwrap(); + client.import(BlockOrigin::Own, a2.clone()).unwrap(); + + let genesis_hash = 
client.info().unwrap().chain.genesis_hash; + + assert_eq!( + a2.hash(), + client + .best_containing(genesis_hash, Some(10)) + .unwrap() + .unwrap() + ); + } + + #[test] + fn key_changes_works() { + let (client, _, test_cases) = prepare_client_with_key_changes(); + + for (index, (begin, end, key, expected_result)) in test_cases.into_iter().enumerate() { + let end = client.block_hash(end).unwrap().unwrap(); + let actual_result = client + .key_changes(begin, BlockId::Hash(end), &StorageKey(key)) + .unwrap(); + match actual_result == expected_result { + true => (), + false => panic!(format!( + "Failed test {}: actual = {:?}, expected = {:?}", + index, actual_result, expected_result + )), + } + } + } + + #[test] + fn import_with_justification() { + use test_client::blockchain::Backend; + + let client = test_client::new(); + + // G -> A1 + let a1 = client.new_block().unwrap().bake().unwrap(); + client.import(BlockOrigin::Own, a1.clone()).unwrap(); + + // A1 -> A2 + let a2 = client + .new_block_at(&BlockId::Hash(a1.hash())) + .unwrap() + .bake() + .unwrap(); + client.import(BlockOrigin::Own, a2.clone()).unwrap(); + + // A2 -> A3 + let justification = vec![1, 2, 3]; + let a3 = client + .new_block_at(&BlockId::Hash(a2.hash())) + .unwrap() + .bake() + .unwrap(); + client + .import_justified(BlockOrigin::Own, a3.clone(), justification.clone()) + .unwrap(); + + assert_eq!( + client.backend().blockchain().last_finalized().unwrap(), + a3.hash(), + ); + + assert_eq!( + client + .backend() + .blockchain() + .justification(BlockId::Hash(a3.hash())) + .unwrap(), + Some(justification), + ); + + assert_eq!( + client + .backend() + .blockchain() + .justification(BlockId::Hash(a1.hash())) + .unwrap(), + None, + ); + + assert_eq!( + client + .backend() + .blockchain() + .justification(BlockId::Hash(a2.hash())) + .unwrap(), + None, + ); + } } diff --git a/core/client/src/error.rs b/core/client/src/error.rs index 3ee3c0e2a1..38b526025d 100644 --- a/core/client/src/error.rs +++ b/core/client/src/error.rs @@ -21,155 +21,155 @@ #![allow(deprecated)] #![allow(missing_docs)] -use std; -use state_machine; -use runtime_primitives::ApplyError; use consensus; use error_chain::*; +use runtime_primitives::ApplyError; +use state_machine; +use std; error_chain! { - links { - Consensus(consensus::Error, consensus::ErrorKind); - } - errors { - /// Backend error. - Backend(s: String) { - description("Unrecoverable backend error"), - display("Backend error: {}", s), - } - - /// Unknown block. - UnknownBlock(h: String) { - description("unknown block"), - display("UnknownBlock: {}", &*h), - } - - /// Applying extrinsic error. - ApplyExtrinsicFailed(e: ApplyError) { - description("Extrinsic error"), - display("Extrinsic error: {:?}", e), - } - - /// Execution error. - Execution(e: Box) { - description("execution error"), - display("Execution: {}", e), - } - - /// Blockchain error. - Blockchain(e: Box) { - description("Blockchain error"), - display("Blockchain: {}", e), - } - - /// Could not get runtime version. - VersionInvalid { - description("Runtime version error"), - display("On-chain runtime does not specify version"), - } - - /// Genesis config is invalid. - GenesisInvalid { - description("Genesis config error"), - display("Genesis config provided is invalid"), - } - - /// Bad justification for header. - BadJustification(h: String) { - description("bad justification for header"), - display("bad justification for header: {}", &*h), - } - - /// Not available on light client. 
- NotAvailableOnLightClient { - description("not available on light client"), - display("This method is not currently available when running in light client mode"), - } - - /// Invalid remote CHT-based proof. - InvalidCHTProof { - description("invalid header proof"), - display("Remote node has responded with invalid header proof"), - } - - /// Remote fetch has been cancelled. - RemoteFetchCancelled { - description("remote fetch cancelled"), - display("Remote data fetch has been cancelled"), - } - - /// Remote fetch has been failed. - RemoteFetchFailed { - description("remote fetch failed"), - display("Remote data fetch has been failed"), - } - - /// Error decoding call result. - CallResultDecode(method: &'static str) { - description("Error decoding call result") - display("Error decoding call result of {}", method) - } - - /// Error converting a parameter between runtime and node. - RuntimeParamConversion(param: &'static str) { - description("Error converting parameter between runtime and node") - display("Error converting `{}` between runtime and node", param) - } - - /// Changes tries are not supported. - ChangesTriesNotSupported { - description("changes tries are not supported"), - display("Changes tries are not supported by the runtime"), - } - - /// Key changes query has failed. - ChangesTrieAccessFailed(e: String) { - description("invalid changes proof"), - display("Failed to check changes proof: {}", e), - } - - /// Last finalized block not parent of current. - NonSequentialFinalization(s: String) { - description("Did not finalize blocks in sequential order."), - display("Did not finalize blocks in sequential order."), - } - - /// Safety violation: new best block not descendent of last finalized. - NotInFinalizedChain { - description("Potential long-range attack: block not in finalized chain."), - display("Potential long-range attack: block not in finalized chain."), - } - - /// Hash that is required for building CHT is missing. - MissingHashRequiredForCHT(cht_num: u64, block_number: u64) { - description("missed hash required for building CHT"), - display("Failed to get hash of block#{} for building CHT#{}", block_number, cht_num), - } - } + links { + Consensus(consensus::Error, consensus::ErrorKind); + } + errors { + /// Backend error. + Backend(s: String) { + description("Unrecoverable backend error"), + display("Backend error: {}", s), + } + + /// Unknown block. + UnknownBlock(h: String) { + description("unknown block"), + display("UnknownBlock: {}", &*h), + } + + /// Applying extrinsic error. + ApplyExtrinsicFailed(e: ApplyError) { + description("Extrinsic error"), + display("Extrinsic error: {:?}", e), + } + + /// Execution error. + Execution(e: Box) { + description("execution error"), + display("Execution: {}", e), + } + + /// Blockchain error. + Blockchain(e: Box) { + description("Blockchain error"), + display("Blockchain: {}", e), + } + + /// Could not get runtime version. + VersionInvalid { + description("Runtime version error"), + display("On-chain runtime does not specify version"), + } + + /// Genesis config is invalid. + GenesisInvalid { + description("Genesis config error"), + display("Genesis config provided is invalid"), + } + + /// Bad justification for header. + BadJustification(h: String) { + description("bad justification for header"), + display("bad justification for header: {}", &*h), + } + + /// Not available on light client. 
+ NotAvailableOnLightClient {
+ description("not available on light client"),
+ display("This method is not currently available when running in light client mode"),
+ }
+
+ /// Invalid remote CHT-based proof.
+ InvalidCHTProof {
+ description("invalid header proof"),
+ display("Remote node has responded with invalid header proof"),
+ }
+
+ /// Remote fetch has been cancelled.
+ RemoteFetchCancelled {
+ description("remote fetch cancelled"),
+ display("Remote data fetch has been cancelled"),
+ }
+
+ /// Remote fetch has failed.
+ RemoteFetchFailed {
+ description("remote fetch failed"),
+ display("Remote data fetch has failed"),
+ }
+
+ /// Error decoding call result.
+ CallResultDecode(method: &'static str) {
+ description("Error decoding call result")
+ display("Error decoding call result of {}", method)
+ }
+
+ /// Error converting a parameter between runtime and node.
+ RuntimeParamConversion(param: &'static str) {
+ description("Error converting parameter between runtime and node")
+ display("Error converting `{}` between runtime and node", param)
+ }
+
+ /// Changes tries are not supported.
+ ChangesTriesNotSupported {
+ description("changes tries are not supported"),
+ display("Changes tries are not supported by the runtime"),
+ }
+
+ /// Key changes query has failed.
+ ChangesTrieAccessFailed(e: String) {
+ description("invalid changes proof"),
+ display("Failed to check changes proof: {}", e),
+ }
+
+ /// Last finalized block not parent of current.
+ NonSequentialFinalization(s: String) {
+ description("Did not finalize blocks in sequential order."),
+ display("Did not finalize blocks in sequential order."),
+ }
+
+ /// Safety violation: new best block not descendant of last finalized.
+ NotInFinalizedChain {
+ description("Potential long-range attack: block not in finalized chain."),
+ display("Potential long-range attack: block not in finalized chain."),
+ }
+
+ /// Hash that is required for building CHT is missing.
+ MissingHashRequiredForCHT(cht_num: u64, block_number: u64) {
+ description("missing hash required for building CHT"),
+ display("Failed to get hash of block#{} for building CHT#{}", block_number, cht_num),
+ }
+ }
}

impl From> for Error {
- fn from(e: Box) -> Self {
- ErrorKind::Execution(e).into()
- }
+ fn from(e: Box) -> Self {
+ ErrorKind::Execution(e).into()
+ }
}

impl From for Error {
- fn from(e: state_machine::backend::Void) -> Self {
- match e {}
- }
+ fn from(e: state_machine::backend::Void) -> Self {
+ match e {}
+ }
}

impl Error {
- /// Chain a blockchain error.
- pub fn from_blockchain(e: Box) -> Self {
- ErrorKind::Blockchain(e).into()
- }
-
- /// Chain a state error.
- pub fn from_state(e: Box) -> Self {
- ErrorKind::Execution(e).into()
- }
+ /// Chain a blockchain error.
+ pub fn from_blockchain(e: Box) -> Self {
+ ErrorKind::Blockchain(e).into()
+ }
+
+ /// Chain a state error.
+ pub fn from_state(e: Box) -> Self {
+ ErrorKind::Execution(e).into()
+ }
}

impl state_machine::Error for Error {}
diff --git a/core/client/src/genesis.rs b/core/client/src/genesis.rs
index eea0a251ca..79fc25ca04 100644
--- a/core/client/src/genesis.rs
+++ b/core/client/src/genesis.rs
@@ -16,219 +16,234 @@

 //! Tool for creating the genesis block.

-use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, Hash as HashT, Zero};
+use runtime_primitives::traits::{Block as BlockT, Hash as HashT, Header as HeaderT, Zero};

 /// Create a genesis block, given the initial storage.
-pub fn construct_genesis_block< - Block: BlockT -> ( - state_root: Block::Hash -) -> Block { - let extrinsics_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root(::std::iter::empty::<(&[u8], &[u8])>()); - Block::new( - <::Header as HeaderT>::new( - Zero::zero(), - extrinsics_root, - state_root, - Default::default(), - Default::default() - ), - Default::default() - ) +pub fn construct_genesis_block(state_root: Block::Hash) -> Block { + let extrinsics_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( + ::std::iter::empty::<(&[u8], &[u8])>(), + ); + Block::new( + <::Header as HeaderT>::new( + Zero::zero(), + extrinsics_root, + state_root, + Default::default(), + Default::default(), + ), + Default::default(), + ) } #[cfg(test)] mod tests { - use super::*; - use parity_codec::{Encode, Decode, Joiner}; - use executor::{NativeExecutionDispatch, native_executor_instance}; - use state_machine::{self, OverlayedChanges, ExecutionStrategy, InMemoryChangesTrieStorage}; - use state_machine::backend::InMemory; - use test_client::{ - runtime::genesismap::{GenesisConfig, additional_storage_with_genesis}, - runtime::{Hash, Transfer, Block, BlockNumber, Header, Digest, Extrinsic}, - AccountKeyring, AuthorityKeyring - }; - use runtime_primitives::traits::BlakeTwo256; - use primitives::Blake2Hasher; - use hex::*; - - native_executor_instance!(Executor, test_client::runtime::api::dispatch, test_client::runtime::native_version, include_bytes!("../../test-runtime/wasm/target/wasm32-unknown-unknown/release/substrate_test_runtime.compact.wasm")); - - fn executor() -> executor::NativeExecutor { - NativeExecutionDispatch::new(None) - } - - fn construct_block( - backend: &InMemory, - number: BlockNumber, - parent_hash: Hash, - state_root: Hash, - txs: Vec - ) -> (Vec, Hash) { - use trie::ordered_trie_root; - - let transactions = txs.into_iter().map(|tx| { - let signature = AccountKeyring::from_public(&tx.from).unwrap() - .sign(&tx.encode()).into(); - - Extrinsic::Transfer(tx, signature) - }).collect::>(); - - let extrinsics_root = ordered_trie_root::(transactions.iter().map(Encode::encode)).into(); - - let mut header = Header { - parent_hash, - number, - state_root, - extrinsics_root, - digest: Digest { logs: vec![], }, - }; - let hash = header.hash(); - let mut overlay = OverlayedChanges::default(); - - state_machine::new( - backend, - Some(&InMemoryChangesTrieStorage::new()), - state_machine::NeverOffchainExt::new(), - &mut overlay, - &executor(), - "Core_initialize_block", - &header.encode(), - ).execute( - ExecutionStrategy::NativeElseWasm, - ).unwrap(); - - for tx in transactions.iter() { - state_machine::new( - backend, - Some(&InMemoryChangesTrieStorage::new()), - state_machine::NeverOffchainExt::new(), - &mut overlay, - &executor(), - "BlockBuilder_apply_extrinsic", - &tx.encode(), - ).execute( - ExecutionStrategy::NativeElseWasm, - ).unwrap(); - } - - let (ret_data, _, _) = state_machine::new( - backend, - Some(&InMemoryChangesTrieStorage::new()), - state_machine::NeverOffchainExt::new(), - &mut overlay, - &executor(), - "BlockBuilder_finalize_block", - &[], - ).execute( - ExecutionStrategy::NativeElseWasm, - ).unwrap(); - header = Header::decode(&mut &ret_data[..]).unwrap(); - - (vec![].and(&Block { header, extrinsics: transactions }), hash) - } - - fn block1(genesis_hash: Hash, backend: &InMemory) -> (Vec, Hash) { - construct_block( - backend, - 1, - genesis_hash, - hex!("25e5b37074063ab75c889326246640729b40d0c86932edc527bc80db0e04fe5c").into(), - vec![Transfer { - from: 
AccountKeyring::One.into(), - to: AccountKeyring::Two.into(), - amount: 69, - nonce: 0, - }] - ) - } - - #[test] - fn construct_genesis_should_work_with_native() { - let mut storage = GenesisConfig::new(false, - vec![AuthorityKeyring::One.into(), AuthorityKeyring::Two.into()], - vec![AccountKeyring::One.into(), AccountKeyring::Two.into()], - 1000 - ).genesis_map(); - let state_root = BlakeTwo256::trie_root(storage.clone().into_iter()); - let block = construct_genesis_block::(state_root); - let genesis_hash = block.header.hash(); - storage.extend(additional_storage_with_genesis(&block).into_iter()); - - let backend = InMemory::from(storage); - let (b1data, _b1hash) = block1(genesis_hash, &backend); - - let mut overlay = OverlayedChanges::default(); - let _ = state_machine::new( - &backend, - Some(&InMemoryChangesTrieStorage::new()), - state_machine::NeverOffchainExt::new(), - &mut overlay, - &executor(), - "Core_execute_block", - &b1data, - ).execute( - ExecutionStrategy::NativeElseWasm, - ).unwrap(); - } - - #[test] - fn construct_genesis_should_work_with_wasm() { - let mut storage = GenesisConfig::new(false, - vec![AuthorityKeyring::One.into(), AuthorityKeyring::Two.into()], - vec![AccountKeyring::One.into(), AccountKeyring::Two.into()], - 1000 - ).genesis_map(); - let state_root = BlakeTwo256::trie_root(storage.clone().into_iter()); - let block = construct_genesis_block::(state_root); - let genesis_hash = block.header.hash(); - storage.extend(additional_storage_with_genesis(&block).into_iter()); - - let backend = InMemory::from(storage); - let (b1data, _b1hash) = block1(genesis_hash, &backend); - - let mut overlay = OverlayedChanges::default(); - let _ = state_machine::new( - &backend, - Some(&InMemoryChangesTrieStorage::new()), - state_machine::NeverOffchainExt::new(), - &mut overlay, - &executor(), - "Core_execute_block", - &b1data, - ).execute( - ExecutionStrategy::AlwaysWasm, - ).unwrap(); - } - - #[test] - fn construct_genesis_with_bad_transaction_should_panic() { - let mut storage = GenesisConfig::new(false, - vec![AuthorityKeyring::One.into(), AuthorityKeyring::Two.into()], - vec![AccountKeyring::One.into(), AccountKeyring::Two.into()], - 68 - ).genesis_map(); - let state_root = BlakeTwo256::trie_root(storage.clone().into_iter()); - let block = construct_genesis_block::(state_root); - let genesis_hash = block.header.hash(); - storage.extend(additional_storage_with_genesis(&block).into_iter()); - - let backend = InMemory::from(storage); - let (b1data, _b1hash) = block1(genesis_hash, &backend); - - let mut overlay = OverlayedChanges::default(); - let r = state_machine::new( - &backend, - Some(&InMemoryChangesTrieStorage::new()), - state_machine::NeverOffchainExt::new(), - &mut overlay, - &Executor::new(None), - "Core_execute_block", - &b1data, - ).execute( - ExecutionStrategy::NativeElseWasm, - ); - assert!(r.is_err()); - } + use super::*; + use executor::{native_executor_instance, NativeExecutionDispatch}; + use hex::*; + use parity_codec::{Decode, Encode, Joiner}; + use primitives::Blake2Hasher; + use runtime_primitives::traits::BlakeTwo256; + use state_machine::backend::InMemory; + use state_machine::{self, ExecutionStrategy, InMemoryChangesTrieStorage, OverlayedChanges}; + use test_client::{ + runtime::genesismap::{additional_storage_with_genesis, GenesisConfig}, + runtime::{Block, BlockNumber, Digest, Extrinsic, Hash, Header, Transfer}, + AccountKeyring, AuthorityKeyring, + }; + + native_executor_instance!(Executor, test_client::runtime::api::dispatch, 
test_client::runtime::native_version, include_bytes!("../../test-runtime/wasm/target/wasm32-unknown-unknown/release/substrate_test_runtime.compact.wasm")); + + fn executor() -> executor::NativeExecutor { + NativeExecutionDispatch::new(None) + } + + fn construct_block( + backend: &InMemory, + number: BlockNumber, + parent_hash: Hash, + state_root: Hash, + txs: Vec, + ) -> (Vec, Hash) { + use trie::ordered_trie_root; + + let transactions = txs + .into_iter() + .map(|tx| { + let signature = AccountKeyring::from_public(&tx.from) + .unwrap() + .sign(&tx.encode()) + .into(); + + Extrinsic::Transfer(tx, signature) + }) + .collect::>(); + + let extrinsics_root = + ordered_trie_root::(transactions.iter().map(Encode::encode)).into(); + + let mut header = Header { + parent_hash, + number, + state_root, + extrinsics_root, + digest: Digest { logs: vec![] }, + }; + let hash = header.hash(); + let mut overlay = OverlayedChanges::default(); + + state_machine::new( + backend, + Some(&InMemoryChangesTrieStorage::new()), + state_machine::NeverOffchainExt::new(), + &mut overlay, + &executor(), + "Core_initialize_block", + &header.encode(), + ) + .execute(ExecutionStrategy::NativeElseWasm) + .unwrap(); + + for tx in transactions.iter() { + state_machine::new( + backend, + Some(&InMemoryChangesTrieStorage::new()), + state_machine::NeverOffchainExt::new(), + &mut overlay, + &executor(), + "BlockBuilder_apply_extrinsic", + &tx.encode(), + ) + .execute(ExecutionStrategy::NativeElseWasm) + .unwrap(); + } + + let (ret_data, _, _) = state_machine::new( + backend, + Some(&InMemoryChangesTrieStorage::new()), + state_machine::NeverOffchainExt::new(), + &mut overlay, + &executor(), + "BlockBuilder_finalize_block", + &[], + ) + .execute(ExecutionStrategy::NativeElseWasm) + .unwrap(); + header = Header::decode(&mut &ret_data[..]).unwrap(); + + ( + vec![].and(&Block { + header, + extrinsics: transactions, + }), + hash, + ) + } + + fn block1(genesis_hash: Hash, backend: &InMemory) -> (Vec, Hash) { + construct_block( + backend, + 1, + genesis_hash, + hex!("25e5b37074063ab75c889326246640729b40d0c86932edc527bc80db0e04fe5c").into(), + vec![Transfer { + from: AccountKeyring::One.into(), + to: AccountKeyring::Two.into(), + amount: 69, + nonce: 0, + }], + ) + } + + #[test] + fn construct_genesis_should_work_with_native() { + let mut storage = GenesisConfig::new( + false, + vec![AuthorityKeyring::One.into(), AuthorityKeyring::Two.into()], + vec![AccountKeyring::One.into(), AccountKeyring::Two.into()], + 1000, + ) + .genesis_map(); + let state_root = BlakeTwo256::trie_root(storage.clone().into_iter()); + let block = construct_genesis_block::(state_root); + let genesis_hash = block.header.hash(); + storage.extend(additional_storage_with_genesis(&block).into_iter()); + + let backend = InMemory::from(storage); + let (b1data, _b1hash) = block1(genesis_hash, &backend); + + let mut overlay = OverlayedChanges::default(); + let _ = state_machine::new( + &backend, + Some(&InMemoryChangesTrieStorage::new()), + state_machine::NeverOffchainExt::new(), + &mut overlay, + &executor(), + "Core_execute_block", + &b1data, + ) + .execute(ExecutionStrategy::NativeElseWasm) + .unwrap(); + } + + #[test] + fn construct_genesis_should_work_with_wasm() { + let mut storage = GenesisConfig::new( + false, + vec![AuthorityKeyring::One.into(), AuthorityKeyring::Two.into()], + vec![AccountKeyring::One.into(), AccountKeyring::Two.into()], + 1000, + ) + .genesis_map(); + let state_root = BlakeTwo256::trie_root(storage.clone().into_iter()); + let block = 
construct_genesis_block::(state_root); + let genesis_hash = block.header.hash(); + storage.extend(additional_storage_with_genesis(&block).into_iter()); + + let backend = InMemory::from(storage); + let (b1data, _b1hash) = block1(genesis_hash, &backend); + + let mut overlay = OverlayedChanges::default(); + let _ = state_machine::new( + &backend, + Some(&InMemoryChangesTrieStorage::new()), + state_machine::NeverOffchainExt::new(), + &mut overlay, + &executor(), + "Core_execute_block", + &b1data, + ) + .execute(ExecutionStrategy::AlwaysWasm) + .unwrap(); + } + + #[test] + fn construct_genesis_with_bad_transaction_should_panic() { + let mut storage = GenesisConfig::new( + false, + vec![AuthorityKeyring::One.into(), AuthorityKeyring::Two.into()], + vec![AccountKeyring::One.into(), AccountKeyring::Two.into()], + 68, + ) + .genesis_map(); + let state_root = BlakeTwo256::trie_root(storage.clone().into_iter()); + let block = construct_genesis_block::(state_root); + let genesis_hash = block.header.hash(); + storage.extend(additional_storage_with_genesis(&block).into_iter()); + + let backend = InMemory::from(storage); + let (b1data, _b1hash) = block1(genesis_hash, &backend); + + let mut overlay = OverlayedChanges::default(); + let r = state_machine::new( + &backend, + Some(&InMemoryChangesTrieStorage::new()), + state_machine::NeverOffchainExt::new(), + &mut overlay, + &Executor::new(None), + "Core_execute_block", + &b1data, + ) + .execute(ExecutionStrategy::NativeElseWasm); + assert!(r.is_err()); + } } diff --git a/core/client/src/in_mem.rs b/core/client/src/in_mem.rs index 29256169f9..bc4fd7ff37 100644 --- a/core/client/src/in_mem.rs +++ b/core/client/src/in_mem.rs @@ -16,736 +16,883 @@ //! In memory client backend -use std::collections::HashMap; -use std::sync::Arc; -use parking_lot::RwLock; -use primitives::{ChangesTrieConfiguration, storage::well_known_keys}; -use runtime_primitives::generic::BlockId; -use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, Zero, - NumberFor, As, Digest, DigestItem}; -use runtime_primitives::{Justification, StorageOverlay, ChildrenStorageOverlay}; -use state_machine::backend::{Backend as StateBackend, InMemory, Consolidate}; -use state_machine::{self, InMemoryChangesTrieStorage, ChangesTrieAnchorBlockId}; +use consensus::well_known_cache_keys::Id as CacheKeyId; use hash_db::Hasher; use heapsize::HeapSizeOf; +use parking_lot::RwLock; +use primitives::{storage::well_known_keys, ChangesTrieConfiguration}; +use runtime_primitives::generic::BlockId; +use runtime_primitives::traits::{ + As, Block as BlockT, Digest, DigestItem, Header as HeaderT, NumberFor, Zero, +}; +use runtime_primitives::{ChildrenStorageOverlay, Justification, StorageOverlay}; +use state_machine::backend::{Backend as StateBackend, Consolidate, InMemory}; +use state_machine::{self, ChangesTrieAnchorBlockId, InMemoryChangesTrieStorage}; +use std::collections::HashMap; +use std::sync::Arc; use trie::MemoryDB; -use consensus::well_known_cache_keys::Id as CacheKeyId; -use crate::error; use crate::backend::{self, NewBlockState}; -use crate::light; -use crate::leaves::LeafSet; use crate::blockchain::{self, BlockStatus, HeaderBackend}; +use crate::error; +use crate::leaves::LeafSet; +use crate::light; struct PendingBlock { - block: StoredBlock, - state: NewBlockState, + block: StoredBlock, + state: NewBlockState, } #[derive(PartialEq, Eq, Clone)] enum StoredBlock { - Header(B::Header, Option), - Full(B, Option), + Header(B::Header, Option), + Full(B, Option), } impl StoredBlock { - fn 
new(header: B::Header, body: Option>, just: Option) -> Self { - match body { - Some(body) => StoredBlock::Full(B::new(header, body), just), - None => StoredBlock::Header(header, just), - } - } - - fn header(&self) -> &B::Header { - match *self { - StoredBlock::Header(ref h, _) => h, - StoredBlock::Full(ref b, _) => b.header(), - } - } - - fn justification(&self) -> Option<&Justification> { - match *self { - StoredBlock::Header(_, ref j) | StoredBlock::Full(_, ref j) => j.as_ref() - } - } - - fn extrinsics(&self) -> Option<&[B::Extrinsic]> { - match *self { - StoredBlock::Header(_, _) => None, - StoredBlock::Full(ref b, _) => Some(b.extrinsics()), - } - } - - fn into_inner(self) -> (B::Header, Option>, Option) { - match self { - StoredBlock::Header(header, just) => (header, None, just), - StoredBlock::Full(block, just) => { - let (header, body) = block.deconstruct(); - (header, Some(body), just) - } - } - } + fn new( + header: B::Header, + body: Option>, + just: Option, + ) -> Self { + match body { + Some(body) => StoredBlock::Full(B::new(header, body), just), + None => StoredBlock::Header(header, just), + } + } + + fn header(&self) -> &B::Header { + match *self { + StoredBlock::Header(ref h, _) => h, + StoredBlock::Full(ref b, _) => b.header(), + } + } + + fn justification(&self) -> Option<&Justification> { + match *self { + StoredBlock::Header(_, ref j) | StoredBlock::Full(_, ref j) => j.as_ref(), + } + } + + fn extrinsics(&self) -> Option<&[B::Extrinsic]> { + match *self { + StoredBlock::Header(_, _) => None, + StoredBlock::Full(ref b, _) => Some(b.extrinsics()), + } + } + + fn into_inner(self) -> (B::Header, Option>, Option) { + match self { + StoredBlock::Header(header, just) => (header, None, just), + StoredBlock::Full(block, just) => { + let (header, body) = block.deconstruct(); + (header, Some(body), just) + } + } + } } #[derive(Clone)] struct BlockchainStorage { - blocks: HashMap>, - hashes: HashMap, Block::Hash>, - best_hash: Block::Hash, - best_number: NumberFor, - finalized_hash: Block::Hash, - finalized_number: NumberFor, - genesis_hash: Block::Hash, - header_cht_roots: HashMap, Block::Hash>, - changes_trie_cht_roots: HashMap, Block::Hash>, - leaves: LeafSet>, - aux: HashMap, Vec>, + blocks: HashMap>, + hashes: HashMap, Block::Hash>, + best_hash: Block::Hash, + best_number: NumberFor, + finalized_hash: Block::Hash, + finalized_number: NumberFor, + genesis_hash: Block::Hash, + header_cht_roots: HashMap, Block::Hash>, + changes_trie_cht_roots: HashMap, Block::Hash>, + leaves: LeafSet>, + aux: HashMap, Vec>, } /// In-memory blockchain. Supports concurrent reads. pub struct Blockchain { - storage: Arc>>, + storage: Arc>>, } impl Clone for Blockchain { - fn clone(&self) -> Self { - let storage = Arc::new(RwLock::new(self.storage.read().clone())); - Blockchain { - storage: storage.clone(), - } - } + fn clone(&self) -> Self { + let storage = Arc::new(RwLock::new(self.storage.read().clone())); + Blockchain { + storage: storage.clone(), + } + } } impl Blockchain { - /// Get header hash of given block. - pub fn id(&self, id: BlockId) -> Option { - match id { - BlockId::Hash(h) => Some(h), - BlockId::Number(n) => self.storage.read().hashes.get(&n).cloned(), - } - } - - /// Create new in-memory blockchain storage. 
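// Usage sketch for the in-memory chain (illustrative only; `header` stands
// for a fully built `Block::Header`, and `NewBlockState::Best` is assumed
// from the `is_best()` / `Final` handling in `insert` to mark a new best
// block):
//
//     let chain = Blockchain::<Block>::new();
//     chain.insert(header.hash(), header, None, None, NewBlockState::Best)?;
//     assert_eq!(chain.blocks_count(), 1);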
- pub fn new() -> Blockchain { - let storage = Arc::new(RwLock::new( - BlockchainStorage { - blocks: HashMap::new(), - hashes: HashMap::new(), - best_hash: Default::default(), - best_number: Zero::zero(), - finalized_hash: Default::default(), - finalized_number: Zero::zero(), - genesis_hash: Default::default(), - header_cht_roots: HashMap::new(), - changes_trie_cht_roots: HashMap::new(), - leaves: LeafSet::new(), - aux: HashMap::new(), - })); - Blockchain { - storage: storage.clone(), - } - } - - /// Insert a block header and associated data. - pub fn insert( - &self, - hash: Block::Hash, - header: ::Header, - justification: Option, - body: Option::Extrinsic>>, - new_state: NewBlockState, - ) -> crate::error::Result<()> { - let number = header.number().clone(); - if new_state.is_best() { - self.apply_head(&header)?; - } - - { - let mut storage = self.storage.write(); - storage.leaves.import(hash.clone(), number.clone(), header.parent_hash().clone()); - storage.blocks.insert(hash.clone(), StoredBlock::new(header, body, justification)); - - if let NewBlockState::Final = new_state { - storage.finalized_hash = hash; - storage.finalized_number = number.clone(); - } - - if number == Zero::zero() { - storage.genesis_hash = hash; - } - } - - Ok(()) - } - - /// Get total number of blocks. - pub fn blocks_count(&self) -> usize { - self.storage.read().blocks.len() - } - - /// Compare this blockchain with another in-mem blockchain - pub fn equals_to(&self, other: &Self) -> bool { - self.canon_equals_to(other) && self.storage.read().blocks == other.storage.read().blocks - } - - /// Compare canonical chain to other canonical chain. - pub fn canon_equals_to(&self, other: &Self) -> bool { - let this = self.storage.read(); - let other = other.storage.read(); - this.hashes == other.hashes - && this.best_hash == other.best_hash - && this.best_number == other.best_number - && this.genesis_hash == other.genesis_hash - } - - /// Insert header CHT root. - pub fn insert_cht_root(&self, block: NumberFor, cht_root: Block::Hash) { - self.storage.write().header_cht_roots.insert(block, cht_root); - } - - /// Set an existing block as head. - pub fn set_head(&self, id: BlockId) -> error::Result<()> { - let header = match self.header(id)? { - Some(h) => h, - None => return Err(error::ErrorKind::UnknownBlock(format!("{}", id)).into()), - }; - - self.apply_head(&header) - } - - fn apply_head(&self, header: &::Header) -> error::Result<()> { - let hash = header.hash(); - let number = header.number(); - - // Note: this may lock storage, so it must happen before obtaining storage - // write lock. 
- let best_tree_route = { - let best_hash = self.storage.read().best_hash; - if &best_hash == header.parent_hash() { - None - } else { - let route = crate::blockchain::tree_route( - self, - BlockId::Hash(best_hash), - BlockId::Hash(*header.parent_hash()), - )?; - Some(route) - } - }; - - let mut storage = self.storage.write(); - - if let Some(tree_route) = best_tree_route { - // apply retraction and enaction when reorganizing up to parent hash - let enacted = tree_route.enacted(); - - for entry in enacted { - storage.hashes.insert(entry.number, entry.hash); - } - - for entry in tree_route.retracted().iter().skip(enacted.len()) { - storage.hashes.remove(&entry.number); - } - } - - storage.best_hash = hash.clone(); - storage.best_number = number.clone(); - storage.hashes.insert(number.clone(), hash.clone()); - - Ok(()) - } - - fn finalize_header(&self, id: BlockId, justification: Option) -> error::Result<()> { - let hash = match self.header(id)? { - Some(h) => h.hash(), - None => return Err(error::ErrorKind::UnknownBlock(format!("{}", id)).into()), - }; - - let mut storage = self.storage.write(); - storage.finalized_hash = hash; - - if justification.is_some() { - let block = storage.blocks.get_mut(&hash) - .expect("hash was fetched from a block in the db; qed"); - - let block_justification = match block { - StoredBlock::Header(_, ref mut j) | StoredBlock::Full(_, ref mut j) => j - }; - - *block_justification = justification; - } - - Ok(()) - } - - fn write_aux(&self, ops: Vec<(Vec, Option>)>) { - let mut storage = self.storage.write(); - for (k, v) in ops { - match v { - Some(v) => storage.aux.insert(k, v), - None => storage.aux.remove(&k), - }; - } - } + /// Get header hash of given block. + pub fn id(&self, id: BlockId) -> Option { + match id { + BlockId::Hash(h) => Some(h), + BlockId::Number(n) => self.storage.read().hashes.get(&n).cloned(), + } + } + + /// Create new in-memory blockchain storage. + pub fn new() -> Blockchain { + let storage = Arc::new(RwLock::new(BlockchainStorage { + blocks: HashMap::new(), + hashes: HashMap::new(), + best_hash: Default::default(), + best_number: Zero::zero(), + finalized_hash: Default::default(), + finalized_number: Zero::zero(), + genesis_hash: Default::default(), + header_cht_roots: HashMap::new(), + changes_trie_cht_roots: HashMap::new(), + leaves: LeafSet::new(), + aux: HashMap::new(), + })); + Blockchain { + storage: storage.clone(), + } + } + + /// Insert a block header and associated data. + pub fn insert( + &self, + hash: Block::Hash, + header: ::Header, + justification: Option, + body: Option::Extrinsic>>, + new_state: NewBlockState, + ) -> crate::error::Result<()> { + let number = header.number().clone(); + if new_state.is_best() { + self.apply_head(&header)?; + } + + { + let mut storage = self.storage.write(); + storage + .leaves + .import(hash.clone(), number.clone(), header.parent_hash().clone()); + storage + .blocks + .insert(hash.clone(), StoredBlock::new(header, body, justification)); + + if let NewBlockState::Final = new_state { + storage.finalized_hash = hash; + storage.finalized_number = number.clone(); + } + + if number == Zero::zero() { + storage.genesis_hash = hash; + } + } + + Ok(()) + } + + /// Get total number of blocks. 
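// As `insert` above shows, a `NewBlockState::Final` import also advances the
// finalized pointers, and block number zero fixes the genesis hash, so a
// freshly inserted genesis block doubles as the finalized head. Illustrative
// only (assumes the `blockchain::Backend` trait is in scope for
// `last_finalized`):
//
//     chain.insert(genesis_hash, genesis_header, None, None, NewBlockState::Final)?;
//     assert_eq!(chain.last_finalized()?, genesis_hash);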
+ pub fn blocks_count(&self) -> usize { + self.storage.read().blocks.len() + } + + /// Compare this blockchain with another in-mem blockchain + pub fn equals_to(&self, other: &Self) -> bool { + self.canon_equals_to(other) && self.storage.read().blocks == other.storage.read().blocks + } + + /// Compare canonical chain to other canonical chain. + pub fn canon_equals_to(&self, other: &Self) -> bool { + let this = self.storage.read(); + let other = other.storage.read(); + this.hashes == other.hashes + && this.best_hash == other.best_hash + && this.best_number == other.best_number + && this.genesis_hash == other.genesis_hash + } + + /// Insert header CHT root. + pub fn insert_cht_root(&self, block: NumberFor, cht_root: Block::Hash) { + self.storage + .write() + .header_cht_roots + .insert(block, cht_root); + } + + /// Set an existing block as head. + pub fn set_head(&self, id: BlockId) -> error::Result<()> { + let header = match self.header(id)? { + Some(h) => h, + None => return Err(error::ErrorKind::UnknownBlock(format!("{}", id)).into()), + }; + + self.apply_head(&header) + } + + fn apply_head(&self, header: &::Header) -> error::Result<()> { + let hash = header.hash(); + let number = header.number(); + + // Note: this may lock storage, so it must happen before obtaining storage + // write lock. + let best_tree_route = { + let best_hash = self.storage.read().best_hash; + if &best_hash == header.parent_hash() { + None + } else { + let route = crate::blockchain::tree_route( + self, + BlockId::Hash(best_hash), + BlockId::Hash(*header.parent_hash()), + )?; + Some(route) + } + }; + + let mut storage = self.storage.write(); + + if let Some(tree_route) = best_tree_route { + // apply retraction and enaction when reorganizing up to parent hash + let enacted = tree_route.enacted(); + + for entry in enacted { + storage.hashes.insert(entry.number, entry.hash); + } + + for entry in tree_route.retracted().iter().skip(enacted.len()) { + storage.hashes.remove(&entry.number); + } + } + + storage.best_hash = hash.clone(); + storage.best_number = number.clone(); + storage.hashes.insert(number.clone(), hash.clone()); + + Ok(()) + } + + fn finalize_header( + &self, + id: BlockId, + justification: Option, + ) -> error::Result<()> { + let hash = match self.header(id)? 
{ + Some(h) => h.hash(), + None => return Err(error::ErrorKind::UnknownBlock(format!("{}", id)).into()), + }; + + let mut storage = self.storage.write(); + storage.finalized_hash = hash; + + if justification.is_some() { + let block = storage + .blocks + .get_mut(&hash) + .expect("hash was fetched from a block in the db; qed"); + + let block_justification = match block { + StoredBlock::Header(_, ref mut j) | StoredBlock::Full(_, ref mut j) => j, + }; + + *block_justification = justification; + } + + Ok(()) + } + + fn write_aux(&self, ops: Vec<(Vec, Option>)>) { + let mut storage = self.storage.write(); + for (k, v) in ops { + match v { + Some(v) => storage.aux.insert(k, v), + None => storage.aux.remove(&k), + }; + } + } } impl HeaderBackend for Blockchain { - fn header(&self, id: BlockId) -> error::Result::Header>> { - Ok(self.id(id).and_then(|hash| { - self.storage.read().blocks.get(&hash).map(|b| b.header().clone()) - })) - } - - fn info(&self) -> error::Result> { - let storage = self.storage.read(); - Ok(blockchain::Info { - best_hash: storage.best_hash, - best_number: storage.best_number, - genesis_hash: storage.genesis_hash, - finalized_hash: storage.finalized_hash, - finalized_number: storage.finalized_number, - }) - } - - fn status(&self, id: BlockId) -> error::Result { - match self.id(id).map_or(false, |hash| self.storage.read().blocks.contains_key(&hash)) { - true => Ok(BlockStatus::InChain), - false => Ok(BlockStatus::Unknown), - } - } - - fn number(&self, hash: Block::Hash) -> error::Result>> { - Ok(self.storage.read().blocks.get(&hash).map(|b| *b.header().number())) - } - - fn hash(&self, number: <::Header as HeaderT>::Number) -> error::Result> { - Ok(self.id(BlockId::Number(number))) - } + fn header(&self, id: BlockId) -> error::Result::Header>> { + Ok(self.id(id).and_then(|hash| { + self.storage + .read() + .blocks + .get(&hash) + .map(|b| b.header().clone()) + })) + } + + fn info(&self) -> error::Result> { + let storage = self.storage.read(); + Ok(blockchain::Info { + best_hash: storage.best_hash, + best_number: storage.best_number, + genesis_hash: storage.genesis_hash, + finalized_hash: storage.finalized_hash, + finalized_number: storage.finalized_number, + }) + } + + fn status(&self, id: BlockId) -> error::Result { + match self + .id(id) + .map_or(false, |hash| self.storage.read().blocks.contains_key(&hash)) + { + true => Ok(BlockStatus::InChain), + false => Ok(BlockStatus::Unknown), + } + } + + fn number(&self, hash: Block::Hash) -> error::Result>> { + Ok(self + .storage + .read() + .blocks + .get(&hash) + .map(|b| *b.header().number())) + } + + fn hash( + &self, + number: <::Header as HeaderT>::Number, + ) -> error::Result> { + Ok(self.id(BlockId::Number(number))) + } } - impl blockchain::Backend for Blockchain { - fn body(&self, id: BlockId) -> error::Result::Extrinsic>>> { - Ok(self.id(id).and_then(|hash| { - self.storage.read().blocks.get(&hash) - .and_then(|b| b.extrinsics().map(|x| x.to_vec())) - })) - } - - fn justification(&self, id: BlockId) -> error::Result> { - Ok(self.id(id).and_then(|hash| self.storage.read().blocks.get(&hash).and_then(|b| - b.justification().map(|x| x.clone())) - )) - } - - fn last_finalized(&self) -> error::Result { - Ok(self.storage.read().finalized_hash.clone()) - } - - fn cache(&self) -> Option>> { - None - } - - fn leaves(&self) -> error::Result> { - Ok(self.storage.read().leaves.hashes()) - } - - fn children(&self, _parent_hash: Block::Hash) -> error::Result> { - unimplemented!() - } + fn body(&self, id: BlockId) -> 
error::Result::Extrinsic>>> { + Ok(self.id(id).and_then(|hash| { + self.storage + .read() + .blocks + .get(&hash) + .and_then(|b| b.extrinsics().map(|x| x.to_vec())) + })) + } + + fn justification(&self, id: BlockId) -> error::Result> { + Ok(self.id(id).and_then(|hash| { + self.storage + .read() + .blocks + .get(&hash) + .and_then(|b| b.justification().map(|x| x.clone())) + })) + } + + fn last_finalized(&self) -> error::Result { + Ok(self.storage.read().finalized_hash.clone()) + } + + fn cache(&self) -> Option>> { + None + } + + fn leaves(&self) -> error::Result> { + Ok(self.storage.read().leaves.hashes()) + } + + fn children(&self, _parent_hash: Block::Hash) -> error::Result> { + unimplemented!() + } } impl blockchain::ProvideCache for Blockchain { - fn cache(&self) -> Option>> { - None - } + fn cache(&self) -> Option>> { + None + } } impl backend::AuxStore for Blockchain { - fn insert_aux< - 'a, - 'b: 'a, - 'c: 'a, - I: IntoIterator, - D: IntoIterator, - >(&self, insert: I, delete: D) -> error::Result<()> { - let mut storage = self.storage.write(); - for (k, v) in insert { - storage.aux.insert(k.to_vec(), v.to_vec()); - } - for k in delete { - storage.aux.remove(*k); - } - Ok(()) - } - - fn get_aux(&self, key: &[u8]) -> error::Result>> { - Ok(self.storage.read().aux.get(key).cloned()) - } + fn insert_aux< + 'a, + 'b: 'a, + 'c: 'a, + I: IntoIterator, + D: IntoIterator, + >( + &self, + insert: I, + delete: D, + ) -> error::Result<()> { + let mut storage = self.storage.write(); + for (k, v) in insert { + storage.aux.insert(k.to_vec(), v.to_vec()); + } + for k in delete { + storage.aux.remove(*k); + } + Ok(()) + } + + fn get_aux(&self, key: &[u8]) -> error::Result>> { + Ok(self.storage.read().aux.get(key).cloned()) + } } impl light::blockchain::Storage for Blockchain - where - Block::Hash: From<[u8; 32]>, +where + Block::Hash: From<[u8; 32]>, { - fn import_header( - &self, - header: Block::Header, - _cache: HashMap>, - state: NewBlockState, - aux_ops: Vec<(Vec, Option>)>, - ) -> error::Result<()> { - let hash = header.hash(); - self.insert(hash, header, None, None, state)?; - - self.write_aux(aux_ops); - Ok(()) - } - - fn set_head(&self, id: BlockId) -> error::Result<()> { - Blockchain::set_head(self, id) - } - - fn last_finalized(&self) -> error::Result { - Ok(self.storage.read().finalized_hash.clone()) - } - - fn finalize_header(&self, id: BlockId) -> error::Result<()> { - Blockchain::finalize_header(self, id, None) - } - - fn header_cht_root(&self, _cht_size: u64, block: NumberFor) -> error::Result { - self.storage.read().header_cht_roots.get(&block).cloned() - .ok_or_else(|| error::ErrorKind::Backend(format!("Header CHT for block {} not exists", block)).into()) - } - - fn changes_trie_cht_root(&self, _cht_size: u64, block: NumberFor) -> error::Result { - self.storage.read().changes_trie_cht_roots.get(&block).cloned() - .ok_or_else(|| error::ErrorKind::Backend(format!("Changes trie CHT for block {} not exists", block)).into()) - } - - fn cache(&self) -> Option>> { - None - } + fn import_header( + &self, + header: Block::Header, + _cache: HashMap>, + state: NewBlockState, + aux_ops: Vec<(Vec, Option>)>, + ) -> error::Result<()> { + let hash = header.hash(); + self.insert(hash, header, None, None, state)?; + + self.write_aux(aux_ops); + Ok(()) + } + + fn set_head(&self, id: BlockId) -> error::Result<()> { + Blockchain::set_head(self, id) + } + + fn last_finalized(&self) -> error::Result { + Ok(self.storage.read().finalized_hash.clone()) + } + + fn finalize_header(&self, id: BlockId) -> 
error::Result<()> { + Blockchain::finalize_header(self, id, None) + } + + fn header_cht_root( + &self, + _cht_size: u64, + block: NumberFor, + ) -> error::Result { + self.storage + .read() + .header_cht_roots + .get(&block) + .cloned() + .ok_or_else(|| { + error::ErrorKind::Backend(format!("Header CHT for block {} does not exist", block)) + .into() + }) + } + + fn changes_trie_cht_root( + &self, + _cht_size: u64, + block: NumberFor, + ) -> error::Result { + self.storage + .read() + .changes_trie_cht_roots + .get(&block) + .cloned() + .ok_or_else(|| { + error::ErrorKind::Backend(format!( + "Changes trie CHT for block {} does not exist", + block + )) + .into() + }) + } + + fn cache(&self) -> Option>> { + None + } } /// In-memory operation. pub struct BlockImportOperation { - pending_block: Option>, - pending_cache: HashMap>, - old_state: InMemory, - new_state: Option>, - changes_trie_update: Option>, - aux: Vec<(Vec, Option>)>, - finalized_blocks: Vec<(BlockId, Option)>, - set_head: Option>, + pending_block: Option>, + pending_cache: HashMap>, + old_state: InMemory, + new_state: Option>, + changes_trie_update: Option>, + aux: Vec<(Vec, Option>)>, + finalized_blocks: Vec<(BlockId, Option)>, + set_head: Option>, } impl backend::BlockImportOperation for BlockImportOperation where - Block: BlockT, - H: Hasher, + Block: BlockT, + H: Hasher, - H::Out: HeapSizeOf + Ord, + H::Out: HeapSizeOf + Ord, { - type State = InMemory; - - fn state(&self) -> error::Result> { - Ok(Some(&self.old_state)) - } - - fn set_block_data( - &mut self, - header: ::Header, - body: Option::Extrinsic>>, - justification: Option, - state: NewBlockState, - ) -> error::Result<()> { - assert!(self.pending_block.is_none(), "Only one block per operation is allowed"); - self.pending_block = Some(PendingBlock { - block: StoredBlock::new(header, body, justification), - state, - }); - Ok(()) - } - - fn update_cache(&mut self, cache: HashMap>) { - self.pending_cache = cache; - } - - fn update_db_storage(&mut self, update: as StateBackend>::Transaction) -> error::Result<()> { - self.new_state = Some(self.old_state.update(update)); - Ok(()) - } - - fn update_changes_trie(&mut self, update: MemoryDB) -> error::Result<()> { - self.changes_trie_update = Some(update); - Ok(()) - } - - fn reset_storage(&mut self, mut top: StorageOverlay, children: ChildrenStorageOverlay) -> error::Result { - check_genesis_storage(&top, &children)?; - - let mut transaction: Vec<(Option>, Vec, Option>)> = Default::default(); - - for (child_key, child_map) in children { - let (root, is_default, update) = self.old_state.child_storage_root(&child_key, child_map.into_iter().map(|(k, v)| (k, Some(v)))); - transaction.consolidate(update); - - if !is_default { - top.insert(child_key, root); - } - } - - let (root, update) = self.old_state.storage_root(top.into_iter().map(|(k, v)| (k, Some(v)))); - transaction.consolidate(update); - - self.new_state = Some(InMemory::from(transaction)); - Ok(root) - } - - fn insert_aux(&mut self, ops: I) -> error::Result<()> - where I: IntoIterator, Option>)> - { - self.aux.append(&mut ops.into_iter().collect()); - Ok(()) - } - - fn update_storage(&mut self, _update: Vec<(Vec, Option>)>) -> error::Result<()> { - Ok(()) - } - - fn mark_finalized(&mut self, block: BlockId, justification: Option) -> error::Result<()> { - self.finalized_blocks.push((block, justification)); - Ok(()) - } - - fn mark_head(&mut self, block: BlockId) -> error::Result<()> { - assert!(self.pending_block.is_none(), "Only one set block per operation is allowed"); -
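// Note that `mark_head` only records the request: the head is actually moved
// when `commit_operation` runs and calls `Blockchain::set_head` (see the
// `Backend` impl further down).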
self.set_head = Some(block); - Ok(()) - } + type State = InMemory; + + fn state(&self) -> error::Result> { + Ok(Some(&self.old_state)) + } + + fn set_block_data( + &mut self, + header: ::Header, + body: Option::Extrinsic>>, + justification: Option, + state: NewBlockState, + ) -> error::Result<()> { + assert!( + self.pending_block.is_none(), + "Only one block per operation is allowed" + ); + self.pending_block = Some(PendingBlock { + block: StoredBlock::new(header, body, justification), + state, + }); + Ok(()) + } + + fn update_cache(&mut self, cache: HashMap>) { + self.pending_cache = cache; + } + + fn update_db_storage( + &mut self, + update: as StateBackend>::Transaction, + ) -> error::Result<()> { + self.new_state = Some(self.old_state.update(update)); + Ok(()) + } + + fn update_changes_trie(&mut self, update: MemoryDB) -> error::Result<()> { + self.changes_trie_update = Some(update); + Ok(()) + } + + fn reset_storage( + &mut self, + mut top: StorageOverlay, + children: ChildrenStorageOverlay, + ) -> error::Result { + check_genesis_storage(&top, &children)?; + + let mut transaction: Vec<(Option>, Vec, Option>)> = Default::default(); + + for (child_key, child_map) in children { + let (root, is_default, update) = self + .old_state + .child_storage_root(&child_key, child_map.into_iter().map(|(k, v)| (k, Some(v)))); + transaction.consolidate(update); + + if !is_default { + top.insert(child_key, root); + } + } + + let (root, update) = self + .old_state + .storage_root(top.into_iter().map(|(k, v)| (k, Some(v)))); + transaction.consolidate(update); + + self.new_state = Some(InMemory::from(transaction)); + Ok(root) + } + + fn insert_aux(&mut self, ops: I) -> error::Result<()> + where + I: IntoIterator, Option>)>, + { + self.aux.append(&mut ops.into_iter().collect()); + Ok(()) + } + + fn update_storage(&mut self, _update: Vec<(Vec, Option>)>) -> error::Result<()> { + Ok(()) + } + + fn mark_finalized( + &mut self, + block: BlockId, + justification: Option, + ) -> error::Result<()> { + self.finalized_blocks.push((block, justification)); + Ok(()) + } + + fn mark_head(&mut self, block: BlockId) -> error::Result<()> { + assert!( + self.pending_block.is_none(), + "Only one set block per operation is allowed" + ); + self.set_head = Some(block); + Ok(()) + } } /// In-memory backend. Keeps all states and blocks in memory. Useful for testing. pub struct Backend where - Block: BlockT, - H: Hasher, - H::Out: HeapSizeOf + Ord, + Block: BlockT, + H: Hasher, + H::Out: HeapSizeOf + Ord, { - states: RwLock>>, - changes_trie_storage: ChangesTrieStorage, - blockchain: Blockchain, + states: RwLock>>, + changes_trie_storage: ChangesTrieStorage, + blockchain: Blockchain, } impl Backend where - Block: BlockT, - H: Hasher, - H::Out: HeapSizeOf + Ord, + Block: BlockT, + H: Hasher, + H::Out: HeapSizeOf + Ord, { - /// Create a new instance of in-mem backend. - pub fn new() -> Backend { - Backend { - states: RwLock::new(HashMap::new()), - changes_trie_storage: ChangesTrieStorage(InMemoryChangesTrieStorage::new()), - blockchain: Blockchain::new(), - } - } + /// Create a new instance of in-mem backend. 
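// Usage sketch, as in the tests at the bottom of this file (the concrete
// hasher is assumed to be `Blake2Hasher`):
//
//     let backend = Arc::new(Backend::<Block, Blake2Hasher>::new());
//     let mut op = backend.begin_operation()?;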
+ pub fn new() -> Backend { + Backend { + states: RwLock::new(HashMap::new()), + changes_trie_storage: ChangesTrieStorage(InMemoryChangesTrieStorage::new()), + blockchain: Blockchain::new(), + } + } } impl backend::AuxStore for Backend where - Block: BlockT, - H: Hasher, - H::Out: HeapSizeOf + Ord, + Block: BlockT, + H: Hasher, + H::Out: HeapSizeOf + Ord, { - fn insert_aux< - 'a, - 'b: 'a, - 'c: 'a, - I: IntoIterator, - D: IntoIterator, - >(&self, insert: I, delete: D) -> error::Result<()> { - self.blockchain.insert_aux(insert, delete) - } - - fn get_aux(&self, key: &[u8]) -> error::Result>> { - self.blockchain.get_aux(key) - } + fn insert_aux< + 'a, + 'b: 'a, + 'c: 'a, + I: IntoIterator, + D: IntoIterator, + >( + &self, + insert: I, + delete: D, + ) -> error::Result<()> { + self.blockchain.insert_aux(insert, delete) + } + + fn get_aux(&self, key: &[u8]) -> error::Result>> { + self.blockchain.get_aux(key) + } } impl backend::Backend for Backend where - Block: BlockT, - H: Hasher, - H::Out: HeapSizeOf + Ord, + Block: BlockT, + H: Hasher, + H::Out: HeapSizeOf + Ord, { - type BlockImportOperation = BlockImportOperation; - type Blockchain = Blockchain; - type State = InMemory; - type ChangesTrieStorage = ChangesTrieStorage; - - fn begin_operation(&self) -> error::Result { - let old_state = self.state_at(BlockId::Hash(Default::default()))?; - Ok(BlockImportOperation { - pending_block: None, - pending_cache: Default::default(), - old_state, - new_state: None, - changes_trie_update: None, - aux: Default::default(), - finalized_blocks: Default::default(), - set_head: None, - }) - } - - fn begin_state_operation(&self, operation: &mut Self::BlockImportOperation, block: BlockId) -> error::Result<()> { - operation.old_state = self.state_at(block)?; - Ok(()) - } - - fn commit_operation(&self, operation: Self::BlockImportOperation) -> error::Result<()> { - if !operation.finalized_blocks.is_empty() { - for (block, justification) in operation.finalized_blocks { - self.blockchain.finalize_header(block, justification)?; - } - } - - if let Some(pending_block) = operation.pending_block { - let old_state = &operation.old_state; - let (header, body, justification) = pending_block.block.into_inner(); - - let hash = header.hash(); - - self.states.write().insert(hash, operation.new_state.unwrap_or_else(|| old_state.clone())); - - let changes_trie_root = header.digest().log(DigestItem::as_changes_trie_root).cloned(); - if let Some(changes_trie_root) = changes_trie_root { - if let Some(changes_trie_update) = operation.changes_trie_update { - let changes_trie_root: H::Out = changes_trie_root.into(); - self.changes_trie_storage.0.insert(header.number().as_(), changes_trie_root, changes_trie_update); - } - } - - self.blockchain.insert(hash, header, justification, body, pending_block.state)?; - } - - if !operation.aux.is_empty() { - self.blockchain.write_aux(operation.aux); - } - - if let Some(set_head) = operation.set_head { - self.blockchain.set_head(set_head)?; - } - - Ok(()) - } - - fn finalize_block(&self, block: BlockId, justification: Option) -> error::Result<()> { - self.blockchain.finalize_header(block, justification) - } - - fn blockchain(&self) -> &Self::Blockchain { - &self.blockchain - } - - fn changes_trie_storage(&self) -> Option<&Self::ChangesTrieStorage> { - Some(&self.changes_trie_storage) - } - - fn state_at(&self, block: BlockId) -> error::Result { - match block { - BlockId::Hash(h) if h == Default::default() => { - return Ok(Self::State::default()); - }, - _ => {}, - } - - match 
self.blockchain.id(block).and_then(|id| self.states.read().get(&id).cloned()) { - Some(state) => Ok(state), - None => Err(error::ErrorKind::UnknownBlock(format!("{}", block)).into()), - } - } - - fn revert(&self, _n: NumberFor) -> error::Result> { - Ok(As::sa(0)) - } + type BlockImportOperation = BlockImportOperation; + type Blockchain = Blockchain; + type State = InMemory; + type ChangesTrieStorage = ChangesTrieStorage; + + fn begin_operation(&self) -> error::Result { + let old_state = self.state_at(BlockId::Hash(Default::default()))?; + Ok(BlockImportOperation { + pending_block: None, + pending_cache: Default::default(), + old_state, + new_state: None, + changes_trie_update: None, + aux: Default::default(), + finalized_blocks: Default::default(), + set_head: None, + }) + } + + fn begin_state_operation( + &self, + operation: &mut Self::BlockImportOperation, + block: BlockId, + ) -> error::Result<()> { + operation.old_state = self.state_at(block)?; + Ok(()) + } + + fn commit_operation(&self, operation: Self::BlockImportOperation) -> error::Result<()> { + if !operation.finalized_blocks.is_empty() { + for (block, justification) in operation.finalized_blocks { + self.blockchain.finalize_header(block, justification)?; + } + } + + if let Some(pending_block) = operation.pending_block { + let old_state = &operation.old_state; + let (header, body, justification) = pending_block.block.into_inner(); + + let hash = header.hash(); + + self.states.write().insert( + hash, + operation.new_state.unwrap_or_else(|| old_state.clone()), + ); + + let changes_trie_root = header + .digest() + .log(DigestItem::as_changes_trie_root) + .cloned(); + if let Some(changes_trie_root) = changes_trie_root { + if let Some(changes_trie_update) = operation.changes_trie_update { + let changes_trie_root: H::Out = changes_trie_root.into(); + self.changes_trie_storage.0.insert( + header.number().as_(), + changes_trie_root, + changes_trie_update, + ); + } + } + + self.blockchain + .insert(hash, header, justification, body, pending_block.state)?; + } + + if !operation.aux.is_empty() { + self.blockchain.write_aux(operation.aux); + } + + if let Some(set_head) = operation.set_head { + self.blockchain.set_head(set_head)?; + } + + Ok(()) + } + + fn finalize_block( + &self, + block: BlockId, + justification: Option, + ) -> error::Result<()> { + self.blockchain.finalize_header(block, justification) + } + + fn blockchain(&self) -> &Self::Blockchain { + &self.blockchain + } + + fn changes_trie_storage(&self) -> Option<&Self::ChangesTrieStorage> { + Some(&self.changes_trie_storage) + } + + fn state_at(&self, block: BlockId) -> error::Result { + match block { + BlockId::Hash(h) if h == Default::default() => { + return Ok(Self::State::default()); + } + _ => {} + } + + match self + .blockchain + .id(block) + .and_then(|id| self.states.read().get(&id).cloned()) + { + Some(state) => Ok(state), + None => Err(error::ErrorKind::UnknownBlock(format!("{}", block)).into()), + } + } + + fn revert(&self, _n: NumberFor) -> error::Result> { + Ok(As::sa(0)) + } } impl backend::LocalBackend for Backend where - Block: BlockT, - H: Hasher, - H::Out: HeapSizeOf + Ord, -{} + Block: BlockT, + H: Hasher, + H::Out: HeapSizeOf + Ord, +{ +} impl backend::RemoteBackend for Backend where - Block: BlockT, - H: Hasher, - H::Out: HeapSizeOf + Ord, + Block: BlockT, + H: Hasher, + H::Out: HeapSizeOf + Ord, { - fn is_local_state_available(&self, block: &BlockId) -> bool { - self.blockchain.expect_block_number_from_id(block) - .map(|num| num.is_zero()) - .unwrap_or(false) - 
} + fn is_local_state_available(&self, block: &BlockId) -> bool { + self.blockchain + .expect_block_number_from_id(block) + .map(|num| num.is_zero()) + .unwrap_or(false) + } } /// Prunable in-memory changes trie storage. -pub struct ChangesTrieStorage(InMemoryChangesTrieStorage) where H::Out: HeapSizeOf; -impl backend::PrunableStateChangesTrieStorage for ChangesTrieStorage where H::Out: HeapSizeOf { - fn oldest_changes_trie_block(&self, _config: &ChangesTrieConfiguration, _best_finalized: u64) -> u64 { - 0 - } +pub struct ChangesTrieStorage(InMemoryChangesTrieStorage) +where + H::Out: HeapSizeOf; +impl backend::PrunableStateChangesTrieStorage for ChangesTrieStorage +where + H::Out: HeapSizeOf, +{ + fn oldest_changes_trie_block( + &self, + _config: &ChangesTrieConfiguration, + _best_finalized: u64, + ) -> u64 { + 0 + } } -impl state_machine::ChangesTrieRootsStorage for ChangesTrieStorage where H::Out: HeapSizeOf { - fn root(&self, anchor: &ChangesTrieAnchorBlockId, block: u64) -> Result, String> { - self.0.root(anchor, block) - } +impl state_machine::ChangesTrieRootsStorage for ChangesTrieStorage +where + H::Out: HeapSizeOf, +{ + fn root( + &self, + anchor: &ChangesTrieAnchorBlockId, + block: u64, + ) -> Result, String> { + self.0.root(anchor, block) + } } -impl state_machine::ChangesTrieStorage for ChangesTrieStorage where H::Out: HeapSizeOf { - fn get(&self, key: &H::Out, prefix: &[u8]) -> Result, String> { - self.0.get(key, prefix) - } +impl state_machine::ChangesTrieStorage for ChangesTrieStorage +where + H::Out: HeapSizeOf, +{ + fn get(&self, key: &H::Out, prefix: &[u8]) -> Result, String> { + self.0.get(key, prefix) + } } /// Check that genesis storage is valid. -pub fn check_genesis_storage(top: &StorageOverlay, children: &ChildrenStorageOverlay) -> error::Result<()> { - if top.iter().any(|(k, _)| well_known_keys::is_child_storage_key(k)) { - return Err(error::ErrorKind::GenesisInvalid.into()); - } - - if children.keys().any(|child_key| !well_known_keys::is_child_storage_key(&child_key)) { - return Err(error::ErrorKind::GenesisInvalid.into()); - } - - Ok(()) +pub fn check_genesis_storage( + top: &StorageOverlay, + children: &ChildrenStorageOverlay, +) -> error::Result<()> { + if top + .iter() + .any(|(k, _)| well_known_keys::is_child_storage_key(k)) + { + return Err(error::ErrorKind::GenesisInvalid.into()); + } + + if children + .keys() + .any(|child_key| !well_known_keys::is_child_storage_key(&child_key)) + { + return Err(error::ErrorKind::GenesisInvalid.into()); + } + + Ok(()) } #[cfg(test)] mod tests { - use std::sync::Arc; - use test_client; - use primitives::Blake2Hasher; + use primitives::Blake2Hasher; + use std::sync::Arc; + use test_client; - type TestBackend = test_client::client::in_mem::Backend; + type TestBackend = + test_client::client::in_mem::Backend; - #[test] - fn test_leaves_with_complex_block_tree() { - let backend = Arc::new(TestBackend::new()); + #[test] + fn test_leaves_with_complex_block_tree() { + let backend = Arc::new(TestBackend::new()); - test_client::trait_tests::test_leaves_for_backend(backend); - } + test_client::trait_tests::test_leaves_for_backend(backend); + } - #[test] - fn test_blockchain_query_by_number_gets_canonical() { - let backend = Arc::new(TestBackend::new()); + #[test] + fn test_blockchain_query_by_number_gets_canonical() { + let backend = Arc::new(TestBackend::new()); - test_client::trait_tests::test_blockchain_query_by_number_gets_canonical(backend); - } + test_client::trait_tests::test_blockchain_query_by_number_gets_canonical(backend); 
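// Both tests delegate to the shared `trait_tests` suite, which drives the
// in-memory backend exclusively through the public backend traits, so the
// same checks can be reused against other backend implementations.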
+ } } diff --git a/core/client/src/leaves.rs b/core/client/src/leaves.rs index cc906d59a4..2959f95520 100644 --- a/core/client/src/leaves.rs +++ b/core/client/src/leaves.rs @@ -16,41 +16,41 @@ //! Helper for managing the set of available leaves in the chain for DB implementations. -use std::collections::BTreeMap; -use std::cmp::Reverse; -use kvdb::{KeyValueDB, DBTransaction}; -use runtime_primitives::traits::SimpleArithmetic; -use parity_codec::{Encode, Decode}; use crate::error; +use kvdb::{DBTransaction, KeyValueDB}; +use parity_codec::{Decode, Encode}; +use runtime_primitives::traits::SimpleArithmetic; +use std::cmp::Reverse; +use std::collections::BTreeMap; #[derive(Debug, Clone, PartialEq, Eq)] struct LeafSetItem { - hash: H, - number: Reverse, + hash: H, + number: Reverse, } /// A displaced leaf after import. #[must_use = "Displaced items from the leaf set must be handled."] pub struct ImportDisplaced { - new_hash: H, - displaced: LeafSetItem, + new_hash: H, + displaced: LeafSetItem, } /// Displaced leaves after finalization. #[must_use = "Displaced items from the leaf set must be handled."] pub struct FinalizationDisplaced { - leaves: BTreeMap, Vec>, + leaves: BTreeMap, Vec>, } impl FinalizationDisplaced { - /// Merge with another. This should only be used for displaced items that - /// are produced within one transaction of each other. - pub fn merge(&mut self, mut other: Self) { - // this will ignore keys that are in duplicate, however - // if these are actually produced correctly via the leaf-set within - // one transaction, then there will be no overlap in the keys. - self.leaves.append(&mut other.leaves); - } + /// Merge with another. This should only be used for displaced items that + /// are produced within one transaction of each other. + pub fn merge(&mut self, mut other: Self) { + // this will ignore keys that are in duplicate, however + // if these are actually produced correctly via the leaf-set within + // one transaction, then there will be no overlap in the keys. + self.leaves.append(&mut other.leaves); + } } /// list of leaf hashes ordered by number (descending). @@ -58,299 +58,336 @@ impl FinalizationDisplaced { /// this allows very fast checking and modification of active leaves. #[derive(Debug, Clone, PartialEq, Eq)] pub struct LeafSet { - storage: BTreeMap, Vec>, - pending_added: Vec>, - pending_removed: Vec, + storage: BTreeMap, Vec>, + pending_added: Vec>, + pending_removed: Vec, } -impl LeafSet where - H: Clone + PartialEq + Decode + Encode, - N: std::fmt::Debug + Clone + SimpleArithmetic + Decode + Encode, +impl LeafSet +where + H: Clone + PartialEq + Decode + Encode, + N: std::fmt::Debug + Clone + SimpleArithmetic + Decode + Encode, { - /// Construct a new, blank leaf set. - pub fn new() -> Self { - Self { - storage: BTreeMap::new(), - pending_added: Vec::new(), - pending_removed: Vec::new(), - } - } - - /// Read the leaf list from the DB, using given prefix for keys. 
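// Key layout assumed by `read_from_db` and `prepare_transaction` below: each
// leaf is stored under `prefix ++ hash.encode()` with `number.encode()` as
// the value, so a single prefix iteration recovers the whole set.
// Illustrative only:
//
//     key   = [ prefix bytes | hash.encode() ]
//     value = number.encode()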
- pub fn read_from_db(db: &KeyValueDB, column: Option, prefix: &[u8]) -> error::Result { - let mut storage = BTreeMap::new(); - - for (key, value) in db.iter_from_prefix(column, prefix) { - if !key.starts_with(prefix) { break } - let raw_hash = &mut &key[prefix.len()..]; - let hash = match Decode::decode(raw_hash) { - Some(hash) => hash, - None => return Err(error::ErrorKind::Backend("Error decoding hash".into()).into()), - }; - let number = match Decode::decode(&mut &value[..]) { - Some(number) => number, - None => return Err(error::ErrorKind::Backend("Error decoding number".into()).into()), - }; - storage.entry(Reverse(number)).or_insert_with(Vec::new).push(hash); - } - Ok(Self { - storage, - pending_added: Vec::new(), - pending_removed: Vec::new(), - }) - } - - /// update the leaf list on import. returns a displaced leaf if there was one. - pub fn import(&mut self, hash: H, number: N, parent_hash: H) -> Option> { - // avoid underflow for genesis. - let displaced = if number != N::zero() { - let new_number = Reverse(number.clone() - N::one()); - let was_displaced = self.remove_leaf(&new_number, &parent_hash); - - if was_displaced { - self.pending_removed.push(parent_hash.clone()); - Some(ImportDisplaced { - new_hash: hash.clone(), - displaced: LeafSetItem { - hash: parent_hash, - number: new_number, - }, - }) - } else { - None - } - } else { - None - }; - - self.insert_leaf(Reverse(number.clone()), hash.clone()); - self.pending_added.push(LeafSetItem { hash, number: Reverse(number) }); - displaced - } - - /// Note a block height finalized, displacing all leaves with number less than the finalized block's. - /// - /// Although it would be more technically correct to also prune out leaves at the - /// same number as the finalized block, but with different hashes, the current behavior - /// is simpler and our assumptions about how finalization works means that those leaves - /// will be pruned soon afterwards anyway. - pub fn finalize_height(&mut self, number: N) -> FinalizationDisplaced { - let boundary = if number == N::zero() { - return FinalizationDisplaced { leaves: BTreeMap::new() }; - } else { - number - N::one() - }; - - let below_boundary = self.storage.split_off(&Reverse(boundary)); - self.pending_removed.extend(below_boundary.values().flat_map(|h| h.iter()).cloned()); - FinalizationDisplaced { - leaves: below_boundary, - } - } - - /// Undo all pending operations. - /// - /// This returns an `Undo` struct, where any - /// `Displaced` objects that have returned by previous method calls - /// should be passed to via the appropriate methods. Otherwise, - /// the on-disk state may get out of sync with in-memory state. - pub fn undo(&mut self) -> Undo { - Undo { inner: self } - } - - /// currently since revert only affects the canonical chain - /// we assume that parent has no further children - /// and we add it as leaf again - pub fn revert(&mut self, hash: H, number: N, parent_hash: H) { - self.insert_leaf(Reverse(number.clone() - N::one()), parent_hash); - self.remove_leaf(&Reverse(number), &hash); - } - - /// returns an iterator over all hashes in the leaf set - /// ordered by their block number descending. - pub fn hashes(&self) -> Vec { - self.storage.iter().flat_map(|(_, hashes)| hashes.iter()).cloned().collect() - } - - /// Write the leaf list to the database transaction. - pub fn prepare_transaction(&mut self, tx: &mut DBTransaction, column: Option, prefix: &[u8]) { - let mut buf = prefix.to_vec(); - for LeafSetItem { hash, number } in self.pending_added.drain(..) 
{ - hash.using_encoded(|s| buf.extend(s)); - tx.put_vec(column, &buf[..], number.0.encode()); - buf.truncate(prefix.len()); // reuse allocation. - } - for hash in self.pending_removed.drain(..) { - hash.using_encoded(|s| buf.extend(s)); - tx.delete(column, &buf[..]); - buf.truncate(prefix.len()); // reuse allocation. - } - } - - #[cfg(test)] - fn contains(&self, number: N, hash: H) -> bool { - self.storage.get(&Reverse(number)).map_or(false, |hashes| hashes.contains(&hash)) - } - - fn insert_leaf(&mut self, number: Reverse, hash: H) { - self.storage.entry(number).or_insert_with(Vec::new).push(hash); - } - - // returns true if this leaf was contained, false otherwise. - fn remove_leaf(&mut self, number: &Reverse, hash: &H) -> bool { - let mut empty = false; - let removed = self.storage.get_mut(number).map_or(false, |leaves| { - let mut found = false; - leaves.retain(|h| if h == hash { - found = true; - false - } else { - true - }); - - if leaves.is_empty() { empty = true } - - found - }); - - if removed && empty { - self.storage.remove(number); - } - - removed - } + /// Construct a new, blank leaf set. + pub fn new() -> Self { + Self { + storage: BTreeMap::new(), + pending_added: Vec::new(), + pending_removed: Vec::new(), + } + } + + /// Read the leaf list from the DB, using given prefix for keys. + pub fn read_from_db( + db: &KeyValueDB, + column: Option, + prefix: &[u8], + ) -> error::Result { + let mut storage = BTreeMap::new(); + + for (key, value) in db.iter_from_prefix(column, prefix) { + if !key.starts_with(prefix) { + break; + } + let raw_hash = &mut &key[prefix.len()..]; + let hash = match Decode::decode(raw_hash) { + Some(hash) => hash, + None => return Err(error::ErrorKind::Backend("Error decoding hash".into()).into()), + }; + let number = match Decode::decode(&mut &value[..]) { + Some(number) => number, + None => { + return Err(error::ErrorKind::Backend("Error decoding number".into()).into()); + } + }; + storage + .entry(Reverse(number)) + .or_insert_with(Vec::new) + .push(hash); + } + Ok(Self { + storage, + pending_added: Vec::new(), + pending_removed: Vec::new(), + }) + } + + /// Update the leaf list on import. Returns a displaced leaf if there was one. + pub fn import(&mut self, hash: H, number: N, parent_hash: H) -> Option> { + // avoid underflow for genesis. + let displaced = if number != N::zero() { + let new_number = Reverse(number.clone() - N::one()); + let was_displaced = self.remove_leaf(&new_number, &parent_hash); + + if was_displaced { + self.pending_removed.push(parent_hash.clone()); + Some(ImportDisplaced { + new_hash: hash.clone(), + displaced: LeafSetItem { + hash: parent_hash, + number: new_number, + }, + }) + } else { + None + } + } else { + None + }; + + self.insert_leaf(Reverse(number.clone()), hash.clone()); + self.pending_added.push(LeafSetItem { + hash, + number: Reverse(number), + }); + displaced + } + + /// Note a block height finalized, displacing all leaves with number less than the finalized block's. + /// + /// Although it would be more technically correct to also prune out leaves at the + /// same number as the finalized block but with different hashes, the current behavior + /// is simpler, and our assumptions about how finalization works mean that those leaves + /// will be pruned soon afterwards anyway.
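// Worked example (cf. `finalization_consistent_with_disk` in the tests
// below): with leaves at heights 10, 11 and 12, `finalize_height(11)` sets
// the boundary to 10 and displaces every leaf at height <= 10; the leaves at
// heights 11 and 12 survive.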
+ pub fn finalize_height(&mut self, number: N) -> FinalizationDisplaced { + let boundary = if number == N::zero() { + return FinalizationDisplaced { + leaves: BTreeMap::new(), + }; + } else { + number - N::one() + }; + + let below_boundary = self.storage.split_off(&Reverse(boundary)); + self.pending_removed + .extend(below_boundary.values().flat_map(|h| h.iter()).cloned()); + FinalizationDisplaced { + leaves: below_boundary, + } + } + + /// Undo all pending operations. + /// + /// This returns an `Undo` struct, where any + /// `Displaced` objects that have returned by previous method calls + /// should be passed to via the appropriate methods. Otherwise, + /// the on-disk state may get out of sync with in-memory state. + pub fn undo(&mut self) -> Undo { + Undo { inner: self } + } + + /// currently since revert only affects the canonical chain + /// we assume that parent has no further children + /// and we add it as leaf again + pub fn revert(&mut self, hash: H, number: N, parent_hash: H) { + self.insert_leaf(Reverse(number.clone() - N::one()), parent_hash); + self.remove_leaf(&Reverse(number), &hash); + } + + /// returns an iterator over all hashes in the leaf set + /// ordered by their block number descending. + pub fn hashes(&self) -> Vec { + self.storage + .iter() + .flat_map(|(_, hashes)| hashes.iter()) + .cloned() + .collect() + } + + /// Write the leaf list to the database transaction. + pub fn prepare_transaction( + &mut self, + tx: &mut DBTransaction, + column: Option, + prefix: &[u8], + ) { + let mut buf = prefix.to_vec(); + for LeafSetItem { hash, number } in self.pending_added.drain(..) { + hash.using_encoded(|s| buf.extend(s)); + tx.put_vec(column, &buf[..], number.0.encode()); + buf.truncate(prefix.len()); // reuse allocation. + } + for hash in self.pending_removed.drain(..) { + hash.using_encoded(|s| buf.extend(s)); + tx.delete(column, &buf[..]); + buf.truncate(prefix.len()); // reuse allocation. + } + } + + #[cfg(test)] + fn contains(&self, number: N, hash: H) -> bool { + self.storage + .get(&Reverse(number)) + .map_or(false, |hashes| hashes.contains(&hash)) + } + + fn insert_leaf(&mut self, number: Reverse, hash: H) { + self.storage + .entry(number) + .or_insert_with(Vec::new) + .push(hash); + } + + // returns true if this leaf was contained, false otherwise. + fn remove_leaf(&mut self, number: &Reverse, hash: &H) -> bool { + let mut empty = false; + let removed = self.storage.get_mut(number).map_or(false, |leaves| { + let mut found = false; + leaves.retain(|h| { + if h == hash { + found = true; + false + } else { + true + } + }); + + if leaves.is_empty() { + empty = true + } + + found + }); + + if removed && empty { + self.storage.remove(number); + } + + removed + } } /// Helper for undoing operations. pub struct Undo<'a, H: 'a, N: 'a> { - inner: &'a mut LeafSet, + inner: &'a mut LeafSet, } -impl<'a, H: 'a, N: 'a> Undo<'a, H, N> where - H: Clone + PartialEq + Decode + Encode, - N: std::fmt::Debug + Clone + SimpleArithmetic + Decode + Encode, +impl<'a, H: 'a, N: 'a> Undo<'a, H, N> +where + H: Clone + PartialEq + Decode + Encode, + N: std::fmt::Debug + Clone + SimpleArithmetic + Decode + Encode, { - /// Undo an imported block by providing the displaced leaf. 
- pub fn undo_import(&mut self, displaced: ImportDisplaced) { - let new_number = Reverse(displaced.displaced.number.0.clone() + N::one()); - self.inner.remove_leaf(&new_number, &displaced.new_hash); - self.inner.insert_leaf(new_number, displaced.displaced.hash); - } - - /// Undo a finalization operation by providing the displaced leaves. - pub fn undo_finalization(&mut self, mut displaced: FinalizationDisplaced) { - self.inner.storage.append(&mut displaced.leaves); - } + /// Undo an imported block by providing the displaced leaf. + pub fn undo_import(&mut self, displaced: ImportDisplaced) { + let new_number = Reverse(displaced.displaced.number.0.clone() + N::one()); + self.inner.remove_leaf(&new_number, &displaced.new_hash); + self.inner.insert_leaf(new_number, displaced.displaced.hash); + } + + /// Undo a finalization operation by providing the displaced leaves. + pub fn undo_finalization(&mut self, mut displaced: FinalizationDisplaced) { + self.inner.storage.append(&mut displaced.leaves); + } } impl<'a, H: 'a, N: 'a> Drop for Undo<'a, H, N> { - fn drop(&mut self) { - self.inner.pending_added.clear(); - self.inner.pending_removed.clear(); - } + fn drop(&mut self) { + self.inner.pending_added.clear(); + self.inner.pending_removed.clear(); + } } #[cfg(test)] mod tests { - use super::*; - - #[test] - fn it_works() { - let mut set = LeafSet::new(); - set.import(0u32, 0u32, 0u32); - - set.import(1_1, 1, 0); - set.import(2_1, 2, 1_1); - set.import(3_1, 3, 2_1); - - assert!(set.contains(3, 3_1)); - assert!(!set.contains(2, 2_1)); - assert!(!set.contains(1, 1_1)); - assert!(!set.contains(0, 0)); - - set.import(2_2, 2, 1_1); - - assert!(set.contains(3, 3_1)); - assert!(set.contains(2, 2_2)); - } - - #[test] - fn flush_to_disk() { - const PREFIX: &[u8] = b"abcdefg"; - let db = ::kvdb_memorydb::create(0); - - let mut set = LeafSet::new(); - set.import(0u32, 0u32, 0u32); - - set.import(1_1, 1, 0); - set.import(2_1, 2, 1_1); - set.import(3_1, 3, 2_1); - - let mut tx = DBTransaction::new(); - - set.prepare_transaction(&mut tx, None, PREFIX); - db.write(tx).unwrap(); - - let set2 = LeafSet::read_from_db(&db, None, PREFIX).unwrap(); - assert_eq!(set, set2); - } - - #[test] - fn two_leaves_same_height_can_be_included() { - let mut set = LeafSet::new(); - - set.import(1_1u32, 10u32,0u32); - set.import(1_2, 10, 0); - - assert!(set.storage.contains_key(&Reverse(10))); - assert!(set.contains(10, 1_1)); - assert!(set.contains(10, 1_2)); - assert!(!set.contains(10, 1_3)); - } - - #[test] - fn finalization_consistent_with_disk() { - const PREFIX: &[u8] = b"prefix"; - let db = ::kvdb_memorydb::create(0); - - let mut set = LeafSet::new(); - set.import(10_1u32, 10u32, 0u32); - set.import(11_1, 11, 10_2); - set.import(11_2, 11, 10_2); - set.import(12_1, 12, 11_123); - - assert!(set.contains(10, 10_1)); - - let mut tx = DBTransaction::new(); - set.prepare_transaction(&mut tx, None, PREFIX); - db.write(tx).unwrap(); - - let _ = set.finalize_height(11); - let mut tx = DBTransaction::new(); - set.prepare_transaction(&mut tx, None, PREFIX); - db.write(tx).unwrap(); - - assert!(set.contains(11, 11_1)); - assert!(set.contains(11, 11_2)); - assert!(set.contains(12, 12_1)); - assert!(!set.contains(10, 10_1)); - - let set2 = LeafSet::read_from_db(&db, None, PREFIX).unwrap(); - assert_eq!(set, set2); - } - - #[test] - fn undo_finalization() { - let mut set = LeafSet::new(); - set.import(10_1u32, 10u32, 0u32); - set.import(11_1, 11, 10_2); - set.import(11_2, 11, 10_2); - set.import(12_1, 12, 11_123); - - let displaced = 
set.finalize_height(11); - assert!(!set.contains(10, 10_1)); - - set.undo().undo_finalization(displaced); - assert!(set.contains(10, 10_1)); - } + use super::*; + + #[test] + fn it_works() { + let mut set = LeafSet::new(); + set.import(0u32, 0u32, 0u32); + + set.import(1_1, 1, 0); + set.import(2_1, 2, 1_1); + set.import(3_1, 3, 2_1); + + assert!(set.contains(3, 3_1)); + assert!(!set.contains(2, 2_1)); + assert!(!set.contains(1, 1_1)); + assert!(!set.contains(0, 0)); + + set.import(2_2, 2, 1_1); + + assert!(set.contains(3, 3_1)); + assert!(set.contains(2, 2_2)); + } + + #[test] + fn flush_to_disk() { + const PREFIX: &[u8] = b"abcdefg"; + let db = ::kvdb_memorydb::create(0); + + let mut set = LeafSet::new(); + set.import(0u32, 0u32, 0u32); + + set.import(1_1, 1, 0); + set.import(2_1, 2, 1_1); + set.import(3_1, 3, 2_1); + + let mut tx = DBTransaction::new(); + + set.prepare_transaction(&mut tx, None, PREFIX); + db.write(tx).unwrap(); + + let set2 = LeafSet::read_from_db(&db, None, PREFIX).unwrap(); + assert_eq!(set, set2); + } + + #[test] + fn two_leaves_same_height_can_be_included() { + let mut set = LeafSet::new(); + + set.import(1_1u32, 10u32, 0u32); + set.import(1_2, 10, 0); + + assert!(set.storage.contains_key(&Reverse(10))); + assert!(set.contains(10, 1_1)); + assert!(set.contains(10, 1_2)); + assert!(!set.contains(10, 1_3)); + } + + #[test] + fn finalization_consistent_with_disk() { + const PREFIX: &[u8] = b"prefix"; + let db = ::kvdb_memorydb::create(0); + + let mut set = LeafSet::new(); + set.import(10_1u32, 10u32, 0u32); + set.import(11_1, 11, 10_2); + set.import(11_2, 11, 10_2); + set.import(12_1, 12, 11_123); + + assert!(set.contains(10, 10_1)); + + let mut tx = DBTransaction::new(); + set.prepare_transaction(&mut tx, None, PREFIX); + db.write(tx).unwrap(); + + let _ = set.finalize_height(11); + let mut tx = DBTransaction::new(); + set.prepare_transaction(&mut tx, None, PREFIX); + db.write(tx).unwrap(); + + assert!(set.contains(11, 11_1)); + assert!(set.contains(11, 11_2)); + assert!(set.contains(12, 12_1)); + assert!(!set.contains(10, 10_1)); + + let set2 = LeafSet::read_from_db(&db, None, PREFIX).unwrap(); + assert_eq!(set, set2); + } + + #[test] + fn undo_finalization() { + let mut set = LeafSet::new(); + set.import(10_1u32, 10u32, 0u32); + set.import(11_1, 11, 10_2); + set.import(11_2, 11, 10_2); + set.import(12_1, 12, 11_123); + + let displaced = set.finalize_height(11); + assert!(!set.contains(10, 10_1)); + + set.undo().undo_finalization(displaced); + assert!(set.contains(10, 10_1)); + } } diff --git a/core/client/src/lib.rs b/core/client/src/lib.rs index d2da243d14..4bd9323660 100644 --- a/core/client/src/lib.rs +++ b/core/client/src/lib.rs @@ -18,54 +18,52 @@ #![cfg_attr(not(feature = "std"), no_std)] #![warn(missing_docs)] -#![recursion_limit="128"] +#![recursion_limit = "128"] #[macro_use] pub mod runtime_api; #[cfg(feature = "std")] -pub mod error; +pub mod backend; +pub mod block_builder; #[cfg(feature = "std")] pub mod blockchain; #[cfg(feature = "std")] -pub mod backend; +mod call_executor; +#[cfg(feature = "std")] +pub mod children; #[cfg(feature = "std")] pub mod cht; #[cfg(feature = "std")] -pub mod in_mem; +mod client; +#[cfg(feature = "std")] +pub mod error; #[cfg(feature = "std")] pub mod genesis; -pub mod block_builder; #[cfg(feature = "std")] -pub mod light; +pub mod in_mem; #[cfg(feature = "std")] pub mod leaves; #[cfg(feature = "std")] -pub mod children; -#[cfg(feature = "std")] -mod call_executor; -#[cfg(feature = "std")] -mod client; +pub mod light; 
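The `LeafSet` reformatted in leaves.rs above keeps the chain tips in a `BTreeMap` keyed by `Reverse(number)`, so iteration yields the highest blocks first and `split_off` at the finalization boundary hands back exactly the displaced leaves. A minimal, self-contained sketch of that bookkeeping, assuming concrete `u64` hashes and numbers in place of the generic `H`/`N` parameters (`SimpleLeafSet` is illustrative only, not the crate's type):

// Sketch of the leaf-set bookkeeping; `SimpleLeafSet` is hypothetical.
use std::cmp::Reverse;
use std::collections::BTreeMap;

#[derive(Default)]
struct SimpleLeafSet {
    // Reverse(number) keys sort the highest blocks first, so
    // `split_off(&Reverse(boundary))` returns every leaf at or below the boundary.
    storage: BTreeMap<Reverse<u64>, Vec<u64>>,
}

impl SimpleLeafSet {
    // Importing a block makes it a leaf and displaces its parent, which can
    // no longer be a chain tip (mirrors `LeafSet::import`).
    fn import(&mut self, hash: u64, number: u64, parent_hash: u64) {
        if number != 0 {
            let parent_key = Reverse(number - 1);
            let now_empty = self.storage.get_mut(&parent_key).map_or(false, |siblings| {
                siblings.retain(|h| *h != parent_hash);
                siblings.is_empty()
            });
            if now_empty {
                self.storage.remove(&parent_key);
            }
        }
        self.storage.entry(Reverse(number)).or_insert_with(Vec::new).push(hash);
    }

    // Finalizing height `number` displaces every leaf below it
    // (mirrors `LeafSet::finalize_height`).
    fn finalize_height(&mut self, number: u64) -> BTreeMap<Reverse<u64>, Vec<u64>> {
        if number == 0 {
            return BTreeMap::new();
        }
        self.storage.split_off(&Reverse(number - 1))
    }
}

fn main() {
    let mut set = SimpleLeafSet::default();
    set.import(10, 1, 0);
    set.import(20, 2, 10); // block 20 displaces its parent as a leaf
    set.import(21, 2, 10); // a fork: two leaves at height 2
    set.import(11, 1, 0); // a stale fork tip at height 1
    assert_eq!(set.storage.get(&Reverse(2)), Some(&vec![20, 21]));

    let displaced = set.finalize_height(2); // everything below height 2 goes
    assert_eq!(displaced.get(&Reverse(1)), Some(&vec![11]));
    assert!(set.storage.get(&Reverse(1)).is_none());
}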
#[cfg(feature = "std")] mod notifications; - #[cfg(feature = "std")] pub use crate::blockchain::Info as ChainInfo; #[cfg(feature = "std")] pub use crate::call_executor::{CallExecutor, LocalCallExecutor}; #[cfg(feature = "std")] pub use crate::client::{ - new_with_backend, - new_in_mem, - BlockBody, BlockStatus, ImportNotifications, FinalityNotifications, BlockchainEvents, - BlockImportNotification, Client, ClientInfo, ChainHead, ExecutionStrategies, + new_in_mem, new_with_backend, BlockBody, BlockImportNotification, BlockStatus, + BlockchainEvents, ChainHead, Client, ClientInfo, ExecutionStrategies, FinalityNotifications, + ImportNotifications, }; #[cfg(feature = "std")] -pub use crate::notifications::{StorageEventStream, StorageChangeSet}; +pub use crate::leaves::LeafSet; +#[cfg(feature = "std")] +pub use crate::notifications::{StorageChangeSet, StorageEventStream}; #[cfg(feature = "std")] pub use state_machine::ExecutionStrategy; -#[cfg(feature = "std")] -pub use crate::leaves::LeafSet; #[doc(inline)] pub use sr_api_macros::{decl_runtime_apis, impl_runtime_apis}; diff --git a/core/client/src/light/backend.rs b/core/client/src/light/backend.rs index b4b805f3c6..41c7da1766 100644 --- a/core/client/src/light/backend.rs +++ b/core/client/src/light/backend.rs @@ -17,509 +17,559 @@ //! Light client backend. Only stores headers and justifications of blocks. //! Everything else is requested from full nodes on demand. -use std::collections::HashMap; -use std::sync::{Arc, Weak}; use futures::{Future, IntoFuture}; use parking_lot::RwLock; +use std::collections::HashMap; +use std::sync::{Arc, Weak}; -use runtime_primitives::{generic::BlockId, Justification, StorageOverlay, ChildrenStorageOverlay}; -use state_machine::{Backend as StateBackend, TrieBackend, backend::InMemory as InMemoryState}; -use runtime_primitives::traits::{Block as BlockT, NumberFor, Zero, Header}; -use crate::in_mem::{self, check_genesis_storage}; -use crate::backend::{AuxStore, Backend as ClientBackend, BlockImportOperation, RemoteBackend, NewBlockState}; +use crate::backend::{ + AuxStore, Backend as ClientBackend, BlockImportOperation, NewBlockState, RemoteBackend, +}; use crate::blockchain::HeaderBackend as BlockchainHeaderBackend; use crate::error::{Error as ClientError, ErrorKind as ClientErrorKind, Result as ClientResult}; +use crate::in_mem::{self, check_genesis_storage}; use crate::light::blockchain::{Blockchain, Storage as BlockchainStorage}; use crate::light::fetcher::{Fetcher, RemoteReadRequest}; +use consensus::well_known_cache_keys; use hash_db::Hasher; -use trie::MemoryDB; use heapsize::HeapSizeOf; -use consensus::well_known_cache_keys; +use runtime_primitives::traits::{Block as BlockT, Header, NumberFor, Zero}; +use runtime_primitives::{generic::BlockId, ChildrenStorageOverlay, Justification, StorageOverlay}; +use state_machine::{backend::InMemory as InMemoryState, Backend as StateBackend, TrieBackend}; +use trie::MemoryDB; -const IN_MEMORY_EXPECT_PROOF: &str = "InMemory state backend has Void error type and always succeeds; qed"; +const IN_MEMORY_EXPECT_PROOF: &str = + "InMemory state backend has Void error type and always succeeds; qed"; /// Light client backend. pub struct Backend { - blockchain: Arc>, - genesis_state: RwLock>>, + blockchain: Arc>, + genesis_state: RwLock>>, } /// Light block (header and justification) import operation.
pub struct ImportOperation { - header: Option, - cache: HashMap>, - leaf_state: NewBlockState, - aux_ops: Vec<(Vec, Option>)>, - finalized_blocks: Vec>, - set_head: Option>, - storage_update: Option>, - _phantom: ::std::marker::PhantomData<(S, F)>, + header: Option, + cache: HashMap>, + leaf_state: NewBlockState, + aux_ops: Vec<(Vec, Option>)>, + finalized_blocks: Vec>, + set_head: Option>, + storage_update: Option>, + _phantom: ::std::marker::PhantomData<(S, F)>, } /// On-demand state. pub struct OnDemandState { - fetcher: Weak, - blockchain: Weak>, - block: Block::Hash, - cached_header: RwLock>, + fetcher: Weak, + blockchain: Weak>, + block: Block::Hash, + cached_header: RwLock>, } /// On-demand or in-memory genesis state. pub enum OnDemandOrGenesisState { - /// On-demand state - storage values are fetched from remote nodes. - OnDemand(OnDemandState), - /// Genesis state - storage values are stored in-memory. - Genesis(InMemoryState), + /// On-demand state - storage values are fetched from remote nodes. + OnDemand(OnDemandState), + /// Genesis state - storage values are stored in-memory. + Genesis(InMemoryState), } impl Backend { - /// Create new light backend. - pub fn new(blockchain: Arc>) -> Self { - Self { - blockchain, - genesis_state: RwLock::new(None), - } - } - - /// Get shared blockchain reference. - pub fn blockchain(&self) -> &Arc> { - &self.blockchain - } + /// Create new light backend. + pub fn new(blockchain: Arc>) -> Self { + Self { + blockchain, + genesis_state: RwLock::new(None), + } + } + + /// Get shared blockchain reference. + pub fn blockchain(&self) -> &Arc> { + &self.blockchain + } } impl AuxStore for Backend { - fn insert_aux< - 'a, - 'b: 'a, - 'c: 'a, - I: IntoIterator, - D: IntoIterator, - >(&self, insert: I, delete: D) -> ClientResult<()> { - self.blockchain.storage().insert_aux(insert, delete) - } - - fn get_aux(&self, key: &[u8]) -> ClientResult>> { - self.blockchain.storage().get_aux(key) - } + fn insert_aux< + 'a, + 'b: 'a, + 'c: 'a, + I: IntoIterator, + D: IntoIterator, + >( + &self, + insert: I, + delete: D, + ) -> ClientResult<()> { + self.blockchain.storage().insert_aux(insert, delete) + } + + fn get_aux(&self, key: &[u8]) -> ClientResult>> { + self.blockchain.storage().get_aux(key) + } } -impl ClientBackend for Backend where - Block: BlockT, - S: BlockchainStorage, - F: Fetcher, - H: Hasher, - H::Out: HeapSizeOf + Ord, +impl ClientBackend for Backend +where + Block: BlockT, + S: BlockchainStorage, + F: Fetcher, + H: Hasher, + H::Out: HeapSizeOf + Ord, { - type BlockImportOperation = ImportOperation; - type Blockchain = Blockchain; - type State = OnDemandOrGenesisState; - type ChangesTrieStorage = in_mem::ChangesTrieStorage; - - fn begin_operation(&self) -> ClientResult { - Ok(ImportOperation { - header: None, - cache: Default::default(), - leaf_state: NewBlockState::Normal, - aux_ops: Vec::new(), - finalized_blocks: Vec::new(), - set_head: None, - storage_update: None, - _phantom: Default::default(), - }) - } - - fn begin_state_operation( - &self, - _operation: &mut Self::BlockImportOperation, - _block: BlockId - ) -> ClientResult<()> { - Ok(()) - } - - fn commit_operation(&self, mut operation: Self::BlockImportOperation) -> ClientResult<()> { - if !operation.finalized_blocks.is_empty() { - for block in operation.finalized_blocks { - self.blockchain.storage().finalize_header(block)?; - } - } - - if let Some(header) = operation.header { - let is_genesis_import = header.number().is_zero(); - self.blockchain.storage().import_header( - header, - 
operation.cache, - operation.leaf_state, - operation.aux_ops, - )?; - - // when importing genesis block => remember its state - if is_genesis_import { - *self.genesis_state.write() = operation.storage_update.take(); - } - } else { - for (key, maybe_val) in operation.aux_ops { - match maybe_val { - Some(val) => self.blockchain.storage().insert_aux( - &[(&key[..], &val[..])], - ::std::iter::empty(), - )?, - None => self.blockchain.storage().insert_aux(::std::iter::empty(), &[&key[..]])?, - } - } - } - - if let Some(set_head) = operation.set_head { - self.blockchain.storage().set_head(set_head)?; - } - - Ok(()) - } - - fn finalize_block(&self, block: BlockId, _justification: Option) -> ClientResult<()> { - self.blockchain.storage().finalize_header(block) - } - - fn blockchain(&self) -> &Blockchain { - &self.blockchain - } - - fn changes_trie_storage(&self) -> Option<&Self::ChangesTrieStorage> { - None - } - - fn state_at(&self, block: BlockId) -> ClientResult { - let block_number = self.blockchain.expect_block_number_from_id(&block)?; - - // special case for genesis block - if block_number.is_zero() { - if let Some(genesis_state) = self.genesis_state.read().clone() { - return Ok(OnDemandOrGenesisState::Genesis(genesis_state)); - } - } - - // else create on-demand state - let block_hash = self.blockchain.expect_block_hash_from_id(&block)?; - Ok(OnDemandOrGenesisState::OnDemand(OnDemandState { - fetcher: self.blockchain.fetcher(), - blockchain: Arc::downgrade(&self.blockchain), - block: block_hash, - cached_header: RwLock::new(None), - })) - } - - fn revert(&self, _n: NumberFor) -> ClientResult> { - Err(ClientErrorKind::NotAvailableOnLightClient.into()) - } + type BlockImportOperation = ImportOperation; + type Blockchain = Blockchain; + type State = OnDemandOrGenesisState; + type ChangesTrieStorage = in_mem::ChangesTrieStorage; + + fn begin_operation(&self) -> ClientResult { + Ok(ImportOperation { + header: None, + cache: Default::default(), + leaf_state: NewBlockState::Normal, + aux_ops: Vec::new(), + finalized_blocks: Vec::new(), + set_head: None, + storage_update: None, + _phantom: Default::default(), + }) + } + + fn begin_state_operation( + &self, + _operation: &mut Self::BlockImportOperation, + _block: BlockId, + ) -> ClientResult<()> { + Ok(()) + } + + fn commit_operation(&self, mut operation: Self::BlockImportOperation) -> ClientResult<()> { + if !operation.finalized_blocks.is_empty() { + for block in operation.finalized_blocks { + self.blockchain.storage().finalize_header(block)?; + } + } + + if let Some(header) = operation.header { + let is_genesis_import = header.number().is_zero(); + self.blockchain.storage().import_header( + header, + operation.cache, + operation.leaf_state, + operation.aux_ops, + )?; + + // when importing genesis block => remember its state + if is_genesis_import { + *self.genesis_state.write() = operation.storage_update.take(); + } + } else { + for (key, maybe_val) in operation.aux_ops { + match maybe_val { + Some(val) => self + .blockchain + .storage() + .insert_aux(&[(&key[..], &val[..])], ::std::iter::empty())?, + None => self + .blockchain + .storage() + .insert_aux(::std::iter::empty(), &[&key[..]])?, + } + } + } + + if let Some(set_head) = operation.set_head { + self.blockchain.storage().set_head(set_head)?; + } + + Ok(()) + } + + fn finalize_block( + &self, + block: BlockId, + _justification: Option, + ) -> ClientResult<()> { + self.blockchain.storage().finalize_header(block) + } + + fn blockchain(&self) -> &Blockchain { + &self.blockchain + } + + fn 
changes_trie_storage(&self) -> Option<&Self::ChangesTrieStorage> { + None + } + + fn state_at(&self, block: BlockId) -> ClientResult { + let block_number = self.blockchain.expect_block_number_from_id(&block)?; + + // special case for genesis block + if block_number.is_zero() { + if let Some(genesis_state) = self.genesis_state.read().clone() { + return Ok(OnDemandOrGenesisState::Genesis(genesis_state)); + } + } + + // else create on-demand state + let block_hash = self.blockchain.expect_block_hash_from_id(&block)?; + Ok(OnDemandOrGenesisState::OnDemand(OnDemandState { + fetcher: self.blockchain.fetcher(), + blockchain: Arc::downgrade(&self.blockchain), + block: block_hash, + cached_header: RwLock::new(None), + })) + } + + fn revert(&self, _n: NumberFor) -> ClientResult> { + Err(ClientErrorKind::NotAvailableOnLightClient.into()) + } } impl RemoteBackend for Backend where - Block: BlockT, - S: BlockchainStorage, - F: Fetcher, - H: Hasher, - H::Out: HeapSizeOf + Ord, + Block: BlockT, + S: BlockchainStorage, + F: Fetcher, + H: Hasher, + H::Out: HeapSizeOf + Ord, { - fn is_local_state_available(&self, block: &BlockId) -> bool { - self.genesis_state.read().is_some() - && self.blockchain.expect_block_number_from_id(block) - .map(|num| num.is_zero()) - .unwrap_or(false) - } + fn is_local_state_available(&self, block: &BlockId) -> bool { + self.genesis_state.read().is_some() + && self + .blockchain + .expect_block_number_from_id(block) + .map(|num| num.is_zero()) + .unwrap_or(false) + } } impl BlockImportOperation for ImportOperation where - Block: BlockT, - F: Fetcher, - S: BlockchainStorage, - H: Hasher, - H::Out: HeapSizeOf + Ord, + Block: BlockT, + F: Fetcher, + S: BlockchainStorage, + H: Hasher, + H::Out: HeapSizeOf + Ord, { - type State = OnDemandOrGenesisState; - - fn state(&self) -> ClientResult> { - // None means 'locally-stateless' backend - Ok(None) - } - - fn set_block_data( - &mut self, - header: Block::Header, - _body: Option>, - _justification: Option, - state: NewBlockState, - ) -> ClientResult<()> { - self.leaf_state = state; - self.header = Some(header); - Ok(()) - } - - fn update_cache(&mut self, cache: HashMap>) { - self.cache = cache; - } - - fn update_db_storage(&mut self, _update: >::Transaction) -> ClientResult<()> { - // we're not storing anything locally => ignore changes - Ok(()) - } - - fn update_changes_trie(&mut self, _update: MemoryDB) -> ClientResult<()> { - // we're not storing anything locally => ignore changes - Ok(()) - } - - fn reset_storage(&mut self, top: StorageOverlay, children: ChildrenStorageOverlay) -> ClientResult { - check_genesis_storage(&top, &children)?; - - // this is only called when genesis block is imported => shouldn't be performance bottleneck - let mut storage: HashMap>, StorageOverlay> = HashMap::new(); - storage.insert(None, top); - for (child_key, child_storage) in children { - storage.insert(Some(child_key), child_storage); - } - let storage_update: InMemoryState = storage.into(); - let (storage_root, _) = storage_update.storage_root(::std::iter::empty()); - self.storage_update = Some(storage_update); - - Ok(storage_root) - } - - fn insert_aux(&mut self, ops: I) -> ClientResult<()> - where I: IntoIterator, Option>)> - { - self.aux_ops.append(&mut ops.into_iter().collect()); - Ok(()) - } - - fn update_storage(&mut self, _update: Vec<(Vec, Option>)>) -> ClientResult<()> { - // we're not storing anything locally => ignore changes - Ok(()) - } - - fn mark_finalized(&mut self, block: BlockId, _justification: Option) -> ClientResult<()> { - 
self.finalized_blocks.push(block); - Ok(()) - } - - fn mark_head(&mut self, block: BlockId) -> ClientResult<()> { - self.set_head = Some(block); - Ok(()) - } + type State = OnDemandOrGenesisState; + + fn state(&self) -> ClientResult> { + // None means 'locally-stateless' backend + Ok(None) + } + + fn set_block_data( + &mut self, + header: Block::Header, + _body: Option>, + _justification: Option, + state: NewBlockState, + ) -> ClientResult<()> { + self.leaf_state = state; + self.header = Some(header); + Ok(()) + } + + fn update_cache(&mut self, cache: HashMap>) { + self.cache = cache; + } + + fn update_db_storage( + &mut self, + _update: >::Transaction, + ) -> ClientResult<()> { + // we're not storing anything locally => ignore changes + Ok(()) + } + + fn update_changes_trie(&mut self, _update: MemoryDB) -> ClientResult<()> { + // we're not storing anything locally => ignore changes + Ok(()) + } + + fn reset_storage( + &mut self, + top: StorageOverlay, + children: ChildrenStorageOverlay, + ) -> ClientResult { + check_genesis_storage(&top, &children)?; + + // this is only called when genesis block is imported => shouldn't be performance bottleneck + let mut storage: HashMap>, StorageOverlay> = HashMap::new(); + storage.insert(None, top); + for (child_key, child_storage) in children { + storage.insert(Some(child_key), child_storage); + } + let storage_update: InMemoryState = storage.into(); + let (storage_root, _) = storage_update.storage_root(::std::iter::empty()); + self.storage_update = Some(storage_update); + + Ok(storage_root) + } + + fn insert_aux(&mut self, ops: I) -> ClientResult<()> + where + I: IntoIterator, Option>)>, + { + self.aux_ops.append(&mut ops.into_iter().collect()); + Ok(()) + } + + fn update_storage(&mut self, _update: Vec<(Vec, Option>)>) -> ClientResult<()> { + // we're not storing anything locally => ignore changes + Ok(()) + } + + fn mark_finalized( + &mut self, + block: BlockId, + _justification: Option, + ) -> ClientResult<()> { + self.finalized_blocks.push(block); + Ok(()) + } + + fn mark_head(&mut self, block: BlockId) -> ClientResult<()> { + self.set_head = Some(block); + Ok(()) + } } impl StateBackend for OnDemandState where - Block: BlockT, - S: BlockchainStorage, - F: Fetcher, - H: Hasher, + Block: BlockT, + S: BlockchainStorage, + F: Fetcher, + H: Hasher, { - type Error = ClientError; - type Transaction = (); - type TrieBackendStorage = MemoryDB; - - fn storage(&self, key: &[u8]) -> ClientResult>> { - let mut header = self.cached_header.read().clone(); - if header.is_none() { - let cached_header = self.blockchain.upgrade() - .ok_or_else(|| ClientErrorKind::UnknownBlock(format!("{}", self.block)).into()) - .and_then(|blockchain| blockchain.expect_header(BlockId::Hash(self.block)))?; - header = Some(cached_header.clone()); - *self.cached_header.write() = Some(cached_header); - } - - self.fetcher.upgrade().ok_or(ClientErrorKind::NotAvailableOnLightClient)? 
- .remote_read(RemoteReadRequest { - block: self.block, - header: header.expect("if block above guarantees that header is_some(); qed"), - key: key.to_vec(), - retry_count: None, - }) - .into_future().wait() - } - - fn child_storage(&self, _storage_key: &[u8], _key: &[u8]) -> ClientResult>> { - Err(ClientErrorKind::NotAvailableOnLightClient.into()) - } - - fn for_keys_with_prefix(&self, _prefix: &[u8], _action: A) { - // whole state is not available on light node - } - - fn for_keys_in_child_storage(&self, _storage_key: &[u8], _action: A) { - // whole state is not available on light node - } - - fn storage_root(&self, _delta: I) -> (H::Out, Self::Transaction) - where - I: IntoIterator, Option>)> - { - (H::Out::default(), ()) - } - - fn child_storage_root(&self, _key: &[u8], _delta: I) -> (Vec, bool, Self::Transaction) - where - I: IntoIterator, Option>)> - { - (H::Out::default().as_ref().to_vec(), true, ()) - } - - fn pairs(&self) -> Vec<(Vec, Vec)> { - // whole state is not available on light node - Vec::new() - } - - fn keys(&self, _prefix: &Vec) -> Vec> { - // whole state is not available on light node - Vec::new() - } - - fn try_into_trie_backend(self) -> Option> { - None - } + type Error = ClientError; + type Transaction = (); + type TrieBackendStorage = MemoryDB; + + fn storage(&self, key: &[u8]) -> ClientResult>> { + let mut header = self.cached_header.read().clone(); + if header.is_none() { + let cached_header = self + .blockchain + .upgrade() + .ok_or_else(|| ClientErrorKind::UnknownBlock(format!("{}", self.block)).into()) + .and_then(|blockchain| blockchain.expect_header(BlockId::Hash(self.block)))?; + header = Some(cached_header.clone()); + *self.cached_header.write() = Some(cached_header); + } + + self.fetcher + .upgrade() + .ok_or(ClientErrorKind::NotAvailableOnLightClient)? 
+ .remote_read(RemoteReadRequest { + block: self.block, + header: header.expect("if block above guarantees that header is_some(); qed"), + key: key.to_vec(), + retry_count: None, + }) + .into_future() + .wait() + } + + fn child_storage(&self, _storage_key: &[u8], _key: &[u8]) -> ClientResult>> { + Err(ClientErrorKind::NotAvailableOnLightClient.into()) + } + + fn for_keys_with_prefix(&self, _prefix: &[u8], _action: A) { + // whole state is not available on light node + } + + fn for_keys_in_child_storage(&self, _storage_key: &[u8], _action: A) { + // whole state is not available on light node + } + + fn storage_root(&self, _delta: I) -> (H::Out, Self::Transaction) + where + I: IntoIterator, Option>)>, + { + (H::Out::default(), ()) + } + + fn child_storage_root(&self, _key: &[u8], _delta: I) -> (Vec, bool, Self::Transaction) + where + I: IntoIterator, Option>)>, + { + (H::Out::default().as_ref().to_vec(), true, ()) + } + + fn pairs(&self) -> Vec<(Vec, Vec)> { + // whole state is not available on light node + Vec::new() + } + + fn keys(&self, _prefix: &Vec) -> Vec> { + // whole state is not available on light node + Vec::new() + } + + fn try_into_trie_backend(self) -> Option> { + None + } } impl StateBackend for OnDemandOrGenesisState where - Block: BlockT, - F: Fetcher, - S: BlockchainStorage, - H: Hasher, - H::Out: HeapSizeOf + Ord, + Block: BlockT, + F: Fetcher, + S: BlockchainStorage, + H: Hasher, + H::Out: HeapSizeOf + Ord, { - type Error = ClientError; - type Transaction = (); - type TrieBackendStorage = MemoryDB; - - fn storage(&self, key: &[u8]) -> ClientResult>> { - match *self { - OnDemandOrGenesisState::OnDemand(ref state) => - StateBackend::::storage(state, key), - OnDemandOrGenesisState::Genesis(ref state) => - Ok(state.storage(key).expect(IN_MEMORY_EXPECT_PROOF)), - } - } - - fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> ClientResult>> { - match *self { - OnDemandOrGenesisState::OnDemand(ref state) => - StateBackend::::child_storage(state, storage_key, key), - OnDemandOrGenesisState::Genesis(ref state) => - Ok(state.child_storage(storage_key, key).expect(IN_MEMORY_EXPECT_PROOF)), - } - } - - fn for_keys_with_prefix(&self, prefix: &[u8], action: A) { - match *self { - OnDemandOrGenesisState::OnDemand(ref state) => - StateBackend::::for_keys_with_prefix(state, prefix, action), - OnDemandOrGenesisState::Genesis(ref state) => state.for_keys_with_prefix(prefix, action), - } - } - - fn for_keys_in_child_storage(&self, storage_key: &[u8], action: A) { - match *self { - OnDemandOrGenesisState::OnDemand(ref state) => - StateBackend::::for_keys_in_child_storage(state, storage_key, action), - OnDemandOrGenesisState::Genesis(ref state) => state.for_keys_in_child_storage(storage_key, action), - } - } - - fn storage_root(&self, delta: I) -> (H::Out, Self::Transaction) - where - I: IntoIterator, Option>)> - { - match *self { - OnDemandOrGenesisState::OnDemand(ref state) => - StateBackend::::storage_root(state, delta), - OnDemandOrGenesisState::Genesis(ref state) => { - let (root, _) = state.storage_root(delta); - (root, ()) - }, - } - } - - fn child_storage_root(&self, key: &[u8], delta: I) -> (Vec, bool, Self::Transaction) - where - I: IntoIterator, Option>)> - { - match *self { - OnDemandOrGenesisState::OnDemand(ref state) => - StateBackend::::child_storage_root(state, key, delta), - OnDemandOrGenesisState::Genesis(ref state) => { - let (root, is_equal, _) = state.child_storage_root(key, delta); - (root, is_equal, ()) - }, - } - } - - fn pairs(&self) -> Vec<(Vec, Vec)> { - match 
*self { - OnDemandOrGenesisState::OnDemand(ref state) => - StateBackend::::pairs(state), - OnDemandOrGenesisState::Genesis(ref state) => state.pairs(), - } - } - - fn keys(&self, prefix: &Vec) -> Vec> { - match *self { - OnDemandOrGenesisState::OnDemand(ref state) => - StateBackend::::keys(state, prefix), - OnDemandOrGenesisState::Genesis(ref state) => state.keys(prefix), - } - } - - fn try_into_trie_backend(self) -> Option> { - match self { - OnDemandOrGenesisState::OnDemand(state) => state.try_into_trie_backend(), - OnDemandOrGenesisState::Genesis(state) => state.try_into_trie_backend(), - } - } + type Error = ClientError; + type Transaction = (); + type TrieBackendStorage = MemoryDB; + + fn storage(&self, key: &[u8]) -> ClientResult>> { + match *self { + OnDemandOrGenesisState::OnDemand(ref state) => StateBackend::::storage(state, key), + OnDemandOrGenesisState::Genesis(ref state) => { + Ok(state.storage(key).expect(IN_MEMORY_EXPECT_PROOF)) + } + } + } + + fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> ClientResult>> { + match *self { + OnDemandOrGenesisState::OnDemand(ref state) => { + StateBackend::::child_storage(state, storage_key, key) + } + OnDemandOrGenesisState::Genesis(ref state) => Ok(state + .child_storage(storage_key, key) + .expect(IN_MEMORY_EXPECT_PROOF)), + } + } + + fn for_keys_with_prefix(&self, prefix: &[u8], action: A) { + match *self { + OnDemandOrGenesisState::OnDemand(ref state) => { + StateBackend::::for_keys_with_prefix(state, prefix, action) + } + OnDemandOrGenesisState::Genesis(ref state) => { + state.for_keys_with_prefix(prefix, action) + } + } + } + + fn for_keys_in_child_storage(&self, storage_key: &[u8], action: A) { + match *self { + OnDemandOrGenesisState::OnDemand(ref state) => { + StateBackend::::for_keys_in_child_storage(state, storage_key, action) + } + OnDemandOrGenesisState::Genesis(ref state) => { + state.for_keys_in_child_storage(storage_key, action) + } + } + } + + fn storage_root(&self, delta: I) -> (H::Out, Self::Transaction) + where + I: IntoIterator, Option>)>, + { + match *self { + OnDemandOrGenesisState::OnDemand(ref state) => { + StateBackend::::storage_root(state, delta) + } + OnDemandOrGenesisState::Genesis(ref state) => { + let (root, _) = state.storage_root(delta); + (root, ()) + } + } + } + + fn child_storage_root(&self, key: &[u8], delta: I) -> (Vec, bool, Self::Transaction) + where + I: IntoIterator, Option>)>, + { + match *self { + OnDemandOrGenesisState::OnDemand(ref state) => { + StateBackend::::child_storage_root(state, key, delta) + } + OnDemandOrGenesisState::Genesis(ref state) => { + let (root, is_equal, _) = state.child_storage_root(key, delta); + (root, is_equal, ()) + } + } + } + + fn pairs(&self) -> Vec<(Vec, Vec)> { + match *self { + OnDemandOrGenesisState::OnDemand(ref state) => StateBackend::::pairs(state), + OnDemandOrGenesisState::Genesis(ref state) => state.pairs(), + } + } + + fn keys(&self, prefix: &Vec) -> Vec> { + match *self { + OnDemandOrGenesisState::OnDemand(ref state) => StateBackend::::keys(state, prefix), + OnDemandOrGenesisState::Genesis(ref state) => state.keys(prefix), + } + } + + fn try_into_trie_backend(self) -> Option> { + match self { + OnDemandOrGenesisState::OnDemand(state) => state.try_into_trie_backend(), + OnDemandOrGenesisState::Genesis(state) => state.try_into_trie_backend(), + } + } } #[cfg(test)] mod tests { - use primitives::Blake2Hasher; - use test_client::{self, runtime::Block}; - use crate::backend::NewBlockState; - use crate::light::blockchain::tests::{DummyBlockchain, 
DummyStorage}; - use super::*; - - #[test] - fn local_state_is_created_when_genesis_state_is_available() { - let def = Default::default(); - let header0 = test_client::runtime::Header::new(0, def, def, def, Default::default()); - - let backend: Backend<_, _, Blake2Hasher> = Backend::new(Arc::new(DummyBlockchain::new(DummyStorage::new()))); - let mut op = backend.begin_operation().unwrap(); - op.set_block_data(header0, None, None, NewBlockState::Final).unwrap(); - op.reset_storage(Default::default(), Default::default()).unwrap(); - backend.commit_operation(op).unwrap(); - - match backend.state_at(BlockId::Number(0)).unwrap() { - OnDemandOrGenesisState::Genesis(_) => (), - _ => panic!("unexpected state"), - } - } - - #[test] - fn remote_state_is_created_when_genesis_state_is_inavailable() { - let backend: Backend<_, _, Blake2Hasher> = Backend::new(Arc::new(DummyBlockchain::new(DummyStorage::new()))); - - match backend.state_at(BlockId::Number(0)).unwrap() { - OnDemandOrGenesisState::OnDemand(_) => (), - _ => panic!("unexpected state"), - } - } - - #[test] - fn light_aux_store_is_updated_via_non_importing_op() { - let backend = Backend::new(Arc::new(DummyBlockchain::new(DummyStorage::new()))); - let mut op = ClientBackend::::begin_operation(&backend).unwrap(); - BlockImportOperation::::insert_aux(&mut op, vec![(vec![1], Some(vec![2]))]).unwrap(); - ClientBackend::::commit_operation(&backend, op).unwrap(); - - assert_eq!(AuxStore::get_aux(&backend, &[1]).unwrap(), Some(vec![2])); - } + use super::*; + use crate::backend::NewBlockState; + use crate::light::blockchain::tests::{DummyBlockchain, DummyStorage}; + use primitives::Blake2Hasher; + use test_client::{self, runtime::Block}; + + #[test] + fn local_state_is_created_when_genesis_state_is_available() { + let def = Default::default(); + let header0 = test_client::runtime::Header::new(0, def, def, def, Default::default()); + + let backend: Backend<_, _, Blake2Hasher> = + Backend::new(Arc::new(DummyBlockchain::new(DummyStorage::new()))); + let mut op = backend.begin_operation().unwrap(); + op.set_block_data(header0, None, None, NewBlockState::Final) + .unwrap(); + op.reset_storage(Default::default(), Default::default()) + .unwrap(); + backend.commit_operation(op).unwrap(); + + match backend.state_at(BlockId::Number(0)).unwrap() { + OnDemandOrGenesisState::Genesis(_) => (), + _ => panic!("unexpected state"), + } + } + + #[test] + fn remote_state_is_created_when_genesis_state_is_inavailable() { + let backend: Backend<_, _, Blake2Hasher> = + Backend::new(Arc::new(DummyBlockchain::new(DummyStorage::new()))); + + match backend.state_at(BlockId::Number(0)).unwrap() { + OnDemandOrGenesisState::OnDemand(_) => (), + _ => panic!("unexpected state"), + } + } + + #[test] + fn light_aux_store_is_updated_via_non_importing_op() { + let backend = Backend::new(Arc::new(DummyBlockchain::new(DummyStorage::new()))); + let mut op = ClientBackend::::begin_operation(&backend).unwrap(); + BlockImportOperation::::insert_aux( + &mut op, + vec![(vec![1], Some(vec![2]))], + ) + .unwrap(); + ClientBackend::::commit_operation(&backend, op).unwrap(); + + assert_eq!(AuxStore::get_aux(&backend, &[1]).unwrap(), Some(vec![2])); + } } diff --git a/core/client/src/light/blockchain.rs b/core/client/src/light/blockchain.rs index e081c14d1a..fb67dfb2e4 100644 --- a/core/client/src/light/blockchain.rs +++ b/core/client/src/light/blockchain.rs @@ -17,276 +17,311 @@ //! Light client blockchain backend. Only stores headers and justifications of recent //! blocks.
CHT roots are stored for headers of ancient blocks. -use std::{sync::{Weak, Arc}, collections::HashMap}; use futures::{Future, IntoFuture}; use parking_lot::Mutex; +use std::{ + collections::HashMap, + sync::{Arc, Weak}, +}; -use runtime_primitives::{Justification, generic::BlockId}; -use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, NumberFor, Zero}; use consensus::well_known_cache_keys; +use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, NumberFor, Zero}; +use runtime_primitives::{generic::BlockId, Justification}; use crate::backend::{AuxStore, NewBlockState}; -use crate::blockchain::{Backend as BlockchainBackend, BlockStatus, Cache as BlockchainCache, - HeaderBackend as BlockchainHeaderBackend, Info as BlockchainInfo, ProvideCache}; +use crate::blockchain::{ + Backend as BlockchainBackend, BlockStatus, Cache as BlockchainCache, + HeaderBackend as BlockchainHeaderBackend, Info as BlockchainInfo, ProvideCache, +}; use crate::cht; use crate::error::{ErrorKind as ClientErrorKind, Result as ClientResult}; use crate::light::fetcher::{Fetcher, RemoteHeaderRequest}; /// Light client blockchain storage. pub trait Storage: AuxStore + BlockchainHeaderBackend { - /// Store new header. Should refuse to revert any finalized blocks. - /// - /// Takes new authorities, the leaf state of the new block, and - /// any auxiliary storage updates to place in the same operation. - fn import_header( - &self, - header: Block::Header, - cache: HashMap>, - state: NewBlockState, - aux_ops: Vec<(Vec, Option>)>, - ) -> ClientResult<()>; - - /// Set an existing block as new best block. - fn set_head(&self, block: BlockId) -> ClientResult<()>; - - /// Mark historic header as finalized. - fn finalize_header(&self, block: BlockId) -> ClientResult<()>; - - /// Get last finalized header. - fn last_finalized(&self) -> ClientResult; - - /// Get headers CHT root for given block. Fails if the block is not pruned (not a part of any CHT). - fn header_cht_root(&self, cht_size: u64, block: NumberFor) -> ClientResult; - - /// Get changes trie CHT root for given block. Fails if the block is not pruned (not a part of any CHT). - fn changes_trie_cht_root(&self, cht_size: u64, block: NumberFor) -> ClientResult; - - /// Get storage cache. - fn cache(&self) -> Option>>; + /// Store new header. Should refuse to revert any finalized blocks. + /// + /// Takes new authorities, the leaf state of the new block, and + /// any auxiliary storage updates to place in the same operation. + fn import_header( + &self, + header: Block::Header, + cache: HashMap>, + state: NewBlockState, + aux_ops: Vec<(Vec, Option>)>, + ) -> ClientResult<()>; + + /// Set an existing block as new best block. + fn set_head(&self, block: BlockId) -> ClientResult<()>; + + /// Mark historic header as finalized. + fn finalize_header(&self, block: BlockId) -> ClientResult<()>; + + /// Get last finalized header. + fn last_finalized(&self) -> ClientResult; + + /// Get headers CHT root for given block. Fails if the block is not pruned (not a part of any CHT). + fn header_cht_root(&self, cht_size: u64, block: NumberFor) -> ClientResult; + + /// Get changes trie CHT root for given block. Fails if the block is not pruned (not a part of any CHT). + fn changes_trie_cht_root( + &self, + cht_size: u64, + block: NumberFor, + ) -> ClientResult; + + /// Get storage cache. + fn cache(&self) -> Option>>; } /// Light client blockchain. 
pub struct Blockchain { - fetcher: Mutex>, - storage: S, + fetcher: Mutex>, + storage: S, } impl Blockchain { - /// Create new light blockchain backed with given storage. - pub fn new(storage: S) -> Self { - Self { - fetcher: Mutex::new(Default::default()), - storage, - } - } - - /// Sets fetcher reference. - pub fn set_fetcher(&self, fetcher: Weak) { - *self.fetcher.lock() = fetcher; - } - - /// Get fetcher weak reference. - pub fn fetcher(&self) -> Weak { - self.fetcher.lock().clone() - } - - /// Get storage reference. - pub fn storage(&self) -> &S { - &self.storage - } + /// Create new light blockchain backed with given storage. + pub fn new(storage: S) -> Self { + Self { + fetcher: Mutex::new(Default::default()), + storage, + } + } + + /// Sets fetcher reference. + pub fn set_fetcher(&self, fetcher: Weak) { + *self.fetcher.lock() = fetcher; + } + + /// Get fetcher weak reference. + pub fn fetcher(&self) -> Weak { + self.fetcher.lock().clone() + } + + /// Get storage reference. + pub fn storage(&self) -> &S { + &self.storage + } } -impl BlockchainHeaderBackend for Blockchain where Block: BlockT, S: Storage, F: Fetcher { - fn header(&self, id: BlockId) -> ClientResult> { - match self.storage.header(id)? { - Some(header) => Ok(Some(header)), - None => { - let number = match id { - BlockId::Hash(hash) => match self.storage.number(hash)? { - Some(number) => number, - None => return Ok(None), - }, - BlockId::Number(number) => number, - }; - - // if the header is from future or genesis (we never prune genesis) => return - if number.is_zero() || self.storage.status(BlockId::Number(number))? == BlockStatus::Unknown { - return Ok(None); - } - - self.fetcher().upgrade().ok_or(ClientErrorKind::NotAvailableOnLightClient)? - .remote_header(RemoteHeaderRequest { - cht_root: self.storage.header_cht_root(cht::SIZE, number)?, - block: number, - retry_count: None, - }) - .into_future().wait() - .map(Some) - } - } - } - - fn info(&self) -> ClientResult> { - self.storage.info() - } - - fn status(&self, id: BlockId) -> ClientResult { - self.storage.status(id) - } - - fn number(&self, hash: Block::Hash) -> ClientResult>> { - self.storage.number(hash) - } - - fn hash(&self, number: <::Header as HeaderT>::Number) -> ClientResult> { - self.storage.hash(number) - } +impl BlockchainHeaderBackend for Blockchain +where + Block: BlockT, + S: Storage, + F: Fetcher, +{ + fn header(&self, id: BlockId) -> ClientResult> { + match self.storage.header(id)? { + Some(header) => Ok(Some(header)), + None => { + let number = match id { + BlockId::Hash(hash) => match self.storage.number(hash)? { + Some(number) => number, + None => return Ok(None), + }, + BlockId::Number(number) => number, + }; + + // if the header is from future or genesis (we never prune genesis) => return + if number.is_zero() + || self.storage.status(BlockId::Number(number))? == BlockStatus::Unknown + { + return Ok(None); + } + + self.fetcher() + .upgrade() + .ok_or(ClientErrorKind::NotAvailableOnLightClient)? 
+ .remote_header(RemoteHeaderRequest { + cht_root: self.storage.header_cht_root(cht::SIZE, number)?, + block: number, + retry_count: None, + }) + .into_future() + .wait() + .map(Some) + } + } + } + + fn info(&self) -> ClientResult> { + self.storage.info() + } + + fn status(&self, id: BlockId) -> ClientResult { + self.storage.status(id) + } + + fn number(&self, hash: Block::Hash) -> ClientResult>> { + self.storage.number(hash) + } + + fn hash( + &self, + number: <::Header as HeaderT>::Number, + ) -> ClientResult> { + self.storage.hash(number) + } } -impl BlockchainBackend for Blockchain where Block: BlockT, S: Storage, F: Fetcher { - fn body(&self, _id: BlockId) -> ClientResult>> { - // TODO: #1445 fetch from remote node - Ok(None) - } - - fn justification(&self, _id: BlockId) -> ClientResult> { - Ok(None) - } - - fn last_finalized(&self) -> ClientResult { - self.storage.last_finalized() - } - - fn cache(&self) -> Option>> { - self.storage.cache() - } - - fn leaves(&self) -> ClientResult> { - unimplemented!() - } - - fn children(&self, _parent_hash: Block::Hash) -> ClientResult> { - unimplemented!() - } +impl BlockchainBackend for Blockchain +where + Block: BlockT, + S: Storage, + F: Fetcher, +{ + fn body(&self, _id: BlockId) -> ClientResult>> { + // TODO: #1445 fetch from remote node + Ok(None) + } + + fn justification(&self, _id: BlockId) -> ClientResult> { + Ok(None) + } + + fn last_finalized(&self) -> ClientResult { + self.storage.last_finalized() + } + + fn cache(&self) -> Option>> { + self.storage.cache() + } + + fn leaves(&self) -> ClientResult> { + unimplemented!() + } + + fn children(&self, _parent_hash: Block::Hash) -> ClientResult> { + unimplemented!() + } } impl, F, Block: BlockT> ProvideCache for Blockchain { - fn cache(&self) -> Option>> { - self.storage.cache() - } + fn cache(&self) -> Option>> { + self.storage.cache() + } } #[cfg(test)] pub mod tests { - use std::collections::HashMap; - use test_client::runtime::{Hash, Block, Header}; - use crate::blockchain::Info; - use crate::light::fetcher::tests::OkCallFetcher; - use super::*; - - pub type DummyBlockchain = Blockchain; - - pub struct DummyStorage { - pub changes_tries_cht_roots: HashMap, - pub aux_store: Mutex, Vec>>, - } - - impl DummyStorage { - pub fn new() -> Self { - DummyStorage { - changes_tries_cht_roots: HashMap::new(), - aux_store: Mutex::new(HashMap::new()), - } - } - } - - impl BlockchainHeaderBackend for DummyStorage { - fn header(&self, _id: BlockId) -> ClientResult> { - Err(ClientErrorKind::Backend("Test error".into()).into()) - } - - fn info(&self) -> ClientResult> { - Err(ClientErrorKind::Backend("Test error".into()).into()) - } - - fn status(&self, _id: BlockId) -> ClientResult { - Err(ClientErrorKind::Backend("Test error".into()).into()) - } - - fn number(&self, hash: Hash) -> ClientResult>> { - if hash == Default::default() { - Ok(Some(Default::default())) - } else { - Err(ClientErrorKind::Backend("Test error".into()).into()) - } - } - - fn hash(&self, number: u64) -> ClientResult> { - if number == 0 { - Ok(Some(Default::default())) - } else { - Err(ClientErrorKind::Backend("Test error".into()).into()) - } - } - } - - impl AuxStore for DummyStorage { - fn insert_aux< - 'a, - 'b: 'a, - 'c: 'a, - I: IntoIterator, - D: IntoIterator, - >(&self, insert: I, _delete: D) -> ClientResult<()> { - for (k, v) in insert.into_iter() { - self.aux_store.lock().insert(k.to_vec(), v.to_vec()); - } - Ok(()) - } - - fn get_aux(&self, key: &[u8]) -> ClientResult>> { - Ok(self.aux_store.lock().get(key).cloned()) - } - } - 
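The `DummyStorage` driving these tests implements `AuxStore` as little more than a mutex-guarded map: batched inserts and deletes, plus point lookups. A standalone sketch of that pattern, assuming simplified signatures (`MiniAuxStore` is a hypothetical stand-in; the real trait threads lifetimes through its iterator bounds and returns `ClientResult`):

// Sketch of the aux-store pattern; `MiniAuxStore` is hypothetical.
use std::collections::HashMap;
use std::sync::Mutex;

#[derive(Default)]
struct MiniAuxStore {
    inner: Mutex<HashMap<Vec<u8>, Vec<u8>>>,
}

impl MiniAuxStore {
    // Inserts and deletes are applied under one lock acquisition, like
    // `insert_aux`, so callers can treat the pair as a single logical batch.
    fn insert_aux<'a>(
        &self,
        insert: impl IntoIterator<Item = (&'a [u8], &'a [u8])>,
        delete: impl IntoIterator<Item = &'a [u8]>,
    ) {
        let mut map = self.inner.lock().expect("mutex not poisoned");
        for (k, v) in insert {
            map.insert(k.to_vec(), v.to_vec());
        }
        for k in delete {
            map.remove(k);
        }
    }

    fn get_aux(&self, key: &[u8]) -> Option<Vec<u8>> {
        self.inner.lock().expect("mutex not poisoned").get(key).cloned()
    }
}

fn main() {
    let store = MiniAuxStore::default();
    store.insert_aux(vec![(&b"key"[..], &b"value"[..])], std::iter::empty());
    assert_eq!(store.get_aux(b"key"), Some(b"value".to_vec()));
    store.insert_aux(std::iter::empty(), vec![&b"key"[..]]);
    assert_eq!(store.get_aux(b"key"), None);
}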
- impl Storage for DummyStorage { - fn import_header( - &self, - _header: Header, - _cache: HashMap>, - _state: NewBlockState, - _aux_ops: Vec<(Vec, Option>)>, - ) -> ClientResult<()> { - Ok(()) - } - - fn set_head(&self, _block: BlockId) -> ClientResult<()> { - Err(ClientErrorKind::Backend("Test error".into()).into()) - } - - fn finalize_header(&self, _block: BlockId) -> ClientResult<()> { - Err(ClientErrorKind::Backend("Test error".into()).into()) - } - - fn last_finalized(&self) -> ClientResult { - Err(ClientErrorKind::Backend("Test error".into()).into()) - } - - fn header_cht_root(&self, _cht_size: u64, _block: u64) -> ClientResult { - Err(ClientErrorKind::Backend("Test error".into()).into()) - } - - fn changes_trie_cht_root(&self, cht_size: u64, block: u64) -> ClientResult { - cht::block_to_cht_number(cht_size, block) - .and_then(|cht_num| self.changes_tries_cht_roots.get(&cht_num)) - .cloned() - .ok_or_else(|| ClientErrorKind::Backend( - format!("Test error: CHT for block #{} not found", block) - ).into()) - } - - fn cache(&self) -> Option>> { - None - } - } + use super::*; + use crate::blockchain::Info; + use crate::light::fetcher::tests::OkCallFetcher; + use std::collections::HashMap; + use test_client::runtime::{Block, Hash, Header}; + + pub type DummyBlockchain = Blockchain; + + pub struct DummyStorage { + pub changes_tries_cht_roots: HashMap, + pub aux_store: Mutex, Vec>>, + } + + impl DummyStorage { + pub fn new() -> Self { + DummyStorage { + changes_tries_cht_roots: HashMap::new(), + aux_store: Mutex::new(HashMap::new()), + } + } + } + + impl BlockchainHeaderBackend for DummyStorage { + fn header(&self, _id: BlockId) -> ClientResult> { + Err(ClientErrorKind::Backend("Test error".into()).into()) + } + + fn info(&self) -> ClientResult> { + Err(ClientErrorKind::Backend("Test error".into()).into()) + } + + fn status(&self, _id: BlockId) -> ClientResult { + Err(ClientErrorKind::Backend("Test error".into()).into()) + } + + fn number(&self, hash: Hash) -> ClientResult>> { + if hash == Default::default() { + Ok(Some(Default::default())) + } else { + Err(ClientErrorKind::Backend("Test error".into()).into()) + } + } + + fn hash(&self, number: u64) -> ClientResult> { + if number == 0 { + Ok(Some(Default::default())) + } else { + Err(ClientErrorKind::Backend("Test error".into()).into()) + } + } + } + + impl AuxStore for DummyStorage { + fn insert_aux< + 'a, + 'b: 'a, + 'c: 'a, + I: IntoIterator, + D: IntoIterator, + >( + &self, + insert: I, + _delete: D, + ) -> ClientResult<()> { + for (k, v) in insert.into_iter() { + self.aux_store.lock().insert(k.to_vec(), v.to_vec()); + } + Ok(()) + } + + fn get_aux(&self, key: &[u8]) -> ClientResult>> { + Ok(self.aux_store.lock().get(key).cloned()) + } + } + + impl Storage for DummyStorage { + fn import_header( + &self, + _header: Header, + _cache: HashMap>, + _state: NewBlockState, + _aux_ops: Vec<(Vec, Option>)>, + ) -> ClientResult<()> { + Ok(()) + } + + fn set_head(&self, _block: BlockId) -> ClientResult<()> { + Err(ClientErrorKind::Backend("Test error".into()).into()) + } + + fn finalize_header(&self, _block: BlockId) -> ClientResult<()> { + Err(ClientErrorKind::Backend("Test error".into()).into()) + } + + fn last_finalized(&self) -> ClientResult { + Err(ClientErrorKind::Backend("Test error".into()).into()) + } + + fn header_cht_root(&self, _cht_size: u64, _block: u64) -> ClientResult { + Err(ClientErrorKind::Backend("Test error".into()).into()) + } + + fn changes_trie_cht_root(&self, cht_size: u64, block: u64) -> ClientResult { + 
cht::block_to_cht_number(cht_size, block) + .and_then(|cht_num| self.changes_tries_cht_roots.get(&cht_num)) + .cloned() + .ok_or_else(|| { + ClientErrorKind::Backend(format!( + "Test error: CHT for block #{} not found", + block + )) + .into() + }) + } + + fn cache(&self) -> Option>> { + None + } + } } diff --git a/core/client/src/light/call_executor.rs b/core/client/src/light/call_executor.rs index 1e50d3398e..460ab3792c 100644 --- a/core/client/src/light/call_executor.rs +++ b/core/client/src/light/call_executor.rs @@ -17,351 +17,397 @@ //! Light client call executor. Executes methods on remote full nodes, fetching //! execution proof and checking it locally. -use std::{collections::HashSet, sync::Arc, panic::UnwindSafe, result, marker::PhantomData}; -use futures::{IntoFuture, Future}; +use futures::{Future, IntoFuture}; +use std::{collections::HashSet, marker::PhantomData, panic::UnwindSafe, result, sync::Arc}; -use parity_codec::{Encode, Decode}; -use primitives::{H256, Blake2Hasher, convert_hash, NativeOrEncoded, OffchainExt}; +use hash_db::Hasher; +use parity_codec::{Decode, Encode}; +use primitives::{convert_hash, Blake2Hasher, NativeOrEncoded, OffchainExt, H256}; use runtime_primitives::generic::BlockId; use runtime_primitives::traits::{As, Block as BlockT, Header as HeaderT}; -use state_machine::{self, Backend as StateBackend, CodeExecutor, OverlayedChanges, ExecutionStrategy, - create_proof_check_backend, execution_proof_check_on_trie_backend, ExecutionManager, NeverOffchainExt}; -use hash_db::Hasher; +use state_machine::{ + self, create_proof_check_backend, execution_proof_check_on_trie_backend, + Backend as StateBackend, CodeExecutor, ExecutionManager, ExecutionStrategy, NeverOffchainExt, + OverlayedChanges, +}; use crate::backend::RemoteBackend; use crate::blockchain::Backend as ChainBackend; use crate::call_executor::CallExecutor; use crate::error::{Error as ClientError, ErrorKind as ClientErrorKind, Result as ClientResult}; use crate::light::fetcher::{Fetcher, RemoteCallRequest}; -use executor::{RuntimeVersion, NativeVersion}; +use executor::{NativeVersion, RuntimeVersion}; use heapsize::HeapSizeOf; use trie::MemoryDB; /// Call executor that executes methods on remote node, querying execution proof /// and checking proof by re-executing locally. pub struct RemoteCallExecutor { - blockchain: Arc, - fetcher: Arc, + blockchain: Arc, + fetcher: Arc, } /// Remote or local call executor. /// /// Calls are executed locally if state is available locally. Otherwise, calls /// are redirected to remote call executor. -pub struct RemoteOrLocalCallExecutor, B, R, L> { - backend: Arc, - remote: R, - local: L, - _block: PhantomData, +pub struct RemoteOrLocalCallExecutor, B, R, L> { + backend: Arc, + remote: R, + local: L, + _block: PhantomData, } impl Clone for RemoteCallExecutor { - fn clone(&self) -> Self { - RemoteCallExecutor { - blockchain: self.blockchain.clone(), - fetcher: self.fetcher.clone(), - } - } + fn clone(&self) -> Self { + RemoteCallExecutor { + blockchain: self.blockchain.clone(), + fetcher: self.fetcher.clone(), + } + } } impl RemoteCallExecutor { - /// Creates new instance of remote call executor. - pub fn new(blockchain: Arc, fetcher: Arc) -> Self { - RemoteCallExecutor { blockchain, fetcher } - } + /// Creates new instance of remote call executor.
+ pub fn new(blockchain: Arc, fetcher: Arc) -> Self { + RemoteCallExecutor { + blockchain, + fetcher, + } + } } impl CallExecutor for RemoteCallExecutor where - Block: BlockT, - B: ChainBackend, - F: Fetcher, - Block::Hash: Ord, + Block: BlockT, + B: ChainBackend, + F: Fetcher, + Block::Hash: Ord, { - type Error = ClientError; - - fn call< - O: OffchainExt, - >( - &self, - id: &BlockId, - method: &str, - call_data: &[u8], - _strategy: ExecutionStrategy, - _side_effects_handler: Option<&mut O>, - ) - -> ClientResult> { - let block_hash = self.blockchain.expect_block_hash_from_id(id)?; - let block_header = self.blockchain.expect_header(id.clone())?; - - self.fetcher.remote_call(RemoteCallRequest { - block: block_hash, - header: block_header, - method: method.into(), - call_data: call_data.to_vec(), - retry_count: None, - }).into_future().wait() - } - - fn contextual_call< - O: OffchainExt, - PB: Fn() -> ClientResult, - EM: Fn( - Result, Self::Error>, - Result, Self::Error> - ) -> Result, Self::Error>, - R: Encode + Decode + PartialEq, - NC, - >( - &self, - at: &BlockId, - method: &str, - call_data: &[u8], - changes: &mut OverlayedChanges, - initialized_block: &mut Option>, - _prepare_environment_block: PB, - execution_manager: ExecutionManager, - _native_call: Option, - side_effects_handler: Option<&mut O>, - ) -> ClientResult> where ExecutionManager: Clone { - // it is only possible to execute contextual call if changes are empty - if !changes.is_empty() || initialized_block.is_some() { - return Err(ClientErrorKind::NotAvailableOnLightClient.into()); - } - - self.call(at, method, call_data, (&execution_manager).into(), side_effects_handler).map(NativeOrEncoded::Encoded) - } - - fn runtime_version(&self, id: &BlockId) -> ClientResult { - let call_result = self.call(id, "version", &[], ExecutionStrategy::NativeElseWasm, NeverOffchainExt::new())?; - RuntimeVersion::decode(&mut call_result.as_slice()) - .ok_or_else(|| ClientErrorKind::VersionInvalid.into()) - } - - fn call_at_state< - O: OffchainExt, - S: StateBackend, - FF: FnOnce( - Result, Self::Error>, - Result, Self::Error> - ) -> Result, Self::Error>, - R: Encode + Decode + PartialEq, - NC: FnOnce() -> result::Result, - >(&self, - _state: &S, - _changes: &mut OverlayedChanges, - _method: &str, - _call_data: &[u8], - _m: ExecutionManager, - _native_call: Option, - _side_effects_handler: Option<&mut O>, - ) -> ClientResult<(NativeOrEncoded, S::Transaction, Option>)> { - Err(ClientErrorKind::NotAvailableOnLightClient.into()) - } - - fn prove_at_trie_state>( - &self, - _state: &state_machine::TrieBackend, - _changes: &mut OverlayedChanges, - _method: &str, - _call_data: &[u8] - ) -> ClientResult<(Vec, Vec>)> { - Err(ClientErrorKind::NotAvailableOnLightClient.into()) - } - - fn native_runtime_version(&self) -> Option<&NativeVersion> { - None - } + type Error = ClientError; + + fn call( + &self, + id: &BlockId, + method: &str, + call_data: &[u8], + _strategy: ExecutionStrategy, + _side_effects_handler: Option<&mut O>, + ) -> ClientResult> { + let block_hash = self.blockchain.expect_block_hash_from_id(id)?; + let block_header = self.blockchain.expect_header(id.clone())?; + + self.fetcher + .remote_call(RemoteCallRequest { + block: block_hash, + header: block_header, + method: method.into(), + call_data: call_data.to_vec(), + retry_count: None, + }) + .into_future() + .wait() + } + + fn contextual_call< + O: OffchainExt, + PB: Fn() -> ClientResult, + EM: Fn( + Result, Self::Error>, + Result, Self::Error>, + ) -> Result, Self::Error>, + R: Encode 
+ Decode + PartialEq, + NC, + >( + &self, + at: &BlockId, + method: &str, + call_data: &[u8], + changes: &mut OverlayedChanges, + initialized_block: &mut Option>, + _prepare_environment_block: PB, + execution_manager: ExecutionManager, + _native_call: Option, + side_effects_handler: Option<&mut O>, + ) -> ClientResult> + where + ExecutionManager: Clone, + { + // it is only possible to execute contextual call if changes are empty + if !changes.is_empty() || initialized_block.is_some() { + return Err(ClientErrorKind::NotAvailableOnLightClient.into()); + } + + self.call( + at, + method, + call_data, + (&execution_manager).into(), + side_effects_handler, + ) + .map(NativeOrEncoded::Encoded) + } + + fn runtime_version(&self, id: &BlockId) -> ClientResult { + let call_result = self.call( + id, + "version", + &[], + ExecutionStrategy::NativeElseWasm, + NeverOffchainExt::new(), + )?; + RuntimeVersion::decode(&mut call_result.as_slice()) + .ok_or_else(|| ClientErrorKind::VersionInvalid.into()) + } + + fn call_at_state< + O: OffchainExt, + S: StateBackend, + FF: FnOnce( + Result, Self::Error>, + Result, Self::Error>, + ) -> Result, Self::Error>, + R: Encode + Decode + PartialEq, + NC: FnOnce() -> result::Result, + >( + &self, + _state: &S, + _changes: &mut OverlayedChanges, + _method: &str, + _call_data: &[u8], + _m: ExecutionManager, + _native_call: Option, + _side_effects_handler: Option<&mut O>, + ) -> ClientResult<( + NativeOrEncoded, + S::Transaction, + Option>, + )> { + Err(ClientErrorKind::NotAvailableOnLightClient.into()) + } + + fn prove_at_trie_state>( + &self, + _state: &state_machine::TrieBackend, + _changes: &mut OverlayedChanges, + _method: &str, + _call_data: &[u8], + ) -> ClientResult<(Vec, Vec>)> { + Err(ClientErrorKind::NotAvailableOnLightClient.into()) + } + + fn native_runtime_version(&self) -> Option<&NativeVersion> { + None + } } impl Clone for RemoteOrLocalCallExecutor - where - Block: BlockT, - B: RemoteBackend, - R: CallExecutor + Clone, - L: CallExecutor + Clone, +where + Block: BlockT, + B: RemoteBackend, + R: CallExecutor + Clone, + L: CallExecutor + Clone, { - fn clone(&self) -> Self { - RemoteOrLocalCallExecutor { - backend: self.backend.clone(), - remote: self.remote.clone(), - local: self.local.clone(), - _block: Default::default(), - } - } + fn clone(&self) -> Self { + RemoteOrLocalCallExecutor { + backend: self.backend.clone(), + remote: self.remote.clone(), + local: self.local.clone(), + _block: Default::default(), + } + } } impl RemoteOrLocalCallExecutor - where - Block: BlockT, - B: RemoteBackend, - Remote: CallExecutor, - Local: CallExecutor, +where + Block: BlockT, + B: RemoteBackend, + Remote: CallExecutor, + Local: CallExecutor, { - /// Creates new instance of remote/local call executor. - pub fn new(backend: Arc, remote: Remote, local: Local) -> Self { - RemoteOrLocalCallExecutor { backend, remote, local, _block: Default::default(), } - } + /// Creates new instance of remote/local call executor. 
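// A minimal usage sketch for the constructor below, assuming the in-memory
// backend and the two RemoteCallExecutor instances that the tests at the end
// of this file build; `backend`, `remote_executor` and `local_executor` are
// illustrative names, not part of this patch:
//
//     let exec = RemoteOrLocalCallExecutor::new(backend, remote_executor, local_executor);
//     // Routing is decided per block: if the block's state is available
//     // locally the call runs through `local`, otherwise through `remote`.
//     let version = exec.call(
//         &BlockId::Number(0),
//         "Core_version",
//         &[],
//         ExecutionStrategy::NativeElseWasm,
//         NeverOffchainExt::new(),
//     )?;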
+ pub fn new(backend: Arc, remote: Remote, local: Local) -> Self { + RemoteOrLocalCallExecutor { + backend, + remote, + local, + _block: Default::default(), + } + } } -impl CallExecutor for - RemoteOrLocalCallExecutor - where - Block: BlockT, - B: RemoteBackend, - Remote: CallExecutor, - Local: CallExecutor, +impl CallExecutor + for RemoteOrLocalCallExecutor +where + Block: BlockT, + B: RemoteBackend, + Remote: CallExecutor, + Local: CallExecutor, { - type Error = ClientError; - - fn call< - O: OffchainExt, - >( - &self, - id: &BlockId, - method: &str, - call_data: &[u8], - strategy: ExecutionStrategy, - side_effects_handler: Option<&mut O>, - ) -> ClientResult> { - match self.backend.is_local_state_available(id) { - true => self.local.call(id, method, call_data, strategy, side_effects_handler), - false => self.remote.call(id, method, call_data, strategy, side_effects_handler), - } - } - - fn contextual_call< - O: OffchainExt, - PB: Fn() -> ClientResult, - EM: Fn( - Result, Self::Error>, - Result, Self::Error> - ) -> Result, Self::Error>, - R: Encode + Decode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, - >( - &self, - at: &BlockId, - method: &str, - call_data: &[u8], - changes: &mut OverlayedChanges, - initialized_block: &mut Option>, - prepare_environment_block: PB, - _manager: ExecutionManager, - native_call: Option, - side_effects_handler: Option<&mut O>, - ) -> ClientResult> where ExecutionManager: Clone { - // there's no actual way/need to specify native/wasm execution strategy on light node - // => we can safely ignore passed values - - match self.backend.is_local_state_available(at) { - true => CallExecutor::contextual_call::< - _, - _, - fn( - Result, Local::Error>, - Result, Local::Error>, - ) -> Result, Local::Error>, - _, - NC - >( - &self.local, - at, - method, - call_data, - changes, - initialized_block, - prepare_environment_block, - ExecutionManager::NativeWhenPossible, - native_call, - side_effects_handler, - ).map_err(|e| ClientErrorKind::Execution(Box::new(e.to_string())).into()), - false => CallExecutor::contextual_call::< - _, - _, - fn( - Result, Remote::Error>, - Result, Remote::Error>, - ) -> Result, Remote::Error>, - _, - NC - >( - &self.remote, - at, - method, - call_data, - changes, - initialized_block, - prepare_environment_block, - ExecutionManager::NativeWhenPossible, - native_call, - side_effects_handler, - ).map_err(|e| ClientErrorKind::Execution(Box::new(e.to_string())).into()), - } - } - - fn runtime_version(&self, id: &BlockId) -> ClientResult { - match self.backend.is_local_state_available(id) { - true => self.local.runtime_version(id), - false => self.remote.runtime_version(id), - } - } - - fn call_at_state< - O: OffchainExt, - S: StateBackend, - FF: FnOnce( - Result, Self::Error>, - Result, Self::Error> - ) -> Result, Self::Error>, - R: Encode + Decode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, - >(&self, - state: &S, - changes: &mut OverlayedChanges, - method: &str, - call_data: &[u8], - _manager: ExecutionManager, - native_call: Option, - side_effects_handler: Option<&mut O>, - ) -> ClientResult<(NativeOrEncoded, S::Transaction, Option>)> { - // there's no actual way/need to specify native/wasm execution strategy on light node - // => we can safely ignore passed values - - CallExecutor::call_at_state::< - _, - _, - fn( - Result, Remote::Error>, - Result, Remote::Error>, - ) -> Result, Remote::Error>, - _, - NC - >( - &self.remote, - state, - changes, - method, - call_data, - ExecutionManager::NativeWhenPossible, - 
native_call, - side_effects_handler, - ).map_err(|e| ClientErrorKind::Execution(Box::new(e.to_string())).into()) - } - - fn prove_at_trie_state>( - &self, - state: &state_machine::TrieBackend, - changes: &mut OverlayedChanges, - method: &str, - call_data: &[u8] - ) -> ClientResult<(Vec, Vec>)> { - self.remote.prove_at_trie_state(state, changes, method, call_data) - } - - fn native_runtime_version(&self) -> Option<&NativeVersion> { - None - } + type Error = ClientError; + + fn call( + &self, + id: &BlockId, + method: &str, + call_data: &[u8], + strategy: ExecutionStrategy, + side_effects_handler: Option<&mut O>, + ) -> ClientResult> { + match self.backend.is_local_state_available(id) { + true => self + .local + .call(id, method, call_data, strategy, side_effects_handler), + false => self + .remote + .call(id, method, call_data, strategy, side_effects_handler), + } + } + + fn contextual_call< + O: OffchainExt, + PB: Fn() -> ClientResult, + EM: Fn( + Result, Self::Error>, + Result, Self::Error>, + ) -> Result, Self::Error>, + R: Encode + Decode + PartialEq, + NC: FnOnce() -> result::Result + UnwindSafe, + >( + &self, + at: &BlockId, + method: &str, + call_data: &[u8], + changes: &mut OverlayedChanges, + initialized_block: &mut Option>, + prepare_environment_block: PB, + _manager: ExecutionManager, + native_call: Option, + side_effects_handler: Option<&mut O>, + ) -> ClientResult> + where + ExecutionManager: Clone, + { + // there's no actual way/need to specify native/wasm execution strategy on light node + // => we can safely ignore passed values + + match self.backend.is_local_state_available(at) { + true => CallExecutor::contextual_call::< + _, + _, + fn( + Result, Local::Error>, + Result, Local::Error>, + ) -> Result, Local::Error>, + _, + NC, + >( + &self.local, + at, + method, + call_data, + changes, + initialized_block, + prepare_environment_block, + ExecutionManager::NativeWhenPossible, + native_call, + side_effects_handler, + ) + .map_err(|e| ClientErrorKind::Execution(Box::new(e.to_string())).into()), + false => CallExecutor::contextual_call::< + _, + _, + fn( + Result, Remote::Error>, + Result, Remote::Error>, + ) -> Result, Remote::Error>, + _, + NC, + >( + &self.remote, + at, + method, + call_data, + changes, + initialized_block, + prepare_environment_block, + ExecutionManager::NativeWhenPossible, + native_call, + side_effects_handler, + ) + .map_err(|e| ClientErrorKind::Execution(Box::new(e.to_string())).into()), + } + } + + fn runtime_version(&self, id: &BlockId) -> ClientResult { + match self.backend.is_local_state_available(id) { + true => self.local.runtime_version(id), + false => self.remote.runtime_version(id), + } + } + + fn call_at_state< + O: OffchainExt, + S: StateBackend, + FF: FnOnce( + Result, Self::Error>, + Result, Self::Error>, + ) -> Result, Self::Error>, + R: Encode + Decode + PartialEq, + NC: FnOnce() -> result::Result + UnwindSafe, + >( + &self, + state: &S, + changes: &mut OverlayedChanges, + method: &str, + call_data: &[u8], + _manager: ExecutionManager, + native_call: Option, + side_effects_handler: Option<&mut O>, + ) -> ClientResult<( + NativeOrEncoded, + S::Transaction, + Option>, + )> { + // there's no actual way/need to specify native/wasm execution strategy on light node + // => we can safely ignore passed values + + CallExecutor::call_at_state::< + _, + _, + fn( + Result, Remote::Error>, + Result, Remote::Error>, + ) -> Result, Remote::Error>, + _, + NC, + >( + &self.remote, + state, + changes, + method, + call_data, + 
ExecutionManager::NativeWhenPossible, + native_call, + side_effects_handler, + ) + .map_err(|e| ClientErrorKind::Execution(Box::new(e.to_string())).into()) + } + + fn prove_at_trie_state<S: state_machine::TrieBackendStorage<Blake2Hasher>>( + &self, + state: &state_machine::TrieBackend<S, Blake2Hasher>, + changes: &mut OverlayedChanges, + method: &str, + call_data: &[u8], + ) -> ClientResult<(Vec<u8>, Vec<Vec<u8>>)> { + self.remote + .prove_at_trie_state(state, changes, method, call_data) + } + + fn native_runtime_version(&self) -> Option<&NativeVersion> { + None + } } /// Prove contextual execution using given block header in environment. @@ -369,38 +415,41 @@ impl<Block, B, Remote, Local> CallExecutor<Block, Blake2Hasher> for /// Method is executed using passed header as environment's current block. /// Proof includes both environment preparation proof and method execution proof. pub fn prove_execution<Block, S, E>( - state: S, - header: Block::Header, - executor: &E, - method: &str, - call_data: &[u8], + state: S, + header: Block::Header, + executor: &E, + method: &str, + call_data: &[u8], ) -> ClientResult<(Vec<u8>, Vec<Vec<u8>>)> - where - Block: BlockT, - S: StateBackend<Blake2Hasher>, - E: CallExecutor<Block, Blake2Hasher>, +where + Block: BlockT, + S: StateBackend<Blake2Hasher>, + E: CallExecutor<Block, Blake2Hasher>, { - let trie_state = state.try_into_trie_backend() - .ok_or_else(|| Box::new(state_machine::ExecutionError::UnableToGenerateProof) as Box<state_machine::Error>)?; - - // prepare execution environment + record preparation proof - let mut changes = Default::default(); - let (_, init_proof) = executor.prove_at_trie_state( - &trie_state, - &mut changes, - "Core_initialize_block", - &header.encode(), - )?; - - // execute method + record execution proof - let (result, exec_proof) = executor.prove_at_trie_state(&trie_state, &mut changes, method, call_data)?; - let total_proof = init_proof.into_iter() - .chain(exec_proof.into_iter()) - .collect::<HashSet<_>>() - .into_iter() - .collect(); - - Ok((result, total_proof)) + let trie_state = state.try_into_trie_backend().ok_or_else(|| { + Box::new(state_machine::ExecutionError::UnableToGenerateProof) as Box<state_machine::Error> + })?; + + // prepare execution environment + record preparation proof + let mut changes = Default::default(); + let (_, init_proof) = executor.prove_at_trie_state( + &trie_state, + &mut changes, + "Core_initialize_block", + &header.encode(), + )?; + + // execute method + record execution proof + let (result, exec_proof) = + executor.prove_at_trie_state(&trie_state, &mut changes, method, call_data)?; + let total_proof = init_proof + .into_iter() + .chain(exec_proof.into_iter()) + .collect::<HashSet<_>>() + .into_iter() + .collect(); + + Ok((result, total_proof)) } /// Check remote contextual execution proof using given backend. @@ -408,139 +457,191 @@ pub fn prove_execution<Block, S, E>( /// Method is executed using passed header as environment's current block. /// Proof should include both environment preparation proof and method execution proof. pub fn check_execution_proof<Header, E, H>( - executor: &E, - request: &RemoteCallRequest<Header>, - remote_proof: Vec<Vec<u8>> + executor: &E, + request: &RemoteCallRequest<Header>, + remote_proof: Vec<Vec<u8>>, ) -> ClientResult<Vec<u8>> - where - Header: HeaderT, - E: CodeExecutor<H>, - H: Hasher, - H::Out: Ord + HeapSizeOf, +where + Header: HeaderT, + E: CodeExecutor<H>, + H: Hasher, + H::Out: Ord + HeapSizeOf, { - let local_state_root = request.header.state_root(); - let root: H::Out = convert_hash(&local_state_root); - - // prepare execution environment + check preparation proof - let mut changes = OverlayedChanges::default(); - let trie_backend = create_proof_check_backend(root, remote_proof)?; - let next_block = <Header as HeaderT>
::new( - *request.header.number() + As::sa(1), - Default::default(), - Default::default(), - request.header.hash(), - Default::default(), - ); - execution_proof_check_on_trie_backend::<H, _>( - &trie_backend, - &mut changes, - executor, - "Core_initialize_block", - &next_block.encode(), - )?; - - // execute method - let local_result = execution_proof_check_on_trie_backend::<H, _>( - &trie_backend, - &mut changes, - executor, - &request.method, - &request.call_data, - )?; - - Ok(local_result) + let local_state_root = request.header.state_root(); + let root: H::Out = convert_hash(&local_state_root); + + // prepare execution environment + check preparation proof + let mut changes = OverlayedChanges::default(); + let trie_backend = create_proof_check_backend(root, remote_proof)?; + let next_block = <Header as HeaderT>
::new( + *request.header.number() + As::sa(1), + Default::default(), + Default::default(), + request.header.hash(), + Default::default(), + ); + execution_proof_check_on_trie_backend::( + &trie_backend, + &mut changes, + executor, + "Core_initialize_block", + &next_block.encode(), + )?; + + // execute method + let local_result = execution_proof_check_on_trie_backend::( + &trie_backend, + &mut changes, + executor, + &request.method, + &request.call_data, + )?; + + Ok(local_result) } #[cfg(test)] mod tests { - use consensus::BlockOrigin; - use test_client::{self, runtime::{Block, Header}, runtime::RuntimeApi, TestClient}; - use executor::NativeExecutionDispatch; - use crate::backend::{Backend, NewBlockState}; - use crate::in_mem::Backend as InMemBackend; - use crate::light::fetcher::tests::OkCallFetcher; - use super::*; - - #[test] - fn execution_proof_is_generated_and_checked() { - type TestClient = test_client::client::Client< - test_client::Backend, - test_client::Executor, - Block, - RuntimeApi - >; - - fn execute(remote_client: &TestClient, at: u64, method: &'static str) -> (Vec, Vec) { - let remote_block_id = BlockId::Number(at); - let remote_root = remote_client.state_at(&remote_block_id) - .unwrap().storage_root(::std::iter::empty()).0; - - // 'fetch' execution proof from remote node - let (remote_result, remote_execution_proof) = remote_client.execution_proof( - &remote_block_id, - method, - &[] - ).unwrap(); - - // check remote execution proof locally - let local_executor = test_client::LocalExecutor::new(None); - let local_result = check_execution_proof(&local_executor, &RemoteCallRequest { - block: test_client::runtime::Hash::default(), - header: test_client::runtime::Header { - state_root: remote_root.into(), - parent_hash: Default::default(), - number: at, - extrinsics_root: Default::default(), - digest: Default::default(), - }, - method: method.into(), - call_data: vec![], - retry_count: None, - }, remote_execution_proof).unwrap(); - - (remote_result, local_result) - } - - // prepare remote client - let remote_client = test_client::new(); - for _ in 1..3 { - remote_client.import_justified( - BlockOrigin::Own, - remote_client.new_block().unwrap().bake().unwrap(), - Default::default(), - ).unwrap(); - } - - // check method that doesn't requires environment - let (remote, local) = execute(&remote_client, 0, "Core_version"); - assert_eq!(remote, local); - - // check method that requires environment - let (_, block) = execute(&remote_client, 0, "BlockBuilder_finalize_block"); - let local_block: Header = Decode::decode(&mut &block[..]).unwrap(); - assert_eq!(local_block.number, 1); - - // check method that requires environment - let (_, block) = execute(&remote_client, 2, "BlockBuilder_finalize_block"); - let local_block: Header = Decode::decode(&mut &block[..]).unwrap(); - assert_eq!(local_block.number, 3); - } - - #[test] - fn code_is_executed_locally_or_remotely() { - let backend = Arc::new(InMemBackend::new()); - let def = H256::default(); - let header0 = test_client::runtime::Header::new(0, def, def, def, Default::default()); - let hash0 = header0.hash(); - let header1 = test_client::runtime::Header::new(1, def, def, hash0, Default::default()); - let hash1 = header1.hash(); - backend.blockchain().insert(hash0, header0, None, None, NewBlockState::Final).unwrap(); - backend.blockchain().insert(hash1, header1, None, None, NewBlockState::Final).unwrap(); - - let local_executor = RemoteCallExecutor::new(Arc::new(backend.blockchain().clone()), Arc::new(OkCallFetcher::new(vec![1]))); 
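// A condensed sketch of the round trip that `execution_proof_is_generated_and_checked`
// exercises above, using the same APIs; `block_hash`, `header` and
// `local_executor` stand in for the values the test derives:
//
//     // full node: execute the method and record a storage proof
//     let (remote_result, proof) =
//         remote_client.execution_proof(&BlockId::Number(0), "Core_version", &[])?;
//     // light client: re-execute against the proof alone and compare results
//     let local_result = check_execution_proof(&local_executor, &RemoteCallRequest {
//         block: block_hash,
//         header,
//         method: "Core_version".into(),
//         call_data: vec![],
//         retry_count: None,
//     }, proof)?;
//     assert_eq!(remote_result, local_result);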
- let remote_executor = RemoteCallExecutor::new(Arc::new(backend.blockchain().clone()), Arc::new(OkCallFetcher::new(vec![2]))); - let remote_or_local = RemoteOrLocalCallExecutor::new(backend, remote_executor, local_executor); - assert_eq!(remote_or_local.call(&BlockId::Number(0), "test_method", &[], ExecutionStrategy::NativeElseWasm, NeverOffchainExt::new()).unwrap(), vec![1]); - assert_eq!(remote_or_local.call(&BlockId::Number(1), "test_method", &[], ExecutionStrategy::NativeElseWasm, NeverOffchainExt::new()).unwrap(), vec![2]); - } + use super::*; + use crate::backend::{Backend, NewBlockState}; + use crate::in_mem::Backend as InMemBackend; + use crate::light::fetcher::tests::OkCallFetcher; + use consensus::BlockOrigin; + use executor::NativeExecutionDispatch; + use test_client::{ + self, + runtime::RuntimeApi, + runtime::{Block, Header}, + TestClient, + }; + + #[test] + fn execution_proof_is_generated_and_checked() { + type TestClient = test_client::client::Client< + test_client::Backend, + test_client::Executor, + Block, + RuntimeApi, + >; + + fn execute( + remote_client: &TestClient, + at: u64, + method: &'static str, + ) -> (Vec, Vec) { + let remote_block_id = BlockId::Number(at); + let remote_root = remote_client + .state_at(&remote_block_id) + .unwrap() + .storage_root(::std::iter::empty()) + .0; + + // 'fetch' execution proof from remote node + let (remote_result, remote_execution_proof) = remote_client + .execution_proof(&remote_block_id, method, &[]) + .unwrap(); + + // check remote execution proof locally + let local_executor = test_client::LocalExecutor::new(None); + let local_result = check_execution_proof( + &local_executor, + &RemoteCallRequest { + block: test_client::runtime::Hash::default(), + header: test_client::runtime::Header { + state_root: remote_root.into(), + parent_hash: Default::default(), + number: at, + extrinsics_root: Default::default(), + digest: Default::default(), + }, + method: method.into(), + call_data: vec![], + retry_count: None, + }, + remote_execution_proof, + ) + .unwrap(); + + (remote_result, local_result) + } + + // prepare remote client + let remote_client = test_client::new(); + for _ in 1..3 { + remote_client + .import_justified( + BlockOrigin::Own, + remote_client.new_block().unwrap().bake().unwrap(), + Default::default(), + ) + .unwrap(); + } + + // check method that doesn't requires environment + let (remote, local) = execute(&remote_client, 0, "Core_version"); + assert_eq!(remote, local); + + // check method that requires environment + let (_, block) = execute(&remote_client, 0, "BlockBuilder_finalize_block"); + let local_block: Header = Decode::decode(&mut &block[..]).unwrap(); + assert_eq!(local_block.number, 1); + + // check method that requires environment + let (_, block) = execute(&remote_client, 2, "BlockBuilder_finalize_block"); + let local_block: Header = Decode::decode(&mut &block[..]).unwrap(); + assert_eq!(local_block.number, 3); + } + + #[test] + fn code_is_executed_locally_or_remotely() { + let backend = Arc::new(InMemBackend::new()); + let def = H256::default(); + let header0 = test_client::runtime::Header::new(0, def, def, def, Default::default()); + let hash0 = header0.hash(); + let header1 = test_client::runtime::Header::new(1, def, def, hash0, Default::default()); + let hash1 = header1.hash(); + backend + .blockchain() + .insert(hash0, header0, None, None, NewBlockState::Final) + .unwrap(); + backend + .blockchain() + .insert(hash1, header1, None, None, NewBlockState::Final) + .unwrap(); + + let local_executor = 
RemoteCallExecutor::new( + Arc::new(backend.blockchain().clone()), + Arc::new(OkCallFetcher::new(vec![1])), + ); + let remote_executor = RemoteCallExecutor::new( + Arc::new(backend.blockchain().clone()), + Arc::new(OkCallFetcher::new(vec![2])), + ); + let remote_or_local = + RemoteOrLocalCallExecutor::new(backend, remote_executor, local_executor); + assert_eq!( + remote_or_local + .call( + &BlockId::Number(0), + "test_method", + &[], + ExecutionStrategy::NativeElseWasm, + NeverOffchainExt::new() + ) + .unwrap(), + vec![1] + ); + assert_eq!( + remote_or_local + .call( + &BlockId::Number(1), + "test_method", + &[], + ExecutionStrategy::NativeElseWasm, + NeverOffchainExt::new() + ) + .unwrap(), + vec![2] + ); + } } diff --git a/core/client/src/light/fetcher.rs b/core/client/src/light/fetcher.rs index 4cbbc819b3..92e1c3cd27 100644 --- a/core/client/src/light/fetcher.rs +++ b/core/client/src/light/fetcher.rs @@ -16,17 +16,19 @@ //! Light client data fetcher. Fetches requested data from remote full nodes. -use std::sync::Arc; +use futures::IntoFuture; use std::collections::BTreeMap; use std::marker::PhantomData; -use futures::IntoFuture; +use std::sync::Arc; use hash_db::{HashDB, Hasher}; use heapsize::HeapSizeOf; -use primitives::{ChangesTrieConfiguration, convert_hash}; +use primitives::{convert_hash, ChangesTrieConfiguration}; use runtime_primitives::traits::{As, Block as BlockT, Header as HeaderT, NumberFor}; -use state_machine::{CodeExecutor, ChangesTrieRootsStorage, ChangesTrieAnchorBlockId, - TrieBackend, read_proof_check, key_changes_proof_check, create_proof_check_backend_storage}; +use state_machine::{ + create_proof_check_backend_storage, key_changes_proof_check, read_proof_check, + ChangesTrieAnchorBlockId, ChangesTrieRootsStorage, CodeExecutor, TrieBackend, +}; use crate::cht; use crate::error::{Error as ClientError, ErrorKind as ClientErrorKind, Result as ClientResult}; @@ -36,100 +38,106 @@ use crate::light::call_executor::check_execution_proof; /// Remote call request. #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct RemoteCallRequest { - /// Call at state of given block. - pub block: Header::Hash, - /// Header of block at which call is performed. - pub header: Header, - /// Method to call. - pub method: String, - /// Call data. - pub call_data: Vec, - /// Number of times to retry request. None means that default RETRY_COUNT is used. - pub retry_count: Option, + /// Call at state of given block. + pub block: Header::Hash, + /// Header of block at which call is performed. + pub header: Header, + /// Method to call. + pub method: String, + /// Call data. + pub call_data: Vec, + /// Number of times to retry request. None means that default RETRY_COUNT is used. + pub retry_count: Option, } /// Remote canonical header request. #[derive(Clone, Debug, Default, PartialEq, Eq, Hash)] pub struct RemoteHeaderRequest { - /// The root of CHT this block is included in. - pub cht_root: Header::Hash, - /// Number of the header to query. - pub block: Header::Number, - /// Number of times to retry request. None means that default RETRY_COUNT is used. - pub retry_count: Option, + /// The root of CHT this block is included in. + pub cht_root: Header::Hash, + /// Number of the header to query. + pub block: Header::Number, + /// Number of times to retry request. None means that default RETRY_COUNT is used. + pub retry_count: Option, } /// Remote storage read request. #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct RemoteReadRequest { - /// Read at state of given block. 
- pub block: Header::Hash, - /// Header of block at which read is performed. - pub header: Header, - /// Storage key to read. - pub key: Vec<u8>, - /// Number of times to retry request. None means that default RETRY_COUNT is used. - pub retry_count: Option<usize>, + /// Read at state of given block. + pub block: Header::Hash, + /// Header of block at which read is performed. + pub header: Header, + /// Storage key to read. + pub key: Vec<u8>, + /// Number of times to retry request. None means that default RETRY_COUNT is used. + pub retry_count: Option<usize>, } /// Remote key changes read request. #[derive(Clone, Debug, PartialEq, Eq)] pub struct RemoteChangesRequest<Header: HeaderT> { - /// Changes trie configuration. - pub changes_trie_config: ChangesTrieConfiguration, - /// Query changes from range of blocks, starting (and including) with this hash... - pub first_block: (Header::Number, Header::Hash), - /// ...ending (and including) with this hash. Should come after first_block and - /// be part of the same fork. - pub last_block: (Header::Number, Header::Hash), - /// Only use digests from blocks up to this hash. Should be last_block OR come - /// after this block and be part of the same fork. - pub max_block: (Header::Number, Header::Hash), - /// Known changes trie roots for the range of blocks [tries_roots.0..max_block]. - /// Proofs for roots of ascendants of tries_roots.0 are provided by the remote node. - pub tries_roots: (Header::Number, Header::Hash, Vec<Header::Hash>), - /// Storage key to read. - pub key: Vec<u8>, - /// Number of times to retry request. None means that default RETRY_COUNT is used. - pub retry_count: Option<usize>, + /// Changes trie configuration. + pub changes_trie_config: ChangesTrieConfiguration, + /// Query changes from range of blocks, starting (and including) with this hash... + pub first_block: (Header::Number, Header::Hash), + /// ...ending (and including) with this hash. Should come after first_block and + /// be part of the same fork. + pub last_block: (Header::Number, Header::Hash), + /// Only use digests from blocks up to this hash. Should be last_block OR come + /// after this block and be part of the same fork. + pub max_block: (Header::Number, Header::Hash), + /// Known changes trie roots for the range of blocks [tries_roots.0..max_block]. + /// Proofs for roots of ascendants of tries_roots.0 are provided by the remote node. + pub tries_roots: (Header::Number, Header::Hash, Vec<Header::Hash>), + /// Storage key to read. + pub key: Vec<u8>, + /// Number of times to retry request. None means that default RETRY_COUNT is used. + pub retry_count: Option<usize>, } /// Key changes read proof. #[derive(Debug, PartialEq, Eq)] pub struct ChangesProof<Header: HeaderT> { - /// Max block that has been used in changes query. - pub max_block: Header::Number, - /// All touched nodes of all changes tries. - pub proof: Vec<Vec<u8>>, - /// All changes tries roots that have been touched AND are missing from - /// the requester's node. It is a map of block number => changes trie root. - pub roots: BTreeMap<Header::Number, Header::Hash>, - /// The proofs for all changes tries roots that have been touched AND are - /// missing from the requester's node. It is a map of CHT number => proof. - pub roots_proof: Vec<Vec<u8>>, + /// Max block that has been used in changes query. + pub max_block: Header::Number, + /// All touched nodes of all changes tries. + pub proof: Vec<Vec<u8>>, + /// All changes tries roots that have been touched AND are missing from + /// the requester's node. It is a map of block number => changes trie root.
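// How the `roots` and `roots_proof` fields below pair up during verification,
// as a sketch against the checker defined later in this file (`checker` and
// the loose bindings are illustrative): every root listed in `roots` belongs
// to a block whose header the requester has pruned, so it cannot be checked
// against a header and must be proven against a local CHT root instead:
//
//     checker.check_changes_proof(&request, ChangesProof {
//         max_block,   // must satisfy last_block.0 <= max_block <= request.max_block.0
//         proof,       // touched changes-trie nodes
//         roots,       // block number => changes-trie root, pruned headers only
//         roots_proof, // CHT proofs covering exactly the roots above
//     })?;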
+ pub roots: BTreeMap<Header::Number, Header::Hash>, + /// The proofs for all changes tries roots that have been touched AND are + /// missing from the requester's node. It is a map of CHT number => proof. + pub roots_proof: Vec<Vec<u8>>, } /// Light client data fetcher. Implementations of this trait must check if remote data /// is correct (see FetchChecker) and return already checked data. pub trait Fetcher<Block: BlockT>: Send + Sync { - /// Remote header future. - type RemoteHeaderResult: IntoFuture<Item=Block::Header, Error=ClientError>; - /// Remote storage read future. - type RemoteReadResult: IntoFuture<Item=Option<Vec<u8>>, Error=ClientError>; - /// Remote call result future. - type RemoteCallResult: IntoFuture<Item=Vec<u8>, Error=ClientError>; - /// Remote changes result future. - type RemoteChangesResult: IntoFuture<Item=Vec<(NumberFor<Block>, u32)>, Error=ClientError>; - - /// Fetch remote header. - fn remote_header(&self, request: RemoteHeaderRequest<Block::Header>) -> Self::RemoteHeaderResult; - /// Fetch remote storage value. - fn remote_read(&self, request: RemoteReadRequest<Block::Header>) -> Self::RemoteReadResult; - /// Fetch remote call result. - fn remote_call(&self, request: RemoteCallRequest<Block::Header>) -> Self::RemoteCallResult; - /// Fetch remote changes ((block number, extrinsic index)) where given key has been changed - /// at a given blocks range. - fn remote_changes(&self, request: RemoteChangesRequest<Block::Header>) -> Self::RemoteChangesResult; + /// Remote header future. + type RemoteHeaderResult: IntoFuture<Item = Block::Header, Error = ClientError>; + /// Remote storage read future. + type RemoteReadResult: IntoFuture<Item = Option<Vec<u8>>, Error = ClientError>; + /// Remote call result future. + type RemoteCallResult: IntoFuture<Item = Vec<u8>, Error = ClientError>; + /// Remote changes result future. + type RemoteChangesResult: IntoFuture<Item = Vec<(NumberFor<Block>, u32)>, Error = ClientError>; + + /// Fetch remote header. + fn remote_header( + &self, + request: RemoteHeaderRequest<Block::Header>, + ) -> Self::RemoteHeaderResult; + /// Fetch remote storage value. + fn remote_read(&self, request: RemoteReadRequest<Block::Header>) -> Self::RemoteReadResult; + /// Fetch remote call result. + fn remote_call(&self, request: RemoteCallRequest<Block::Header>) -> Self::RemoteCallResult; + /// Fetch remote changes ((block number, extrinsic index)) where given key has been changed + /// at a given blocks range. + fn remote_changes( + &self, + request: RemoteChangesRequest<Block::Header>, + ) -> Self::RemoteChangesResult; } /// Light client remote data checker. @@ -137,597 +145,798 @@ pub trait Fetcher<Block: BlockT>: Send + Sync { /// Implementations of this trait should not use any prunable blockchain data /// except that which is passed to its methods. pub trait FetchChecker<Block: BlockT>: Send + Sync { - /// Check remote header proof. - fn check_header_proof( - &self, - request: &RemoteHeaderRequest<Block::Header>, - header: Option<Block::Header>, - remote_proof: Vec<Vec<u8>> - ) -> ClientResult<Block::Header>; - /// Check remote storage read proof. - fn check_read_proof( - &self, - request: &RemoteReadRequest<Block::Header>, - remote_proof: Vec<Vec<u8>> - ) -> ClientResult<Option<Vec<u8>>>; - /// Check remote method execution proof. - fn check_execution_proof( - &self, - request: &RemoteCallRequest<Block::Header>, - remote_proof: Vec<Vec<u8>> - ) -> ClientResult<Vec<u8>>; - /// Check remote changes query proof. - fn check_changes_proof( - &self, - request: &RemoteChangesRequest<Block::Header>, - proof: ChangesProof<Block::Header> - ) -> ClientResult<Vec<(NumberFor<Block>, u32)>>; + /// Check remote header proof. + fn check_header_proof( + &self, + request: &RemoteHeaderRequest<Block::Header>, + header: Option<Block::Header>, + remote_proof: Vec<Vec<u8>>, + ) -> ClientResult<Block::Header>; + /// Check remote storage read proof. + fn check_read_proof( + &self, + request: &RemoteReadRequest<Block::Header>, + remote_proof: Vec<Vec<u8>>, + ) -> ClientResult<Option<Vec<u8>>>; + /// Check remote method execution proof.
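// The division of labour between `Fetcher` above and `FetchChecker` here, as
// a hedged sketch (`checker` and `remote_proof` are illustrative): a `Fetcher`
// implementation is the network side, and per the trait docs it must run every
// response through the matching checker before its future resolves:
//
//     // inside a Fetcher::remote_call implementation, once a full node has
//     // answered with an execution proof:
//     let checked: Vec<u8> = checker.check_execution_proof(&request, remote_proof)?;
//     // only the checked result is handed back to the caller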
+ fn check_execution_proof( + &self, + request: &RemoteCallRequest, + remote_proof: Vec>, + ) -> ClientResult>; + /// Check remote changes query proof. + fn check_changes_proof( + &self, + request: &RemoteChangesRequest, + proof: ChangesProof, + ) -> ClientResult, u32)>>; } /// Remote data checker. pub struct LightDataChecker, F> { - blockchain: Arc>, - executor: E, - _hasher: PhantomData<(B, H)>, + blockchain: Arc>, + executor: E, + _hasher: PhantomData<(B, H)>, } impl, F> LightDataChecker { - /// Create new light data checker. - pub fn new(blockchain: Arc>, executor: E) -> Self { - Self { - blockchain, executor, _hasher: PhantomData - } - } - - /// Check remote changes query proof assuming that CHT-s are of given size. - fn check_changes_proof_with_cht_size( - &self, - request: &RemoteChangesRequest, - remote_proof: ChangesProof, - cht_size: u64, - ) -> ClientResult, u32)>> - where - H: Hasher, - H::Out: Ord + HeapSizeOf, - { - // since we need roots of all changes tries for the range begin..max - // => remote node can't use max block greater that one that we have passed - if remote_proof.max_block > request.max_block.0 || remote_proof.max_block < request.last_block.0 { - return Err(ClientErrorKind::ChangesTrieAccessFailed(format!( - "Invalid max_block used by the remote node: {}. Local: {}..{}..{}", - remote_proof.max_block, request.first_block.0, request.last_block.0, request.max_block.0, - )).into()); - } - - // check if remote node has responded with extra changes trie roots proofs - // all changes tries roots must be in range [request.first_block.0; request.tries_roots.0) - let is_extra_first_root = remote_proof.roots.keys().next() - .map(|first_root| *first_root < request.first_block.0 - || *first_root >= request.tries_roots.0) - .unwrap_or(false); - let is_extra_last_root = remote_proof.roots.keys().next_back() - .map(|last_root| *last_root >= request.tries_roots.0) - .unwrap_or(false); - if is_extra_first_root || is_extra_last_root { - return Err(ClientErrorKind::ChangesTrieAccessFailed(format!( + /// Create new light data checker. + pub fn new(blockchain: Arc>, executor: E) -> Self { + Self { + blockchain, + executor, + _hasher: PhantomData, + } + } + + /// Check remote changes query proof assuming that CHT-s are of given size. + fn check_changes_proof_with_cht_size( + &self, + request: &RemoteChangesRequest, + remote_proof: ChangesProof, + cht_size: u64, + ) -> ClientResult, u32)>> + where + H: Hasher, + H::Out: Ord + HeapSizeOf, + { + // since we need roots of all changes tries for the range begin..max + // => remote node can't use max block greater that one that we have passed + if remote_proof.max_block > request.max_block.0 + || remote_proof.max_block < request.last_block.0 + { + return Err(ClientErrorKind::ChangesTrieAccessFailed(format!( + "Invalid max_block used by the remote node: {}. 
Local: {}..{}..{}", + remote_proof.max_block, + request.first_block.0, + request.last_block.0, + request.max_block.0, + )) + .into()); + } + + // check if remote node has responded with extra changes trie roots proofs + // all changes tries roots must be in range [request.first_block.0; request.tries_roots.0) + let is_extra_first_root = remote_proof + .roots + .keys() + .next() + .map(|first_root| { + *first_root < request.first_block.0 || *first_root >= request.tries_roots.0 + }) + .unwrap_or(false); + let is_extra_last_root = remote_proof + .roots + .keys() + .next_back() + .map(|last_root| *last_root >= request.tries_roots.0) + .unwrap_or(false); + if is_extra_first_root || is_extra_last_root { + return Err(ClientErrorKind::ChangesTrieAccessFailed(format!( "Extra changes tries roots proofs provided by the remote node: [{:?}..{:?}]. Expected in range: [{}; {})", remote_proof.roots.keys().next(), remote_proof.roots.keys().next_back(), request.first_block.0, request.tries_roots.0, )).into()); - } - - // if request has been composed when some required headers were already pruned - // => remote node has sent us CHT-based proof of required changes tries roots - // => check that this proof is correct before proceeding with changes proof - let remote_max_block = remote_proof.max_block; - let remote_roots = remote_proof.roots; - let remote_roots_proof = remote_proof.roots_proof; - let remote_proof = remote_proof.proof; - if !remote_roots.is_empty() { - self.check_changes_tries_proof( - cht_size, - &remote_roots, - remote_roots_proof, - )?; - } - - // and now check the key changes proof + get the changes - key_changes_proof_check::<_, H>( - &request.changes_trie_config, - &RootsStorage { - roots: (request.tries_roots.0, &request.tries_roots.2), - prev_roots: remote_roots, - }, - remote_proof, - request.first_block.0.as_(), - &ChangesTrieAnchorBlockId { - hash: convert_hash(&request.last_block.1), - number: request.last_block.0.as_(), - }, - remote_max_block.as_(), - &request.key) - .map(|pairs| pairs.into_iter().map(|(b, x)| (As::sa(b), x)).collect()) - .map_err(|err| ClientErrorKind::ChangesTrieAccessFailed(err).into()) - } - - /// Check CHT-based proof for changes tries roots. 
- fn check_changes_tries_proof( - &self, - cht_size: u64, - remote_roots: &BTreeMap, B::Hash>, - remote_roots_proof: Vec>, - ) -> ClientResult<()> - where - H: Hasher, - H::Out: Ord + HeapSizeOf, - { - // all the checks are sharing the same storage - let storage = create_proof_check_backend_storage(remote_roots_proof); - - // we remote_roots.keys() are sorted => we can use this to group changes tries roots - // that are belongs to the same CHT - let blocks = remote_roots.keys().cloned(); - cht::for_each_cht_group::(cht_size, blocks, |mut storage, _, cht_blocks| { - // get local changes trie CHT root for given CHT - // it should be there, because it is never pruned AND request has been composed - // when required header has been pruned (=> replaced with CHT) - let first_block = cht_blocks.first().cloned() - .expect("for_each_cht_group never calls callback with empty groups"); - let local_cht_root = self.blockchain.storage().changes_trie_cht_root(cht_size, first_block)?; - - // check changes trie root for every block within CHT range - for block in cht_blocks { - // check if the proofs storage contains the root - // normally this happens in when the proving backend is created, but since - // we share the storage for multiple checks, do it here - let mut cht_root = H::Out::default(); - cht_root.as_mut().copy_from_slice(local_cht_root.as_ref()); - if !storage.contains(&cht_root, &[]) { - return Err(ClientErrorKind::InvalidCHTProof.into()); - } - - // check proof for single changes trie root - let proving_backend = TrieBackend::new(storage, cht_root); - let remote_changes_trie_root = remote_roots[&block]; - cht::check_proof_on_proving_backend::( - local_cht_root, - block, - remote_changes_trie_root, - &proving_backend)?; - - // and return the storage to use in following checks - storage = proving_backend.into_storage(); - } - - Ok(storage) - }, storage) - } + } + + // if request has been composed when some required headers were already pruned + // => remote node has sent us CHT-based proof of required changes tries roots + // => check that this proof is correct before proceeding with changes proof + let remote_max_block = remote_proof.max_block; + let remote_roots = remote_proof.roots; + let remote_roots_proof = remote_proof.roots_proof; + let remote_proof = remote_proof.proof; + if !remote_roots.is_empty() { + self.check_changes_tries_proof(cht_size, &remote_roots, remote_roots_proof)?; + } + + // and now check the key changes proof + get the changes + key_changes_proof_check::<_, H>( + &request.changes_trie_config, + &RootsStorage { + roots: (request.tries_roots.0, &request.tries_roots.2), + prev_roots: remote_roots, + }, + remote_proof, + request.first_block.0.as_(), + &ChangesTrieAnchorBlockId { + hash: convert_hash(&request.last_block.1), + number: request.last_block.0.as_(), + }, + remote_max_block.as_(), + &request.key, + ) + .map(|pairs| pairs.into_iter().map(|(b, x)| (As::sa(b), x)).collect()) + .map_err(|err| ClientErrorKind::ChangesTrieAccessFailed(err).into()) + } + + /// Check CHT-based proof for changes tries roots. 
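// Orientation for the method below, as a commented sketch of its control flow
// with generic parameters elided (no behaviour beyond what the body already
// does): remote roots are grouped by the CHT that covers them, and each group
// is verified against the locally stored, never-pruned CHT root:
//
//     cht::for_each_cht_group(cht_size, blocks, |storage, _, cht_blocks| {
//         let local_cht_root =
//             blockchain.storage().changes_trie_cht_root(cht_size, first_block)?;
//         for block in cht_blocks {
//             // one proof check per remote changes-trie root in this group
//             cht::check_proof_on_proving_backend(
//                 local_cht_root, block, remote_roots[&block], &proving_backend)?;
//         }
//         Ok(storage)
//     }, storage)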
+ fn check_changes_tries_proof( + &self, + cht_size: u64, + remote_roots: &BTreeMap, B::Hash>, + remote_roots_proof: Vec>, + ) -> ClientResult<()> + where + H: Hasher, + H::Out: Ord + HeapSizeOf, + { + // all the checks are sharing the same storage + let storage = create_proof_check_backend_storage(remote_roots_proof); + + // we remote_roots.keys() are sorted => we can use this to group changes tries roots + // that are belongs to the same CHT + let blocks = remote_roots.keys().cloned(); + cht::for_each_cht_group::( + cht_size, + blocks, + |mut storage, _, cht_blocks| { + // get local changes trie CHT root for given CHT + // it should be there, because it is never pruned AND request has been composed + // when required header has been pruned (=> replaced with CHT) + let first_block = cht_blocks + .first() + .cloned() + .expect("for_each_cht_group never calls callback with empty groups"); + let local_cht_root = self + .blockchain + .storage() + .changes_trie_cht_root(cht_size, first_block)?; + + // check changes trie root for every block within CHT range + for block in cht_blocks { + // check if the proofs storage contains the root + // normally this happens in when the proving backend is created, but since + // we share the storage for multiple checks, do it here + let mut cht_root = H::Out::default(); + cht_root.as_mut().copy_from_slice(local_cht_root.as_ref()); + if !storage.contains(&cht_root, &[]) { + return Err(ClientErrorKind::InvalidCHTProof.into()); + } + + // check proof for single changes trie root + let proving_backend = TrieBackend::new(storage, cht_root); + let remote_changes_trie_root = remote_roots[&block]; + cht::check_proof_on_proving_backend::( + local_cht_root, + block, + remote_changes_trie_root, + &proving_backend, + )?; + + // and return the storage to use in following checks + storage = proving_backend.into_storage(); + } + + Ok(storage) + }, + storage, + ) + } } impl FetchChecker for LightDataChecker - where - Block: BlockT, - E: CodeExecutor, - H: Hasher, - H::Out: Ord + HeapSizeOf, - S: BlockchainStorage, - F: Send + Sync, +where + Block: BlockT, + E: CodeExecutor, + H: Hasher, + H::Out: Ord + HeapSizeOf, + S: BlockchainStorage, + F: Send + Sync, { - fn check_header_proof( - &self, - request: &RemoteHeaderRequest, - remote_header: Option, - remote_proof: Vec> - ) -> ClientResult { - let remote_header = remote_header.ok_or_else(|| - ClientError::from(ClientErrorKind::InvalidCHTProof))?; - let remote_header_hash = remote_header.hash(); - cht::check_proof::( - request.cht_root, - request.block, - remote_header_hash, - remote_proof) - .map(|_| remote_header) - } - - fn check_read_proof( - &self, - request: &RemoteReadRequest, - remote_proof: Vec> - ) -> ClientResult>> { - read_proof_check::(convert_hash(request.header.state_root()), remote_proof, &request.key) - .map_err(Into::into) - } - - fn check_execution_proof( - &self, - request: &RemoteCallRequest, - remote_proof: Vec> - ) -> ClientResult> { - check_execution_proof::<_, _, H>(&self.executor, request, remote_proof) - } - - fn check_changes_proof( - &self, - request: &RemoteChangesRequest, - remote_proof: ChangesProof - ) -> ClientResult, u32)>> { - self.check_changes_proof_with_cht_size(request, remote_proof, cht::SIZE) - } + fn check_header_proof( + &self, + request: &RemoteHeaderRequest, + remote_header: Option, + remote_proof: Vec>, + ) -> ClientResult { + let remote_header = + remote_header.ok_or_else(|| ClientError::from(ClientErrorKind::InvalidCHTProof))?; + let remote_header_hash = remote_header.hash(); + 
cht::check_proof::( + request.cht_root, + request.block, + remote_header_hash, + remote_proof, + ) + .map(|_| remote_header) + } + + fn check_read_proof( + &self, + request: &RemoteReadRequest, + remote_proof: Vec>, + ) -> ClientResult>> { + read_proof_check::( + convert_hash(request.header.state_root()), + remote_proof, + &request.key, + ) + .map_err(Into::into) + } + + fn check_execution_proof( + &self, + request: &RemoteCallRequest, + remote_proof: Vec>, + ) -> ClientResult> { + check_execution_proof::<_, _, H>(&self.executor, request, remote_proof) + } + + fn check_changes_proof( + &self, + request: &RemoteChangesRequest, + remote_proof: ChangesProof, + ) -> ClientResult, u32)>> { + self.check_changes_proof_with_cht_size(request, remote_proof, cht::SIZE) + } } /// A view of BTreeMap as a changes trie roots storage. struct RootsStorage<'a, Number: As, Hash: 'a> { - roots: (Number, &'a [Hash]), - prev_roots: BTreeMap, + roots: (Number, &'a [Hash]), + prev_roots: BTreeMap, } impl<'a, H, Number, Hash> ChangesTrieRootsStorage for RootsStorage<'a, Number, Hash> - where - H: Hasher, - Number: Send + Sync + Eq + ::std::cmp::Ord + Copy + As, - Hash: 'a + Send + Sync + Clone + AsRef<[u8]>, +where + H: Hasher, + Number: Send + Sync + Eq + ::std::cmp::Ord + Copy + As, + Hash: 'a + Send + Sync + Clone + AsRef<[u8]>, { - fn root(&self, _anchor: &ChangesTrieAnchorBlockId, block: u64) -> Result, String> { - // we can't ask for roots from parallel forks here => ignore anchor - let root = if block < self.roots.0.as_() { - self.prev_roots.get(&As::sa(block)).cloned() - } else { - block.checked_sub(self.roots.0.as_()) - .and_then(|index| self.roots.1.get(index as usize)) - .cloned() - }; - - Ok(root.map(|root| { - let mut hasher_root: H::Out = Default::default(); - hasher_root.as_mut().copy_from_slice(root.as_ref()); - hasher_root - })) - } + fn root( + &self, + _anchor: &ChangesTrieAnchorBlockId, + block: u64, + ) -> Result, String> { + // we can't ask for roots from parallel forks here => ignore anchor + let root = if block < self.roots.0.as_() { + self.prev_roots.get(&As::sa(block)).cloned() + } else { + block + .checked_sub(self.roots.0.as_()) + .and_then(|index| self.roots.1.get(index as usize)) + .cloned() + }; + + Ok(root.map(|root| { + let mut hasher_root: H::Out = Default::default(); + hasher_root.as_mut().copy_from_slice(root.as_ref()); + hasher_root + })) + } } #[cfg(test)] pub mod tests { - use futures::future::{ok, err, FutureResult}; - use parking_lot::Mutex; - use parity_codec::Decode; - use crate::client::tests::prepare_client_with_key_changes; - use executor::{self, NativeExecutionDispatch}; - use crate::error::Error as ClientError; - use test_client::{ - self, TestClient, blockchain::HeaderBackend, AccountKeyring, - runtime::{self, Hash, Block, Header} - }; - use consensus::BlockOrigin; - - use crate::in_mem::{Blockchain as InMemoryBlockchain}; - use crate::light::fetcher::{Fetcher, FetchChecker, LightDataChecker, - RemoteCallRequest, RemoteHeaderRequest}; - use crate::light::blockchain::tests::{DummyStorage, DummyBlockchain}; - use primitives::{twox_128, Blake2Hasher}; - use primitives::storage::{StorageKey, well_known_keys}; - use runtime_primitives::generic::BlockId; - use state_machine::Backend; - use super::*; - - pub type OkCallFetcher = Mutex>; - - impl Fetcher for OkCallFetcher { - type RemoteHeaderResult = FutureResult; - type RemoteReadResult = FutureResult>, ClientError>; - type RemoteCallResult = FutureResult, ClientError>; - type RemoteChangesResult = FutureResult, u32)>, 
ClientError>; - - fn remote_header(&self, _request: RemoteHeaderRequest<Header>
) -> Self::RemoteHeaderResult { - err("Not implemented on test node".into()) - } - - fn remote_read(&self, _request: RemoteReadRequest<Header>
) -> Self::RemoteReadResult { - err("Not implemented on test node".into()) - } - - fn remote_call(&self, _request: RemoteCallRequest<Header>
) -> Self::RemoteCallResult { - ok((*self.lock()).clone()) - } - - fn remote_changes(&self, _request: RemoteChangesRequest<Header>
) -> Self::RemoteChangesResult { - err("Not implemented on test node".into()) - } - } - - type TestChecker = LightDataChecker, Blake2Hasher, Block, DummyStorage, OkCallFetcher>; - - fn prepare_for_read_proof_check() -> (TestChecker, Header, Vec>, u32) { - // prepare remote client - let remote_client = test_client::new(); - let remote_block_id = BlockId::Number(0); - let remote_block_hash = remote_client.block_hash(0).unwrap().unwrap(); - let mut remote_block_header = remote_client.header(&remote_block_id).unwrap().unwrap(); - remote_block_header.state_root = remote_client.state_at(&remote_block_id).unwrap().storage_root(::std::iter::empty()).0.into(); - - // 'fetch' read proof from remote node - let authorities_len = remote_client.storage(&remote_block_id, &StorageKey(well_known_keys::AUTHORITY_COUNT.to_vec())) - .unwrap() - .and_then(|v| Decode::decode(&mut &v.0[..])).unwrap(); - let remote_read_proof = remote_client.read_proof(&remote_block_id, well_known_keys::AUTHORITY_COUNT).unwrap(); - - // check remote read proof locally - let local_storage = InMemoryBlockchain::::new(); - local_storage.insert( - remote_block_hash, - remote_block_header.clone(), - None, - None, - crate::backend::NewBlockState::Final, - ).unwrap(); - let local_executor = test_client::LocalExecutor::new(None); - let local_checker = LightDataChecker::new(Arc::new(DummyBlockchain::new(DummyStorage::new())), local_executor); - (local_checker, remote_block_header, remote_read_proof, authorities_len) - } - - fn prepare_for_header_proof_check(insert_cht: bool) -> (TestChecker, Hash, Header, Vec>) { - // prepare remote client - let remote_client = test_client::new(); - let mut local_headers_hashes = Vec::new(); - for i in 0..4 { - let builder = remote_client.new_block().unwrap(); - remote_client.import(BlockOrigin::Own, builder.bake().unwrap()).unwrap(); - local_headers_hashes.push(remote_client.block_hash(i + 1) - .map_err(|_| ClientErrorKind::Backend("TestError".into()).into())); - } - - // 'fetch' header proof from remote node - let remote_block_id = BlockId::Number(1); - let (remote_block_header, remote_header_proof) = remote_client.header_proof_with_cht_size(&remote_block_id, 4).unwrap(); - - // check remote read proof locally - let local_storage = InMemoryBlockchain::::new(); - let local_cht_root = cht::compute_root::(4, 0, local_headers_hashes).unwrap(); - if insert_cht { - local_storage.insert_cht_root(1, local_cht_root); - } - let local_executor = test_client::LocalExecutor::new(None); - let local_checker = LightDataChecker::new(Arc::new(DummyBlockchain::new(DummyStorage::new())), local_executor); - (local_checker, local_cht_root, remote_block_header, remote_header_proof) - } - - #[test] - fn storage_read_proof_is_generated_and_checked() { - let (local_checker, remote_block_header, remote_read_proof, authorities_len) = prepare_for_read_proof_check(); - assert_eq!((&local_checker as &FetchChecker).check_read_proof(&RemoteReadRequest::
<Header> { - block: remote_block_header.hash(), - header: remote_block_header, - key: well_known_keys::AUTHORITY_COUNT.to_vec(), - retry_count: None, - }, remote_read_proof).unwrap().unwrap()[0], authorities_len as u8); - } - - #[test] - fn header_proof_is_generated_and_checked() { - let (local_checker, local_cht_root, remote_block_header, remote_header_proof) = prepare_for_header_proof_check(true); - assert_eq!((&local_checker as &FetchChecker<Block>).check_header_proof(&RemoteHeaderRequest::<Header>
{ - cht_root: local_cht_root, - block: 1, - retry_count: None, - }, Some(remote_block_header.clone()), remote_header_proof).unwrap(), remote_block_header); - } - - #[test] - fn check_header_proof_fails_if_cht_root_is_invalid() { - let (local_checker, _, mut remote_block_header, remote_header_proof) = prepare_for_header_proof_check(true); - remote_block_header.number = 100; - assert!((&local_checker as &FetchChecker<Block>).check_header_proof(&RemoteHeaderRequest::<Header>
{ - cht_root: Default::default(), - block: 1, - retry_count: None, - }, Some(remote_block_header.clone()), remote_header_proof).is_err()); - } - - #[test] - fn check_header_proof_fails_if_invalid_header_provided() { - let (local_checker, local_cht_root, mut remote_block_header, remote_header_proof) = prepare_for_header_proof_check(true); - remote_block_header.number = 100; - assert!((&local_checker as &FetchChecker<Block>).check_header_proof(&RemoteHeaderRequest::<Header>
{ - cht_root: local_cht_root, - block: 1, - retry_count: None, - }, Some(remote_block_header.clone()), remote_header_proof).is_err()); - } - - #[test] - fn changes_proof_is_generated_and_checked_when_headers_are_not_pruned() { - let (remote_client, local_roots, test_cases) = prepare_client_with_key_changes(); - let local_checker = TestChecker::new( - Arc::new(DummyBlockchain::new(DummyStorage::new())), - test_client::LocalExecutor::new(None) - ); - let local_checker = &local_checker as &FetchChecker; - let max = remote_client.info().unwrap().chain.best_number; - let max_hash = remote_client.info().unwrap().chain.best_hash; - - for (index, (begin, end, key, expected_result)) in test_cases.into_iter().enumerate() { - let begin_hash = remote_client.block_hash(begin).unwrap().unwrap(); - let end_hash = remote_client.block_hash(end).unwrap().unwrap(); - - // 'fetch' changes proof from remote node - let key = StorageKey(key); - let remote_proof = remote_client.key_changes_proof( - begin_hash, end_hash, begin_hash, max_hash, &key - ).unwrap(); - - // check proof on local client - let local_roots_range = local_roots.clone()[(begin - 1) as usize..].to_vec(); - let request = RemoteChangesRequest::
{ - changes_trie_config: runtime::changes_trie_config(), - first_block: (begin, begin_hash), - last_block: (end, end_hash), - max_block: (max, max_hash), - tries_roots: (begin, begin_hash, local_roots_range), - key: key.0, - retry_count: None, - }; - let local_result = local_checker.check_changes_proof(&request, ChangesProof { - max_block: remote_proof.max_block, - proof: remote_proof.proof, - roots: remote_proof.roots, - roots_proof: remote_proof.roots_proof, - }).unwrap(); - - // ..and ensure that result is the same as on remote node - match local_result == expected_result { - true => (), - false => panic!(format!("Failed test {}: local = {:?}, expected = {:?}", - index, local_result, expected_result)), - } - } - } - - #[test] - fn changes_proof_is_generated_and_checked_when_headers_are_pruned() { - // we're testing this test case here: - // (1, 4, dave.clone(), vec![(4, 0), (1, 1), (1, 0)]), - let (remote_client, remote_roots, _) = prepare_client_with_key_changes(); - let dave = twox_128(&runtime::system::balance_of_key(AccountKeyring::Dave.into())).to_vec(); - let dave = StorageKey(dave); - - // 'fetch' changes proof from remote node: - // we're fetching changes for range b1..b4 - // we do not know changes trie roots before b3 (i.e. we only know b3+b4) - // but we have changes trie CHT root for b1...b4 - let b1 = remote_client.block_hash_from_id(&BlockId::Number(1)).unwrap().unwrap(); - let b3 = remote_client.block_hash_from_id(&BlockId::Number(3)).unwrap().unwrap(); - let b4 = remote_client.block_hash_from_id(&BlockId::Number(4)).unwrap().unwrap(); - let remote_proof = remote_client.key_changes_proof_with_cht_size( - b1, b4, b3, b4, &dave, 4 - ).unwrap(); - - // prepare local checker, having a root of changes trie CHT#0 - let local_cht_root = cht::compute_root::(4, 0, remote_roots.iter().cloned().map(|ct| Ok(Some(ct)))).unwrap(); - let mut local_storage = DummyStorage::new(); - local_storage.changes_tries_cht_roots.insert(0, local_cht_root); - let local_checker = TestChecker::new( - Arc::new(DummyBlockchain::new(local_storage)), - test_client::LocalExecutor::new(None) - ); - - // check proof on local client - let request = RemoteChangesRequest::
{ - changes_trie_config: runtime::changes_trie_config(), - first_block: (1, b1), - last_block: (4, b4), - max_block: (4, b4), - tries_roots: (3, b3, vec![remote_roots[2].clone(), remote_roots[3].clone()]), - key: dave.0, - retry_count: None, - }; - let local_result = local_checker.check_changes_proof_with_cht_size(&request, ChangesProof { - max_block: remote_proof.max_block, - proof: remote_proof.proof, - roots: remote_proof.roots, - roots_proof: remote_proof.roots_proof, - }, 4).unwrap(); - - assert_eq!(local_result, vec![(4, 0), (1, 1), (1, 0)]); - } - - #[test] - fn check_changes_proof_fails_if_proof_is_wrong() { - let (remote_client, local_roots, test_cases) = prepare_client_with_key_changes(); - let local_checker = TestChecker::new( - Arc::new(DummyBlockchain::new(DummyStorage::new())), - test_client::LocalExecutor::new(None) - ); - let local_checker = &local_checker as &FetchChecker; - let max = remote_client.info().unwrap().chain.best_number; - let max_hash = remote_client.info().unwrap().chain.best_hash; - - let (begin, end, key, _) = test_cases[0].clone(); - let begin_hash = remote_client.block_hash(begin).unwrap().unwrap(); - let end_hash = remote_client.block_hash(end).unwrap().unwrap(); - - // 'fetch' changes proof from remote node - let key = StorageKey(key); - let remote_proof = remote_client.key_changes_proof( - begin_hash, end_hash, begin_hash, max_hash, &key).unwrap(); - - let local_roots_range = local_roots.clone()[(begin - 1) as usize..].to_vec(); - let request = RemoteChangesRequest::
{ - changes_trie_config: runtime::changes_trie_config(), - first_block: (begin, begin_hash), - last_block: (end, end_hash), - max_block: (max, max_hash), - tries_roots: (begin, begin_hash, local_roots_range.clone()), - key: key.0, - retry_count: None, - }; - - // check proof on local client using max from the future - assert!(local_checker.check_changes_proof(&request, ChangesProof { - max_block: remote_proof.max_block + 1, - proof: remote_proof.proof.clone(), - roots: remote_proof.roots.clone(), - roots_proof: remote_proof.roots_proof.clone(), - }).is_err()); - - // check proof on local client using broken proof - assert!(local_checker.check_changes_proof(&request, ChangesProof { - max_block: remote_proof.max_block, - proof: local_roots_range.clone().into_iter().map(|v| v.as_ref().to_vec()).collect(), - roots: remote_proof.roots, - roots_proof: remote_proof.roots_proof, - }).is_err()); - - // extra roots proofs are provided - assert!(local_checker.check_changes_proof(&request, ChangesProof { - max_block: remote_proof.max_block, - proof: remote_proof.proof.clone(), - roots: vec![(begin - 1, Default::default())].into_iter().collect(), - roots_proof: vec![], - }).is_err()); - assert!(local_checker.check_changes_proof(&request, ChangesProof { - max_block: remote_proof.max_block, - proof: remote_proof.proof.clone(), - roots: vec![(end + 1, Default::default())].into_iter().collect(), - roots_proof: vec![], - }).is_err()); - } - - #[test] - fn check_changes_tries_proof_fails_if_proof_is_wrong() { - // we're testing this test case here: - // (1, 4, dave.clone(), vec![(4, 0), (1, 1), (1, 0)]), - let (remote_client, remote_roots, _) = prepare_client_with_key_changes(); - let local_cht_root = cht::compute_root::( - 4, 0, remote_roots.iter().cloned().map(|ct| Ok(Some(ct)))).unwrap(); - let dave = twox_128(&runtime::system::balance_of_key(AccountKeyring::Dave.into())).to_vec(); - let dave = StorageKey(dave); - - // 'fetch' changes proof from remote node: - // we're fetching changes for range b1..b4 - // we do not know changes trie roots before b3 (i.e. 
we only know b3+b4) - // but we have changes trie CHT root for b1...b4 - let b1 = remote_client.block_hash_from_id(&BlockId::Number(1)).unwrap().unwrap(); - let b3 = remote_client.block_hash_from_id(&BlockId::Number(3)).unwrap().unwrap(); - let b4 = remote_client.block_hash_from_id(&BlockId::Number(4)).unwrap().unwrap(); - let remote_proof = remote_client.key_changes_proof_with_cht_size( - b1, b4, b3, b4, &dave, 4 - ).unwrap(); - - // fails when changes trie CHT is missing from the local db - let local_checker = TestChecker::new( - Arc::new(DummyBlockchain::new(DummyStorage::new())), - test_client::LocalExecutor::new(None) - ); - assert!(local_checker.check_changes_tries_proof(4, &remote_proof.roots, - remote_proof.roots_proof.clone()).is_err()); - - // fails when proof is broken - let mut local_storage = DummyStorage::new(); - local_storage.changes_tries_cht_roots.insert(0, local_cht_root); - let local_checker = TestChecker::new( - Arc::new(DummyBlockchain::new(local_storage)), - test_client::LocalExecutor::new(None) - ); - assert!(local_checker.check_changes_tries_proof(4, &remote_proof.roots, vec![]).is_err()); - } + use crate::client::tests::prepare_client_with_key_changes; + use crate::error::Error as ClientError; + use consensus::BlockOrigin; + use executor::{self, NativeExecutionDispatch}; + use futures::future::{err, ok, FutureResult}; + use parity_codec::Decode; + use parking_lot::Mutex; + use test_client::{ + self, + blockchain::HeaderBackend, + runtime::{self, Block, Hash, Header}, + AccountKeyring, TestClient, + }; + + use super::*; + use crate::in_mem::Blockchain as InMemoryBlockchain; + use crate::light::blockchain::tests::{DummyBlockchain, DummyStorage}; + use crate::light::fetcher::{ + FetchChecker, Fetcher, LightDataChecker, RemoteCallRequest, RemoteHeaderRequest, + }; + use primitives::storage::{well_known_keys, StorageKey}; + use primitives::{twox_128, Blake2Hasher}; + use runtime_primitives::generic::BlockId; + use state_machine::Backend; + + pub type OkCallFetcher = Mutex>; + + impl Fetcher for OkCallFetcher { + type RemoteHeaderResult = FutureResult; + type RemoteReadResult = FutureResult>, ClientError>; + type RemoteCallResult = FutureResult, ClientError>; + type RemoteChangesResult = FutureResult, u32)>, ClientError>; + + fn remote_header(&self, _request: RemoteHeaderRequest
<Header>) -> Self::RemoteHeaderResult { + err("Not implemented on test node".into()) + } + + fn remote_read(&self, _request: RemoteReadRequest<Header>
) -> Self::RemoteReadResult { + err("Not implemented on test node".into()) + } + + fn remote_call(&self, _request: RemoteCallRequest<Header>
) -> Self::RemoteCallResult { + ok((*self.lock()).clone()) + } + + fn remote_changes( + &self, + _request: RemoteChangesRequest<Header>
, + ) -> Self::RemoteChangesResult { + err("Not implemented on test node".into()) + } + } + + type TestChecker = LightDataChecker< + executor::NativeExecutor, + Blake2Hasher, + Block, + DummyStorage, + OkCallFetcher, + >; + + fn prepare_for_read_proof_check() -> (TestChecker, Header, Vec>, u32) { + // prepare remote client + let remote_client = test_client::new(); + let remote_block_id = BlockId::Number(0); + let remote_block_hash = remote_client.block_hash(0).unwrap().unwrap(); + let mut remote_block_header = remote_client.header(&remote_block_id).unwrap().unwrap(); + remote_block_header.state_root = remote_client + .state_at(&remote_block_id) + .unwrap() + .storage_root(::std::iter::empty()) + .0 + .into(); + + // 'fetch' read proof from remote node + let authorities_len = remote_client + .storage( + &remote_block_id, + &StorageKey(well_known_keys::AUTHORITY_COUNT.to_vec()), + ) + .unwrap() + .and_then(|v| Decode::decode(&mut &v.0[..])) + .unwrap(); + let remote_read_proof = remote_client + .read_proof(&remote_block_id, well_known_keys::AUTHORITY_COUNT) + .unwrap(); + + // check remote read proof locally + let local_storage = InMemoryBlockchain::::new(); + local_storage + .insert( + remote_block_hash, + remote_block_header.clone(), + None, + None, + crate::backend::NewBlockState::Final, + ) + .unwrap(); + let local_executor = test_client::LocalExecutor::new(None); + let local_checker = LightDataChecker::new( + Arc::new(DummyBlockchain::new(DummyStorage::new())), + local_executor, + ); + ( + local_checker, + remote_block_header, + remote_read_proof, + authorities_len, + ) + } + + fn prepare_for_header_proof_check( + insert_cht: bool, + ) -> (TestChecker, Hash, Header, Vec>) { + // prepare remote client + let remote_client = test_client::new(); + let mut local_headers_hashes = Vec::new(); + for i in 0..4 { + let builder = remote_client.new_block().unwrap(); + remote_client + .import(BlockOrigin::Own, builder.bake().unwrap()) + .unwrap(); + local_headers_hashes.push( + remote_client + .block_hash(i + 1) + .map_err(|_| ClientErrorKind::Backend("TestError".into()).into()), + ); + } + + // 'fetch' header proof from remote node + let remote_block_id = BlockId::Number(1); + let (remote_block_header, remote_header_proof) = remote_client + .header_proof_with_cht_size(&remote_block_id, 4) + .unwrap(); + + // check remote read proof locally + let local_storage = InMemoryBlockchain::::new(); + let local_cht_root = + cht::compute_root::(4, 0, local_headers_hashes).unwrap(); + if insert_cht { + local_storage.insert_cht_root(1, local_cht_root); + } + let local_executor = test_client::LocalExecutor::new(None); + let local_checker = LightDataChecker::new( + Arc::new(DummyBlockchain::new(DummyStorage::new())), + local_executor, + ); + ( + local_checker, + local_cht_root, + remote_block_header, + remote_header_proof, + ) + } + + #[test] + fn storage_read_proof_is_generated_and_checked() { + let (local_checker, remote_block_header, remote_read_proof, authorities_len) = + prepare_for_read_proof_check(); + assert_eq!( + (&local_checker as &FetchChecker) + .check_read_proof( + &RemoteReadRequest::
{ + block: remote_block_header.hash(), + header: remote_block_header, + key: well_known_keys::AUTHORITY_COUNT.to_vec(), + retry_count: None, + }, + remote_read_proof + ) + .unwrap() + .unwrap()[0], + authorities_len as u8 + ); + } + + #[test] + fn header_proof_is_generated_and_checked() { + let (local_checker, local_cht_root, remote_block_header, remote_header_proof) = + prepare_for_header_proof_check(true); + assert_eq!( + (&local_checker as &FetchChecker) + .check_header_proof( + &RemoteHeaderRequest::
{ + cht_root: local_cht_root, + block: 1, + retry_count: None, + }, + Some(remote_block_header.clone()), + remote_header_proof + ) + .unwrap(), + remote_block_header + ); + } + + #[test] + fn check_header_proof_fails_if_cht_root_is_invalid() { + let (local_checker, _, mut remote_block_header, remote_header_proof) = + prepare_for_header_proof_check(true); + remote_block_header.number = 100; + assert!((&local_checker as &FetchChecker) + .check_header_proof( + &RemoteHeaderRequest::
{ + cht_root: Default::default(), + block: 1, + retry_count: None, + }, + Some(remote_block_header.clone()), + remote_header_proof + ) + .is_err()); + } + + #[test] + fn check_header_proof_fails_if_invalid_header_provided() { + let (local_checker, local_cht_root, mut remote_block_header, remote_header_proof) = + prepare_for_header_proof_check(true); + remote_block_header.number = 100; + assert!((&local_checker as &FetchChecker) + .check_header_proof( + &RemoteHeaderRequest::
{ + cht_root: local_cht_root, + block: 1, + retry_count: None, + }, + Some(remote_block_header.clone()), + remote_header_proof + ) + .is_err()); + } + + #[test] + fn changes_proof_is_generated_and_checked_when_headers_are_not_pruned() { + let (remote_client, local_roots, test_cases) = prepare_client_with_key_changes(); + let local_checker = TestChecker::new( + Arc::new(DummyBlockchain::new(DummyStorage::new())), + test_client::LocalExecutor::new(None), + ); + let local_checker = &local_checker as &FetchChecker; + let max = remote_client.info().unwrap().chain.best_number; + let max_hash = remote_client.info().unwrap().chain.best_hash; + + for (index, (begin, end, key, expected_result)) in test_cases.into_iter().enumerate() { + let begin_hash = remote_client.block_hash(begin).unwrap().unwrap(); + let end_hash = remote_client.block_hash(end).unwrap().unwrap(); + + // 'fetch' changes proof from remote node + let key = StorageKey(key); + let remote_proof = remote_client + .key_changes_proof(begin_hash, end_hash, begin_hash, max_hash, &key) + .unwrap(); + + // check proof on local client + let local_roots_range = local_roots.clone()[(begin - 1) as usize..].to_vec(); + let request = RemoteChangesRequest::
{ + changes_trie_config: runtime::changes_trie_config(), + first_block: (begin, begin_hash), + last_block: (end, end_hash), + max_block: (max, max_hash), + tries_roots: (begin, begin_hash, local_roots_range), + key: key.0, + retry_count: None, + }; + let local_result = local_checker + .check_changes_proof( + &request, + ChangesProof { + max_block: remote_proof.max_block, + proof: remote_proof.proof, + roots: remote_proof.roots, + roots_proof: remote_proof.roots_proof, + }, + ) + .unwrap(); + + // ..and ensure that result is the same as on remote node + match local_result == expected_result { + true => (), + false => panic!(format!( + "Failed test {}: local = {:?}, expected = {:?}", + index, local_result, expected_result + )), + } + } + } + + #[test] + fn changes_proof_is_generated_and_checked_when_headers_are_pruned() { + // we're testing this test case here: + // (1, 4, dave.clone(), vec![(4, 0), (1, 1), (1, 0)]), + let (remote_client, remote_roots, _) = prepare_client_with_key_changes(); + let dave = twox_128(&runtime::system::balance_of_key( + AccountKeyring::Dave.into(), + )) + .to_vec(); + let dave = StorageKey(dave); + + // 'fetch' changes proof from remote node: + // we're fetching changes for range b1..b4 + // we do not know changes trie roots before b3 (i.e. we only know b3+b4) + // but we have changes trie CHT root for b1...b4 + let b1 = remote_client + .block_hash_from_id(&BlockId::Number(1)) + .unwrap() + .unwrap(); + let b3 = remote_client + .block_hash_from_id(&BlockId::Number(3)) + .unwrap() + .unwrap(); + let b4 = remote_client + .block_hash_from_id(&BlockId::Number(4)) + .unwrap() + .unwrap(); + let remote_proof = remote_client + .key_changes_proof_with_cht_size(b1, b4, b3, b4, &dave, 4) + .unwrap(); + + // prepare local checker, having a root of changes trie CHT#0 + let local_cht_root = cht::compute_root::( + 4, + 0, + remote_roots.iter().cloned().map(|ct| Ok(Some(ct))), + ) + .unwrap(); + let mut local_storage = DummyStorage::new(); + local_storage + .changes_tries_cht_roots + .insert(0, local_cht_root); + let local_checker = TestChecker::new( + Arc::new(DummyBlockchain::new(local_storage)), + test_client::LocalExecutor::new(None), + ); + + // check proof on local client + let request = RemoteChangesRequest::
{ + changes_trie_config: runtime::changes_trie_config(), + first_block: (1, b1), + last_block: (4, b4), + max_block: (4, b4), + tries_roots: ( + 3, + b3, + vec![remote_roots[2].clone(), remote_roots[3].clone()], + ), + key: dave.0, + retry_count: None, + }; + let local_result = local_checker + .check_changes_proof_with_cht_size( + &request, + ChangesProof { + max_block: remote_proof.max_block, + proof: remote_proof.proof, + roots: remote_proof.roots, + roots_proof: remote_proof.roots_proof, + }, + 4, + ) + .unwrap(); + + assert_eq!(local_result, vec![(4, 0), (1, 1), (1, 0)]); + } + + #[test] + fn check_changes_proof_fails_if_proof_is_wrong() { + let (remote_client, local_roots, test_cases) = prepare_client_with_key_changes(); + let local_checker = TestChecker::new( + Arc::new(DummyBlockchain::new(DummyStorage::new())), + test_client::LocalExecutor::new(None), + ); + let local_checker = &local_checker as &FetchChecker; + let max = remote_client.info().unwrap().chain.best_number; + let max_hash = remote_client.info().unwrap().chain.best_hash; + + let (begin, end, key, _) = test_cases[0].clone(); + let begin_hash = remote_client.block_hash(begin).unwrap().unwrap(); + let end_hash = remote_client.block_hash(end).unwrap().unwrap(); + + // 'fetch' changes proof from remote node + let key = StorageKey(key); + let remote_proof = remote_client + .key_changes_proof(begin_hash, end_hash, begin_hash, max_hash, &key) + .unwrap(); + + let local_roots_range = local_roots.clone()[(begin - 1) as usize..].to_vec(); + let request = RemoteChangesRequest::
{ + changes_trie_config: runtime::changes_trie_config(), + first_block: (begin, begin_hash), + last_block: (end, end_hash), + max_block: (max, max_hash), + tries_roots: (begin, begin_hash, local_roots_range.clone()), + key: key.0, + retry_count: None, + }; + + // check proof on local client using max from the future + assert!(local_checker + .check_changes_proof( + &request, + ChangesProof { + max_block: remote_proof.max_block + 1, + proof: remote_proof.proof.clone(), + roots: remote_proof.roots.clone(), + roots_proof: remote_proof.roots_proof.clone(), + } + ) + .is_err()); + + // check proof on local client using broken proof + assert!(local_checker + .check_changes_proof( + &request, + ChangesProof { + max_block: remote_proof.max_block, + proof: local_roots_range + .clone() + .into_iter() + .map(|v| v.as_ref().to_vec()) + .collect(), + roots: remote_proof.roots, + roots_proof: remote_proof.roots_proof, + } + ) + .is_err()); + + // extra roots proofs are provided + assert!(local_checker + .check_changes_proof( + &request, + ChangesProof { + max_block: remote_proof.max_block, + proof: remote_proof.proof.clone(), + roots: vec![(begin - 1, Default::default())].into_iter().collect(), + roots_proof: vec![], + } + ) + .is_err()); + assert!(local_checker + .check_changes_proof( + &request, + ChangesProof { + max_block: remote_proof.max_block, + proof: remote_proof.proof.clone(), + roots: vec![(end + 1, Default::default())].into_iter().collect(), + roots_proof: vec![], + } + ) + .is_err()); + } + + #[test] + fn check_changes_tries_proof_fails_if_proof_is_wrong() { + // we're testing this test case here: + // (1, 4, dave.clone(), vec![(4, 0), (1, 1), (1, 0)]), + let (remote_client, remote_roots, _) = prepare_client_with_key_changes(); + let local_cht_root = cht::compute_root::( + 4, + 0, + remote_roots.iter().cloned().map(|ct| Ok(Some(ct))), + ) + .unwrap(); + let dave = twox_128(&runtime::system::balance_of_key( + AccountKeyring::Dave.into(), + )) + .to_vec(); + let dave = StorageKey(dave); + + // 'fetch' changes proof from remote node: + // we're fetching changes for range b1..b4 + // we do not know changes trie roots before b3 (i.e. 
we only know b3+b4) + // but we have changes trie CHT root for b1...b4 + let b1 = remote_client + .block_hash_from_id(&BlockId::Number(1)) + .unwrap() + .unwrap(); + let b3 = remote_client + .block_hash_from_id(&BlockId::Number(3)) + .unwrap() + .unwrap(); + let b4 = remote_client + .block_hash_from_id(&BlockId::Number(4)) + .unwrap() + .unwrap(); + let remote_proof = remote_client + .key_changes_proof_with_cht_size(b1, b4, b3, b4, &dave, 4) + .unwrap(); + + // fails when changes trie CHT is missing from the local db + let local_checker = TestChecker::new( + Arc::new(DummyBlockchain::new(DummyStorage::new())), + test_client::LocalExecutor::new(None), + ); + assert!(local_checker + .check_changes_tries_proof(4, &remote_proof.roots, remote_proof.roots_proof.clone()) + .is_err()); + + // fails when proof is broken + let mut local_storage = DummyStorage::new(); + local_storage + .changes_tries_cht_roots + .insert(0, local_cht_root); + let local_checker = TestChecker::new( + Arc::new(DummyBlockchain::new(local_storage)), + test_client::LocalExecutor::new(None), + ); + assert!(local_checker + .check_changes_tries_proof(4, &remote_proof.roots, vec![]) + .is_err()); + } } diff --git a/core/client/src/light/mod.rs b/core/client/src/light/mod.rs index 2cdcaf4990..5e816d5c91 100644 --- a/core/client/src/light/mod.rs +++ b/core/client/src/light/mod.rs @@ -24,9 +24,9 @@ pub mod fetcher; use std::sync::Arc; use executor::RuntimeInfo; -use primitives::{H256, Blake2Hasher}; -use runtime_primitives::BuildStorage; +use primitives::{Blake2Hasher, H256}; use runtime_primitives::traits::Block as BlockT; +use runtime_primitives::BuildStorage; use state_machine::CodeExecutor; use crate::call_executor::LocalCallExecutor; @@ -38,53 +38,65 @@ use crate::light::call_executor::{RemoteCallExecutor, RemoteOrLocalCallExecutor} use crate::light::fetcher::{Fetcher, LightDataChecker}; /// Create an instance of light client blockchain backend. -pub fn new_light_blockchain, F>(storage: S) -> Arc> { - Arc::new(Blockchain::new(storage)) +pub fn new_light_blockchain, F>( + storage: S, +) -> Arc> { + Arc::new(Blockchain::new(storage)) } /// Create an instance of light client backend. -pub fn new_light_backend(blockchain: Arc>, fetcher: Arc) -> Arc> - where - B: BlockT, - S: BlockchainStorage, - F: Fetcher, +pub fn new_light_backend( + blockchain: Arc>, + fetcher: Arc, +) -> Arc> +where + B: BlockT, + S: BlockchainStorage, + F: Fetcher, { - blockchain.set_fetcher(Arc::downgrade(&fetcher)); - Arc::new(Backend::new(blockchain)) + blockchain.set_fetcher(Arc::downgrade(&fetcher)); + Arc::new(Backend::new(blockchain)) } /// Create an instance of light client. 
pub fn new_light( - backend: Arc>, - fetcher: Arc, - genesis_storage: GS, - code_executor: E, -) -> ClientResult, RemoteOrLocalCallExecutor< - B, - Backend, - RemoteCallExecutor, F>, - LocalCallExecutor, E> ->, B, RA>> - where - B: BlockT, - S: BlockchainStorage, - F: Fetcher, - GS: BuildStorage, - E: CodeExecutor + RuntimeInfo, + backend: Arc>, + fetcher: Arc, + genesis_storage: GS, + code_executor: E, +) -> ClientResult< + Client< + Backend, + RemoteOrLocalCallExecutor< + B, + Backend, + RemoteCallExecutor, F>, + LocalCallExecutor, E>, + >, + B, + RA, + >, +> +where + B: BlockT, + S: BlockchainStorage, + F: Fetcher, + GS: BuildStorage, + E: CodeExecutor + RuntimeInfo, { - let remote_executor = RemoteCallExecutor::new(backend.blockchain().clone(), fetcher); - let local_executor = LocalCallExecutor::new(backend.clone(), code_executor); - let executor = RemoteOrLocalCallExecutor::new(backend.clone(), remote_executor, local_executor); - Client::new(backend, executor, genesis_storage, Default::default()) + let remote_executor = RemoteCallExecutor::new(backend.blockchain().clone(), fetcher); + let local_executor = LocalCallExecutor::new(backend.clone(), code_executor); + let executor = RemoteOrLocalCallExecutor::new(backend.clone(), remote_executor, local_executor); + Client::new(backend, executor, genesis_storage, Default::default()) } /// Create an instance of fetch data checker. pub fn new_fetch_checker, F>( - blockchain: Arc>, - executor: E, + blockchain: Arc>, + executor: E, ) -> LightDataChecker - where - E: CodeExecutor, +where + E: CodeExecutor, { - LightDataChecker::new(blockchain, executor) + LightDataChecker::new(blockchain, executor) } diff --git a/core/client/src/notifications.rs b/core/client/src/notifications.rs index 139238f343..3664ac3a46 100644 --- a/core/client/src/notifications.rs +++ b/core/client/src/notifications.rs @@ -17,32 +17,32 @@ //! Storage notifications use std::{ - collections::{HashSet, HashMap}, - sync::Arc, + collections::{HashMap, HashSet}, + sync::Arc, }; -use fnv::{FnvHashSet, FnvHashMap}; +use fnv::{FnvHashMap, FnvHashSet}; use futures::sync::mpsc; -use primitives::storage::{StorageKey, StorageData}; +use primitives::storage::{StorageData, StorageKey}; use runtime_primitives::traits::Block as BlockT; /// Storage change set #[derive(Debug)] pub struct StorageChangeSet { - changes: Arc)>>, - filter: Option>, + changes: Arc)>>, + filter: Option>, } impl StorageChangeSet { - /// Convert the change set into iterator over storage items. - pub fn iter<'a>(&'a self) -> impl Iterator)> + 'a { - self.changes - .iter() - .filter(move |&(key, _)| match self.filter { - Some(ref filter) => filter.contains(key), - None => true, - }) - } + /// Convert the change set into iterator over storage items. + pub fn iter<'a>(&'a self) -> impl Iterator)> + 'a { + self.changes + .iter() + .filter(move |&(key, _)| match self.filter { + Some(ref filter) => filter.contains(key), + None => true, + }) + } } /// Type that implements `futures::Stream` of storage change events. @@ -53,236 +53,261 @@ type SubscriberId = u64; /// Manages storage listeners. 
#[derive(Debug)] pub struct StorageNotifications { - next_id: SubscriberId, - wildcard_listeners: FnvHashSet, - listeners: HashMap>, - sinks: FnvHashMap, - Option>, - )>, + next_id: SubscriberId, + wildcard_listeners: FnvHashSet, + listeners: HashMap>, + sinks: FnvHashMap< + SubscriberId, + ( + mpsc::UnboundedSender<(Block::Hash, StorageChangeSet)>, + Option>, + ), + >, } impl Default for StorageNotifications { - fn default() -> Self { - StorageNotifications { - next_id: Default::default(), - wildcard_listeners: Default::default(), - listeners: Default::default(), - sinks: Default::default(), - } - } + fn default() -> Self { + StorageNotifications { + next_id: Default::default(), + wildcard_listeners: Default::default(), + listeners: Default::default(), + sinks: Default::default(), + } + } } impl StorageNotifications { - /// Trigger notification to all listeners. - /// - /// Note the changes are going to be filtered by listener's filter key. - /// In fact no event might be sent if clients are not interested in the changes. - pub fn trigger(&mut self, hash: &Block::Hash, changeset: impl Iterator, Option>)>) { - let has_wildcard = !self.wildcard_listeners.is_empty(); - - // early exit if no listeners - if !has_wildcard && self.listeners.is_empty() { - return; - } - - let mut subscribers = self.wildcard_listeners.clone(); - let mut changes = Vec::new(); - - // Collect subscribers and changes - for (k, v) in changeset { - let k = StorageKey(k); - let listeners = self.listeners.get(&k); - - if let Some(ref listeners) = listeners { - subscribers.extend(listeners.iter()); - } - - if has_wildcard || listeners.is_some() { - changes.push((k, v.map(StorageData))); - } - } - - // Don't send empty notifications - if changes.is_empty() { - return; - } - - let changes = Arc::new(changes); - // Trigger the events - for subscriber in subscribers { - let should_remove = { - let &(ref sink, ref filter) = self.sinks.get(&subscriber) - .expect("subscribers returned from self.listeners are always in self.sinks; qed"); - sink.unbounded_send((hash.clone(), StorageChangeSet { - changes: changes.clone(), - filter: filter.clone(), - })).is_err() - }; - - if should_remove { - self.remove_subscriber(subscriber); - } - } - } - - fn remove_subscriber(&mut self, subscriber: SubscriberId) { - if let Some((_, filters)) = self.sinks.remove(&subscriber) { - match filters { - None => { - self.wildcard_listeners.remove(&subscriber); - }, - Some(filters) => { - for key in filters { - let remove_key = match self.listeners.get_mut(&key) { - Some(ref mut set) => { - set.remove(&subscriber); - set.is_empty() - }, - None => false, - }; - - if remove_key { - self.listeners.remove(&key); - } - } - }, - } - } - } - - /// Start listening for particular storage keys. - pub fn listen(&mut self, filter_keys: Option<&[StorageKey]>) -> StorageEventStream { - self.next_id += 1; - - // add subscriber for every key - let keys = match filter_keys { - None => { - self.wildcard_listeners.insert(self.next_id); - None - }, - Some(keys) => Some(keys.iter().map(|key| { - self.listeners - .entry(key.clone()) - .or_insert_with(Default::default) - .insert(self.next_id); - key.clone() - }).collect()) - }; - - // insert sink - let (tx, rx) = mpsc::unbounded(); - self.sinks.insert(self.next_id, (tx, keys)); - rx - } + /// Trigger notification to all listeners. + /// + /// Note the changes are going to be filtered by listener's filter key. + /// In fact no event might be sent if clients are not interested in the changes. 
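+ /// Subscribers whose receiving end has been dropped are removed when sending fails.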
+ pub fn trigger( + &mut self, + hash: &Block::Hash, + changeset: impl Iterator, Option>)>, + ) { + let has_wildcard = !self.wildcard_listeners.is_empty(); + + // early exit if no listeners + if !has_wildcard && self.listeners.is_empty() { + return; + } + + let mut subscribers = self.wildcard_listeners.clone(); + let mut changes = Vec::new(); + + // Collect subscribers and changes + for (k, v) in changeset { + let k = StorageKey(k); + let listeners = self.listeners.get(&k); + + if let Some(ref listeners) = listeners { + subscribers.extend(listeners.iter()); + } + + if has_wildcard || listeners.is_some() { + changes.push((k, v.map(StorageData))); + } + } + + // Don't send empty notifications + if changes.is_empty() { + return; + } + + let changes = Arc::new(changes); + // Trigger the events + for subscriber in subscribers { + let should_remove = { + let &(ref sink, ref filter) = self.sinks.get(&subscriber).expect( + "subscribers returned from self.listeners are always in self.sinks; qed", + ); + sink.unbounded_send(( + hash.clone(), + StorageChangeSet { + changes: changes.clone(), + filter: filter.clone(), + }, + )) + .is_err() + }; + + if should_remove { + self.remove_subscriber(subscriber); + } + } + } + + fn remove_subscriber(&mut self, subscriber: SubscriberId) { + if let Some((_, filters)) = self.sinks.remove(&subscriber) { + match filters { + None => { + self.wildcard_listeners.remove(&subscriber); + } + Some(filters) => { + for key in filters { + let remove_key = match self.listeners.get_mut(&key) { + Some(ref mut set) => { + set.remove(&subscriber); + set.is_empty() + } + None => false, + }; + + if remove_key { + self.listeners.remove(&key); + } + } + } + } + } + } + + /// Start listening for particular storage keys. + pub fn listen( + &mut self, + filter_keys: Option<&[StorageKey]>, + ) -> StorageEventStream { + self.next_id += 1; + + // add subscriber for every key + let keys = match filter_keys { + None => { + self.wildcard_listeners.insert(self.next_id); + None + } + Some(keys) => Some( + keys.iter() + .map(|key| { + self.listeners + .entry(key.clone()) + .or_insert_with(Default::default) + .insert(self.next_id); + key.clone() + }) + .collect(), + ), + }; + + // insert sink + let (tx, rx) = mpsc::unbounded(); + self.sinks.insert(self.next_id, (tx, keys)); + rx + } } #[cfg(test)] mod tests { - use runtime_primitives::testing::{H256 as Hash, Block as RawBlock, ExtrinsicWrapper}; - use super::*; - use futures::Stream; - - #[cfg(test)] - impl From)>> for StorageChangeSet { - fn from(changes: Vec<(StorageKey, Option)>) -> Self { - StorageChangeSet { - changes: Arc::new(changes), - filter: None, - } - } - } - - #[cfg(test)] - impl PartialEq for StorageChangeSet { - fn eq(&self, other: &Self) -> bool { - self.iter().eq(other.iter()) - } - } - - type Block = RawBlock>; - - #[test] - fn triggering_change_should_notify_wildcard_listeners() { - // given - let mut notifications = StorageNotifications::::default(); - let mut recv = notifications.listen(None).wait(); - - // when - let changeset = vec![ - (vec![2], Some(vec![3])), - (vec![3], None), - ]; - notifications.trigger(&Hash::from_low_u64_be(1), changeset.into_iter()); - - // then - assert_eq!(recv.next().unwrap(), Ok((Hash::from_low_u64_be(1), vec![ - (StorageKey(vec![2]), Some(StorageData(vec![3]))), - (StorageKey(vec![3]), None), - ].into()))); - } - - #[test] - fn should_only_notify_interested_listeners() { - // given - let mut notifications = StorageNotifications::::default(); - let mut recv1 = 
notifications.listen(Some(&[StorageKey(vec![1])])).wait(); - let mut recv2 = notifications.listen(Some(&[StorageKey(vec![2])])).wait(); - - // when - let changeset = vec![ - (vec![2], Some(vec![3])), - (vec![1], None), - ]; - notifications.trigger(&Hash::from_low_u64_be(1), changeset.into_iter()); - - // then - assert_eq!(recv1.next().unwrap(), Ok((Hash::from_low_u64_be(1), vec![ - (StorageKey(vec![1]), None), - ].into()))); - assert_eq!(recv2.next().unwrap(), Ok((Hash::from_low_u64_be(1), vec![ - (StorageKey(vec![2]), Some(StorageData(vec![3]))), - ].into()))); - } - - #[test] - fn should_cleanup_subscribers_if_dropped() { - // given - let mut notifications = StorageNotifications::::default(); - { - let _recv1 = notifications.listen(Some(&[StorageKey(vec![1])])).wait(); - let _recv2 = notifications.listen(Some(&[StorageKey(vec![2])])).wait(); - let _recv3 = notifications.listen(None).wait(); - assert_eq!(notifications.listeners.len(), 2); - assert_eq!(notifications.wildcard_listeners.len(), 1); - } - - // when - let changeset = vec![ - (vec![2], Some(vec![3])), - (vec![1], None), - ]; - notifications.trigger(&Hash::from_low_u64_be(1), changeset.into_iter()); - - // then - assert_eq!(notifications.listeners.len(), 0); - assert_eq!(notifications.wildcard_listeners.len(), 0); - } - - #[test] - fn should_not_send_empty_notifications() { - // given - let mut recv = { - let mut notifications = StorageNotifications::::default(); - let recv = notifications.listen(None).wait(); - - // when - let changeset = vec![]; - notifications.trigger(&Hash::from_low_u64_be(1), changeset.into_iter()); - recv - }; - - // then - assert_eq!(recv.next(), None); - } + use super::*; + use futures::Stream; + use runtime_primitives::testing::{Block as RawBlock, ExtrinsicWrapper, H256 as Hash}; + + #[cfg(test)] + impl From)>> for StorageChangeSet { + fn from(changes: Vec<(StorageKey, Option)>) -> Self { + StorageChangeSet { + changes: Arc::new(changes), + filter: None, + } + } + } + + #[cfg(test)] + impl PartialEq for StorageChangeSet { + fn eq(&self, other: &Self) -> bool { + self.iter().eq(other.iter()) + } + } + + type Block = RawBlock>; + + #[test] + fn triggering_change_should_notify_wildcard_listeners() { + // given + let mut notifications = StorageNotifications::::default(); + let mut recv = notifications.listen(None).wait(); + + // when + let changeset = vec![(vec![2], Some(vec![3])), (vec![3], None)]; + notifications.trigger(&Hash::from_low_u64_be(1), changeset.into_iter()); + + // then + assert_eq!( + recv.next().unwrap(), + Ok(( + Hash::from_low_u64_be(1), + vec![ + (StorageKey(vec![2]), Some(StorageData(vec![3]))), + (StorageKey(vec![3]), None), + ] + .into() + )) + ); + } + + #[test] + fn should_only_notify_interested_listeners() { + // given + let mut notifications = StorageNotifications::::default(); + let mut recv1 = notifications.listen(Some(&[StorageKey(vec![1])])).wait(); + let mut recv2 = notifications.listen(Some(&[StorageKey(vec![2])])).wait(); + + // when + let changeset = vec![(vec![2], Some(vec![3])), (vec![1], None)]; + notifications.trigger(&Hash::from_low_u64_be(1), changeset.into_iter()); + + // then + assert_eq!( + recv1.next().unwrap(), + Ok(( + Hash::from_low_u64_be(1), + vec![(StorageKey(vec![1]), None),].into() + )) + ); + assert_eq!( + recv2.next().unwrap(), + Ok(( + Hash::from_low_u64_be(1), + vec![(StorageKey(vec![2]), Some(StorageData(vec![3]))),].into() + )) + ); + } + + #[test] + fn should_cleanup_subscribers_if_dropped() { + // given + let mut notifications = 
StorageNotifications::::default(); + { + let _recv1 = notifications.listen(Some(&[StorageKey(vec![1])])).wait(); + let _recv2 = notifications.listen(Some(&[StorageKey(vec![2])])).wait(); + let _recv3 = notifications.listen(None).wait(); + assert_eq!(notifications.listeners.len(), 2); + assert_eq!(notifications.wildcard_listeners.len(), 1); + } + + // when + let changeset = vec![(vec![2], Some(vec![3])), (vec![1], None)]; + notifications.trigger(&Hash::from_low_u64_be(1), changeset.into_iter()); + + // then + assert_eq!(notifications.listeners.len(), 0); + assert_eq!(notifications.wildcard_listeners.len(), 0); + } + + #[test] + fn should_not_send_empty_notifications() { + // given + let mut recv = { + let mut notifications = StorageNotifications::::default(); + let recv = notifications.listen(None).wait(); + + // when + let changeset = vec![]; + notifications.trigger(&Hash::from_low_u64_be(1), changeset.into_iter()); + recv + }; + + // then + assert_eq!(recv.next(), None); + } } diff --git a/core/client/src/runtime_api.rs b/core/client/src/runtime_api.rs index 6bc43ab270..45bc458f6e 100644 --- a/core/client/src/runtime_api.rs +++ b/core/client/src/runtime_api.rs @@ -16,122 +16,131 @@ //! All the functionality required for declaring and implementing runtime apis. -#[doc(hidden)] #[cfg(feature = "std")] -pub use state_machine::OverlayedChanges; +use crate::error; +pub use parity_codec::{Decode, Encode}; #[doc(hidden)] #[cfg(feature = "std")] pub use primitives::NativeOrEncoded; +use primitives::OpaqueMetadata; +#[doc(hidden)] +pub use primitives::{ExecutionContext, OffchainExt}; +#[cfg(feature = "std")] +use rstd::result; +#[doc(hidden)] +pub use rstd::{mem, slice}; #[doc(hidden)] pub use runtime_primitives::{ - traits::{AuthorityIdFor, Block as BlockT, GetNodeBlockType, GetRuntimeBlockType, Header as HeaderT, ApiRef, RuntimeApiInfo}, - generic::BlockId, transaction_validity::TransactionValidity, + generic::BlockId, + traits::{ + ApiRef, AuthorityIdFor, Block as BlockT, GetNodeBlockType, GetRuntimeBlockType, + Header as HeaderT, RuntimeApiInfo, + }, + transaction_validity::TransactionValidity, }; #[doc(hidden)] -pub use primitives::{ExecutionContext, OffchainExt}; -#[doc(hidden)] -pub use runtime_version::{ApiId, RuntimeVersion, ApisVec, create_apis_vec}; +pub use runtime_version::{create_apis_vec, ApiId, ApisVec, RuntimeVersion}; +use sr_api_macros::decl_runtime_apis; #[doc(hidden)] -pub use rstd::{slice, mem}; #[cfg(feature = "std")] -use rstd::result; -pub use parity_codec::{Encode, Decode}; -#[cfg(feature = "std")] -use crate::error; -use sr_api_macros::decl_runtime_apis; -use primitives::OpaqueMetadata; +pub use state_machine::OverlayedChanges; #[cfg(feature = "std")] use std::panic::UnwindSafe; /// Something that can be constructed to a runtime api. #[cfg(feature = "std")] pub trait ConstructRuntimeApi> { - /// The actual runtime api that will be constructed. - type RuntimeApi; + /// The actual runtime api that will be constructed. + type RuntimeApi; - /// Construct an instance of the runtime api. - fn construct_runtime_api<'a>(call: &'a C) -> ApiRef<'a, Self::RuntimeApi>; + /// Construct an instance of the runtime api. + fn construct_runtime_api<'a>(call: &'a C) -> ApiRef<'a, Self::RuntimeApi>; } /// An extension for the `RuntimeApi`. #[cfg(feature = "std")] pub trait ApiExt { - /// The given closure will be called with api instance. Inside the closure any api call is - /// allowed. After doing the api call, the closure is allowed to map the `Result` to a - /// different `Result` type. 
This can be important, as the internal data structure that keeps - /// track of modifications to the storage, discards changes when the `Result` is an `Err`. - /// On `Ok`, the structure commits the changes to an internal buffer. - fn map_api_result result::Result, R, E>( - &self, - map_call: F - ) -> result::Result where Self: Sized; - - /// Checks if the given api is implemented and versions match. - fn has_api( - &self, - at: &BlockId - ) -> error::Result where Self: Sized { - self.runtime_version_at(at).map(|v| v.has_api::()) - } - - /// Check if the given api is implemented and the version passes a predicate. - fn has_api_with bool>( - &self, - at: &BlockId, - pred: P, - ) -> error::Result where Self: Sized { - self.runtime_version_at(at).map(|v| v.has_api_with::(pred)) - } - - /// Returns the runtime version at the given block id. - fn runtime_version_at(&self, at: &BlockId) -> error::Result; + /// The given closure will be called with api instance. Inside the closure any api call is + /// allowed. After doing the api call, the closure is allowed to map the `Result` to a + /// different `Result` type. This can be important, as the internal data structure that keeps + /// track of modifications to the storage, discards changes when the `Result` is an `Err`. + /// On `Ok`, the structure commits the changes to an internal buffer. + fn map_api_result result::Result, R, E>( + &self, + map_call: F, + ) -> result::Result + where + Self: Sized; + + /// Checks if the given api is implemented and versions match. + fn has_api(&self, at: &BlockId) -> error::Result + where + Self: Sized, + { + self.runtime_version_at(at).map(|v| v.has_api::()) + } + + /// Check if the given api is implemented and the version passes a predicate. + fn has_api_with bool>( + &self, + at: &BlockId, + pred: P, + ) -> error::Result + where + Self: Sized, + { + self.runtime_version_at(at) + .map(|v| v.has_api_with::(pred)) + } + + /// Returns the runtime version at the given block id. + fn runtime_version_at(&self, at: &BlockId) -> error::Result; } /// Something that can call into the runtime at a given block. #[cfg(feature = "std")] pub trait CallRuntimeAt { - /// Calls the given api function with the given encoded arguments at the given block - /// and returns the encoded result. - fn call_api_at< - R: Encode + Decode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, - >( - &self, - at: &BlockId, - function: &'static str, - args: Vec, - changes: &mut OverlayedChanges, - initialized_block: &mut Option>, - native_call: Option, - context: ExecutionContext, - ) -> error::Result>; - - /// Returns the runtime version at the given block. - fn runtime_version_at(&self, at: &BlockId) -> error::Result; + /// Calls the given api function with the given encoded arguments at the given block + /// and returns the encoded result. + fn call_api_at< + R: Encode + Decode + PartialEq, + NC: FnOnce() -> result::Result + UnwindSafe, + >( + &self, + at: &BlockId, + function: &'static str, + args: Vec, + changes: &mut OverlayedChanges, + initialized_block: &mut Option>, + native_call: Option, + context: ExecutionContext, + ) -> error::Result>; + + /// Returns the runtime version at the given block. + fn runtime_version_at(&self, at: &BlockId) -> error::Result; } decl_runtime_apis! { - /// The `Core` api trait that is mandatory for each runtime. - #[core_trait] - pub trait Core { - /// Returns the version of the runtime. - fn version() -> RuntimeVersion; - /// Execute the given block. 
- fn execute_block(block: Block); - /// Initialize a block with the given header. - fn initialize_block(header: &::Header); - } - - /// The `Metadata` api trait that returns metadata for the runtime. - pub trait Metadata { - /// Returns the metadata of a runtime. - fn metadata() -> OpaqueMetadata; - } - - /// The `TaggedTransactionQueue` api trait for interfering with the new transaction queue. - pub trait TaggedTransactionQueue { - /// Validate the given transaction. - fn validate_transaction(tx: ::Extrinsic) -> TransactionValidity; - } + /// The `Core` api trait that is mandatory for each runtime. + #[core_trait] + pub trait Core { + /// Returns the version of the runtime. + fn version() -> RuntimeVersion; + /// Execute the given block. + fn execute_block(block: Block); + /// Initialize a block with the given header. + fn initialize_block(header: &::Header); + } + + /// The `Metadata` api trait that returns metadata for the runtime. + pub trait Metadata { + /// Returns the metadata of a runtime. + fn metadata() -> OpaqueMetadata; + } + + /// The `TaggedTransactionQueue` api trait for interfering with the new transaction queue. + pub trait TaggedTransactionQueue { + /// Validate the given transaction. + fn validate_transaction(tx: ::Extrinsic) -> TransactionValidity; + } } - diff --git a/core/consensus/aura/primitives/src/lib.rs b/core/consensus/aura/primitives/src/lib.rs index 47b6ec7c14..2ba72604a8 100644 --- a/core/consensus/aura/primitives/src/lib.rs +++ b/core/consensus/aura/primitives/src/lib.rs @@ -18,20 +18,20 @@ #![cfg_attr(not(feature = "std"), no_std)] -use substrate_client::decl_runtime_apis; use runtime_primitives::ConsensusEngineId; +use substrate_client::decl_runtime_apis; /// The `ConsensusEngineId` of AuRa. pub const AURA_ENGINE_ID: ConsensusEngineId = [b'a', b'u', b'r', b'a']; decl_runtime_apis! { - /// API necessary for block authorship with aura. - pub trait AuraApi { - /// Return the slot duration in seconds for Aura. - /// Currently, only the value provided by this type at genesis - /// will be used. - /// - /// Dynamic slot duration may be supported in the future. - fn slot_duration() -> u64; - } + /// API necessary for block authorship with aura. + pub trait AuraApi { + /// Return the slot duration in seconds for Aura. + /// Currently, only the value provided by this type at genesis + /// will be used. + /// + /// Dynamic slot duration may be supported in the future. 
+ fn slot_duration() -> u64; + } } diff --git a/core/consensus/aura/slots/src/lib.rs b/core/consensus/aura/slots/src/lib.rs index 5339444130..24bcac17e2 100644 --- a/core/consensus/aura/slots/src/lib.rs +++ b/core/consensus/aura/slots/src/lib.rs @@ -16,193 +16,197 @@ mod slots; -pub use slots::{Slots, SlotInfo}; +pub use slots::{SlotInfo, Slots}; -use std::sync::{mpsc, Arc}; -use std::thread; -use futures::prelude::*; -use futures::{Future, IntoFuture, future::{self, Either}}; -use log::{warn, debug, info}; -use runtime_primitives::generic::BlockId; -use runtime_primitives::traits::{ProvideRuntimeApi, Block}; -use consensus_common::SyncOracle; -use inherents::{InherentData, InherentDataProviders}; use aura_primitives::AuraApi; use client::ChainHead; use codec::Encode; +use consensus_common::SyncOracle; +use futures::prelude::*; +use futures::{ + future::{self, Either}, + Future, IntoFuture, +}; +use inherents::{InherentData, InherentDataProviders}; +use log::{debug, info, warn}; +use runtime_primitives::generic::BlockId; +use runtime_primitives::traits::{Block, ProvideRuntimeApi}; +use std::sync::{mpsc, Arc}; +use std::thread; /// A worker that should be invoked at every new slot. pub trait SlotWorker { - type OnSlot: IntoFuture; - - /// Called when the proposer starts. - fn on_start( - &self, - slot_duration: u64 - ) -> Result<(), consensus_common::Error>; - - /// Called when a new slot is triggered. - fn on_slot( - &self, - chain_head: B::Header, - slot_info: SlotInfo, - ) -> Self::OnSlot; + type OnSlot: IntoFuture; + + /// Called when the proposer starts. + fn on_start(&self, slot_duration: u64) -> Result<(), consensus_common::Error>; + + /// Called when a new slot is triggered. + fn on_slot(&self, chain_head: B::Header, slot_info: SlotInfo) -> Self::OnSlot; } /// Slot compatible inherent data. pub trait SlotCompatible { - /// Extract timestamp and slot from inherent data. - fn extract_timestamp_and_slot(inherent: &InherentData) -> Result<(u64, u64), consensus_common::Error>; + /// Extract timestamp and slot from inherent data. + fn extract_timestamp_and_slot( + inherent: &InherentData, + ) -> Result<(u64, u64), consensus_common::Error>; } /// Convert an inherent error to common error. pub fn inherent_to_common_error(err: inherents::RuntimeString) -> consensus_common::Error { - consensus_common::ErrorKind::InherentData(err.into()).into() + consensus_common::ErrorKind::InherentData(err.into()).into() } /// Start a new slot worker in a separate thread. 
pub fn start_slot_worker_thread( - slot_duration: SlotDuration, - client: Arc, - worker: Arc, - sync_oracle: SO, - on_exit: OnExit, - inherent_data_providers: InherentDataProviders, -) -> Result<(), consensus_common::Error> where - B: Block + 'static, - C: ChainHead + Send + Sync + 'static, - W: SlotWorker + Send + Sync + 'static, - SO: SyncOracle + Send + Clone + 'static, - SC: SlotCompatible + 'static, - OnExit: Future + Send + 'static + slot_duration: SlotDuration, + client: Arc, + worker: Arc, + sync_oracle: SO, + on_exit: OnExit, + inherent_data_providers: InherentDataProviders, +) -> Result<(), consensus_common::Error> +where + B: Block + 'static, + C: ChainHead + Send + Sync + 'static, + W: SlotWorker + Send + Sync + 'static, + SO: SyncOracle + Send + Clone + 'static, + SC: SlotCompatible + 'static, + OnExit: Future + Send + 'static, { - use tokio::runtime::current_thread::Runtime; - - let (result_sender, result_recv) = mpsc::channel(); - - thread::spawn(move || { - let mut runtime = match Runtime::new() { - Ok(r) => r, - Err(e) => { - warn!("Unable to start authorship: {:?}", e); - return; - } - }; - - let slot_worker_future = match start_slot_worker::<_, _, _, _, SC, _>( - slot_duration, - client, - worker, - sync_oracle, - on_exit, - inherent_data_providers, - ) { - Ok(slot_worker_future) => { - result_sender - .send(Ok(())) - .expect("Receive is not dropped before receiving a result; qed"); - slot_worker_future - }, - Err(e) => { - result_sender - .send(Err(e)) - .expect("Receive is not dropped before receiving a result; qed"); - return; - } - }; - - let _ = runtime.block_on(slot_worker_future); - }); - - result_recv.recv().expect("Aura start thread result sender dropped") + use tokio::runtime::current_thread::Runtime; + + let (result_sender, result_recv) = mpsc::channel(); + + thread::spawn(move || { + let mut runtime = match Runtime::new() { + Ok(r) => r, + Err(e) => { + warn!("Unable to start authorship: {:?}", e); + return; + } + }; + + let slot_worker_future = match start_slot_worker::<_, _, _, _, SC, _>( + slot_duration, + client, + worker, + sync_oracle, + on_exit, + inherent_data_providers, + ) { + Ok(slot_worker_future) => { + result_sender + .send(Ok(())) + .expect("Receive is not dropped before receiving a result; qed"); + slot_worker_future + } + Err(e) => { + result_sender + .send(Err(e)) + .expect("Receive is not dropped before receiving a result; qed"); + return; + } + }; + + let _ = runtime.block_on(slot_worker_future); + }); + + result_recv + .recv() + .expect("Aura start thread result sender dropped") } /// Start a new slot worker. 
pub fn start_slot_worker( - slot_duration: SlotDuration, - client: Arc, - worker: Arc, - sync_oracle: SO, - on_exit: OnExit, - inherent_data_providers: InherentDataProviders, -) -> Result, consensus_common::Error> where - B: Block, - C: ChainHead, - W: SlotWorker, - SO: SyncOracle + Send + Clone, - SC: SlotCompatible, - OnExit: Future, + slot_duration: SlotDuration, + client: Arc, + worker: Arc, + sync_oracle: SO, + on_exit: OnExit, + inherent_data_providers: InherentDataProviders, +) -> Result, consensus_common::Error> +where + B: Block, + C: ChainHead, + W: SlotWorker, + SO: SyncOracle + Send + Clone, + SC: SlotCompatible, + OnExit: Future, { - worker.on_start(slot_duration.0)?; - - let make_authorship = move || { - let client = client.clone(); - let worker = worker.clone(); - let sync_oracle = sync_oracle.clone(); - let SlotDuration(slot_duration) = slot_duration; - let inherent_data_providers = inherent_data_providers.clone(); - - // rather than use a timer interval, we schedule our waits ourselves - Slots::::new(slot_duration, inherent_data_providers) - .map_err(|e| debug!(target: "aura", "Faulty timer: {:?}", e)) - .for_each(move |slot_info| { - let client = client.clone(); - let worker = worker.clone(); - let sync_oracle = sync_oracle.clone(); - - // only propose when we are not syncing. - if sync_oracle.is_major_syncing() { - debug!(target: "aura", "Skipping proposal slot due to sync."); - return Either::B(future::ok(())); - } - - let slot_num = slot_info.number; - let chain_head = match client.best_block_header() { - Ok(x) => x, - Err(e) => { - warn!(target: "aura", "Unable to author block in slot {}. \ + worker.on_start(slot_duration.0)?; + + let make_authorship = move || { + let client = client.clone(); + let worker = worker.clone(); + let sync_oracle = sync_oracle.clone(); + let SlotDuration(slot_duration) = slot_duration; + let inherent_data_providers = inherent_data_providers.clone(); + + // rather than use a timer interval, we schedule our waits ourselves + Slots::::new(slot_duration, inherent_data_providers) + .map_err(|e| debug!(target: "aura", "Faulty timer: {:?}", e)) + .for_each(move |slot_info| { + let client = client.clone(); + let worker = worker.clone(); + let sync_oracle = sync_oracle.clone(); + + // only propose when we are not syncing. + if sync_oracle.is_major_syncing() { + debug!(target: "aura", "Skipping proposal slot due to sync."); + return Either::B(future::ok(())); + } + + let slot_num = slot_info.number; + let chain_head = match client.best_block_header() { + Ok(x) => x, + Err(e) => { + warn!(target: "aura", "Unable to author block in slot {}. \ no best block header: {:?}", slot_num, e); - return Either::B(future::ok(())) - } - }; - - Either::A( - worker.on_slot(chain_head, slot_info).into_future() - .map_err(|e| debug!(target: "aura", "Encountered aura error: {:?}", e)) - ) - }) - }; - - let work = future::loop_fn((), move |()| { - let authorship_task = ::std::panic::AssertUnwindSafe(make_authorship()); - authorship_task.catch_unwind().then(|res| { - match res { - Ok(Ok(())) => (), - Ok(Err(())) => warn!("Aura authorship task terminated unexpectedly. 
Restarting"), - Err(e) => { - if let Some(s) = e.downcast_ref::<&'static str>() { - warn!("Aura authorship task panicked at {:?}", s); - } - - warn!("Restarting Aura authorship task"); - } - } - - Ok(future::Loop::Continue(())) - }) - }); - - Ok(work.select(on_exit).then(|_| Ok(()))) + return Either::B(future::ok(())); + } + }; + + Either::A( + worker + .on_slot(chain_head, slot_info) + .into_future() + .map_err(|e| debug!(target: "aura", "Encountered aura error: {:?}", e)), + ) + }) + }; + + let work = future::loop_fn((), move |()| { + let authorship_task = ::std::panic::AssertUnwindSafe(make_authorship()); + authorship_task.catch_unwind().then(|res| { + match res { + Ok(Ok(())) => (), + Ok(Err(())) => warn!("Aura authorship task terminated unexpectedly. Restarting"), + Err(e) => { + if let Some(s) = e.downcast_ref::<&'static str>() { + warn!("Aura authorship task panicked at {:?}", s); + } + + warn!("Restarting Aura authorship task"); + } + } + + Ok(future::Loop::Continue(())) + }) + }); + + Ok(work.select(on_exit).then(|_| Ok(()))) } /// A header which has been checked pub enum CheckedHeader { - /// A header which has slot in the future. this is the full header (not stripped) - /// and the slot in which it should be processed. - Deferred(H, u64), - /// A header which is fully checked, including signature. This is the pre-header - /// accompanied by the seal components. - Checked(H, u64, S), + /// A header which has slot in the future. this is the full header (not stripped) + /// and the slot in which it should be processed. + Deferred(H, u64), + /// A header which is fully checked, including signature. This is the pre-header + /// accompanied by the seal components. + Checked(H, u64, S), } /// A slot duration. Create with `get_or_compute`. @@ -211,43 +215,45 @@ pub enum CheckedHeader { pub struct SlotDuration(u64); impl SlotDuration { - /// Either fetch the slot duration from disk or compute it from the genesis - /// state. - pub fn get_or_compute(client: &C) -> ::client::error::Result where - C: client::backend::AuxStore, - C: ProvideRuntimeApi, - C::Api: AuraApi, - { - use codec::Decode; - const SLOT_KEY: &[u8] = b"aura_slot_duration"; - - match client.get_aux(SLOT_KEY)? { - Some(v) => u64::decode(&mut &v[..]) - .map(SlotDuration) - .ok_or_else(|| ::client::error::ErrorKind::Backend( - format!("Aura slot duration kept in invalid format"), - ).into()), - None => { - use runtime_primitives::traits::Zero; - let genesis_slot_duration = client.runtime_api() - .slot_duration(&BlockId::number(Zero::zero()))?; - - info!( - "Loaded block-time = {:?} seconds from genesis on first-launch", - genesis_slot_duration - ); - - genesis_slot_duration.using_encoded(|s| { - client.insert_aux(&[(SLOT_KEY, &s[..])], &[]) - })?; - - Ok(SlotDuration(genesis_slot_duration)) - } - } - } - - /// Returns slot duration value. - pub fn get(&self) -> u64 { - self.0 - } + /// Either fetch the slot duration from disk or compute it from the genesis + /// state. + pub fn get_or_compute(client: &C) -> ::client::error::Result + where + C: client::backend::AuxStore, + C: ProvideRuntimeApi, + C::Api: AuraApi, + { + use codec::Decode; + const SLOT_KEY: &[u8] = b"aura_slot_duration"; + + match client.get_aux(SLOT_KEY)? 
{ + Some(v) => u64::decode(&mut &v[..]).map(SlotDuration).ok_or_else(|| { + ::client::error::ErrorKind::Backend(format!( + "Aura slot duration kept in invalid format" + )) + .into() + }), + None => { + use runtime_primitives::traits::Zero; + let genesis_slot_duration = client + .runtime_api() + .slot_duration(&BlockId::number(Zero::zero()))?; + + info!( + "Loaded block-time = {:?} seconds from genesis on first-launch", + genesis_slot_duration + ); + + genesis_slot_duration + .using_encoded(|s| client.insert_aux(&[(SLOT_KEY, &s[..])], &[]))?; + + Ok(SlotDuration(genesis_slot_duration)) + } + } + } + + /// Returns slot duration value. + pub fn get(&self) -> u64 { + self.0 + } } diff --git a/core/consensus/aura/slots/src/slots.rs b/core/consensus/aura/slots/src/slots.rs index 9b665ce0d2..ef58e00f5c 100644 --- a/core/consensus/aura/slots/src/slots.rs +++ b/core/consensus/aura/slots/src/slots.rs @@ -18,132 +18,138 @@ //! //! This is used instead of `tokio_timer::Interval` because it was unreliable. -use std::time::{Instant, Duration}; -use std::marker::PhantomData; -use tokio::timer::Delay; +use crate::SlotCompatible; +use consensus_common::{Error, ErrorKind}; use futures::prelude::*; use futures::try_ready; +use inherents::{InherentData, InherentDataProviders}; use log::warn; -use inherents::{InherentDataProviders, InherentData}; -use consensus_common::{Error, ErrorKind}; -use crate::SlotCompatible; +use std::marker::PhantomData; +use std::time::{Duration, Instant}; +use tokio::timer::Delay; /// Returns current duration since unix epoch. pub fn duration_now() -> Option { - use std::time::SystemTime; - - let now = SystemTime::now(); - now.duration_since(SystemTime::UNIX_EPOCH).map_err(|e| { - warn!("Current time {:?} is before unix epoch. Something is wrong: {:?}", now, e); - }).ok() + use std::time::SystemTime; + + let now = SystemTime::now(); + now.duration_since(SystemTime::UNIX_EPOCH) + .map_err(|e| { + warn!( + "Current time {:?} is before unix epoch. Something is wrong: {:?}", + now, e + ); + }) + .ok() } /// Returns the duration until the next slot, based on current duration since pub fn time_until_next(now: Duration, slot_duration: u64) -> Duration { - let remaining_full_secs = slot_duration - (now.as_secs() % slot_duration) - 1; - let remaining_nanos = 1_000_000_000 - now.subsec_nanos(); - Duration::new(remaining_full_secs, remaining_nanos) + let remaining_full_secs = slot_duration - (now.as_secs() % slot_duration) - 1; + let remaining_nanos = 1_000_000_000 - now.subsec_nanos(); + Duration::new(remaining_full_secs, remaining_nanos) } /// Information about a slot. pub struct SlotInfo { - /// The slot number. - pub number: u64, - /// Current timestamp. - pub timestamp: u64, - /// The instant at which the slot ends. - pub ends_at: Instant, - /// The inherent data. - pub inherent_data: InherentData, - /// Slot duration. - pub duration: u64, + /// The slot number. + pub number: u64, + /// Current timestamp. + pub timestamp: u64, + /// The instant at which the slot ends. + pub ends_at: Instant, + /// The inherent data. + pub inherent_data: InherentData, + /// Slot duration. + pub duration: u64, } impl SlotInfo { - /// Yields the remaining duration in the slot. - pub fn remaining_duration(&self) -> Duration { - let now = Instant::now(); - if now < self.ends_at { - self.ends_at.duration_since(now) - } else { - Duration::from_secs(0) - } - } + /// Yields the remaining duration in the slot. 
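+ /// Returns zero if the slot has already ended.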
+ pub fn remaining_duration(&self) -> Duration { + let now = Instant::now(); + if now < self.ends_at { + self.ends_at.duration_since(now) + } else { + Duration::from_secs(0) + } + } } /// A stream that returns every time there is a new slot. pub struct Slots { - last_slot: u64, - slot_duration: u64, - inner_delay: Option, - inherent_data_providers: InherentDataProviders, - _marker: PhantomData, + last_slot: u64, + slot_duration: u64, + inner_delay: Option, + inherent_data_providers: InherentDataProviders, + _marker: PhantomData, } impl Slots { - /// Create a new `Slots` stream. - pub fn new(slot_duration: u64, inherent_data_providers: InherentDataProviders) -> Self { - Slots { - last_slot: 0, - slot_duration, - inner_delay: None, - inherent_data_providers, - _marker: PhantomData, - } - } + /// Create a new `Slots` stream. + pub fn new(slot_duration: u64, inherent_data_providers: InherentDataProviders) -> Self { + Slots { + last_slot: 0, + slot_duration, + inner_delay: None, + inherent_data_providers, + _marker: PhantomData, + } + } } impl Stream for Slots { - type Item = SlotInfo; - type Error = Error; - - fn poll(&mut self) -> Poll, Self::Error> { - let slot_duration = self.slot_duration; - self.inner_delay = match self.inner_delay.take() { - None => { - // schedule wait. - let wait_until = match duration_now() { - None => return Ok(Async::Ready(None)), - Some(now) => Instant::now() + time_until_next(now, slot_duration), - }; - - Some(Delay::new(wait_until)) - } - Some(d) => Some(d), - }; - - if let Some(ref mut inner_delay) = self.inner_delay { - try_ready!(inner_delay.poll().map_err(|e| Error::from(ErrorKind::FaultyTimer(e)))); - } - - // timeout has fired. - - let inherent_data = self.inherent_data_providers.create_inherent_data() - .map_err(crate::inherent_to_common_error)?; - let (timestamp, slot_num) = SC::extract_timestamp_and_slot(&inherent_data)?; - - // reschedule delay for next slot. - let ends_at = Instant::now() + time_until_next(Duration::from_secs(timestamp), slot_duration); - self.inner_delay = Some(Delay::new(ends_at)); - - // never yield the same slot twice. - if slot_num > self.last_slot { - self.last_slot = slot_num; - - Ok( - Async::Ready( - Some(SlotInfo { - number: slot_num, - duration: self.slot_duration, - timestamp, - ends_at, - inherent_data, - }) - ) - ) - } else { - // re-poll until we get a new slot. - self.poll() - } - } + type Item = SlotInfo; + type Error = Error; + + fn poll(&mut self) -> Poll, Self::Error> { + let slot_duration = self.slot_duration; + self.inner_delay = match self.inner_delay.take() { + None => { + // schedule wait. + let wait_until = match duration_now() { + None => return Ok(Async::Ready(None)), + Some(now) => Instant::now() + time_until_next(now, slot_duration), + }; + + Some(Delay::new(wait_until)) + } + Some(d) => Some(d), + }; + + if let Some(ref mut inner_delay) = self.inner_delay { + try_ready!(inner_delay + .poll() + .map_err(|e| Error::from(ErrorKind::FaultyTimer(e)))); + } + + // timeout has fired. + + let inherent_data = self + .inherent_data_providers + .create_inherent_data() + .map_err(crate::inherent_to_common_error)?; + let (timestamp, slot_num) = SC::extract_timestamp_and_slot(&inherent_data)?; + + // reschedule delay for next slot. + let ends_at = + Instant::now() + time_until_next(Duration::from_secs(timestamp), slot_duration); + self.inner_delay = Some(Delay::new(ends_at)); + + // never yield the same slot twice. 
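+ // `last_slot` only ever increases, so a timer that fires twice within one slot re-polls instead of yielding again.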
+ if slot_num > self.last_slot { + self.last_slot = slot_num; + + Ok(Async::Ready(Some(SlotInfo { + number: slot_num, + duration: self.slot_duration, + timestamp, + ends_at, + inherent_data, + }))) + } else { + // re-poll until we get a new slot. + self.poll() + } + } } diff --git a/core/consensus/aura/src/lib.rs b/core/consensus/aura/src/lib.rs index fd33228b66..6d49228f23 100644 --- a/core/consensus/aura/src/lib.rs +++ b/core/consensus/aura/src/lib.rs @@ -26,41 +26,48 @@ //! Blocks from future steps will be either deferred or rejected depending on how //! far in the future they are. #![deny(deprecated)] -use std::{sync::Arc, time::Duration, thread, marker::PhantomData, hash::Hash, fmt::Debug}; +use std::{fmt::Debug, hash::Hash, marker::PhantomData, sync::Arc, thread, time::Duration}; -use parity_codec::{Encode, Decode}; -use consensus_common::{self, Authorities, BlockImport, Environment, Proposer, - ForkChoiceStrategy, ImportBlock, BlockOrigin, Error as ConsensusError, -}; -use consensus_common::well_known_cache_keys; -use consensus_common::import_queue::{Verifier, BasicQueue, SharedBlockImport, SharedJustificationImport}; -use client::ChainHead; +use aura_primitives::AURA_ENGINE_ID; +use authorities::AuthoritiesApi; use client::block_builder::api::BlockBuilder as BlockBuilderApi; use client::blockchain::ProvideCache; use client::runtime_api::ApiExt; -use aura_primitives::AURA_ENGINE_ID; -use runtime_primitives::{generic, generic::BlockId, Justification}; -use runtime_primitives::traits::{ - Block, Header, Digest, DigestItemFor, DigestItem, ProvideRuntimeApi, AuthorityIdFor, +use client::ChainHead; +use consensus_common::import_queue::{ + BasicQueue, SharedBlockImport, SharedJustificationImport, Verifier, }; +use consensus_common::well_known_cache_keys; +use consensus_common::{ + self, Authorities, BlockImport, BlockOrigin, Environment, Error as ConsensusError, + ForkChoiceStrategy, ImportBlock, Proposer, +}; +use inherents::{InherentData, InherentDataProviders, RuntimeString}; +use parity_codec::{Decode, Encode}; use primitives::Pair; -use inherents::{InherentDataProviders, InherentData, RuntimeString}; -use authorities::AuthoritiesApi; +use runtime_primitives::traits::{ + AuthorityIdFor, Block, Digest, DigestItem, DigestItemFor, Header, ProvideRuntimeApi, +}; +use runtime_primitives::{generic, generic::BlockId, Justification}; -use futures::{Stream, Future, IntoFuture, future}; +use futures::{future, Future, IntoFuture, Stream}; +use log::{debug, info, trace, warn}; use tokio::timer::Timeout; -use log::{warn, debug, info, trace}; use srml_aura::{ - InherentType as AuraInherent, AuraInherentData, - timestamp::{TimestampInherentData, InherentType as TimestampInherent, InherentError as TIError} + timestamp::{ + InherentError as TIError, InherentType as TimestampInherent, TimestampInherentData, + }, + AuraInherentData, InherentType as AuraInherent, +}; +use substrate_telemetry::{ + telemetry, CONSENSUS_DEBUG, CONSENSUS_INFO, CONSENSUS_TRACE, CONSENSUS_WARN, }; -use substrate_telemetry::{telemetry, CONSENSUS_TRACE, CONSENSUS_DEBUG, CONSENSUS_WARN, CONSENSUS_INFO}; -use aura_slots::{CheckedHeader, SlotWorker, SlotInfo, SlotCompatible}; +use aura_slots::{CheckedHeader, SlotCompatible, SlotInfo, SlotWorker}; -pub use aura_slots::SlotDuration; pub use aura_primitives::*; +pub use aura_slots::SlotDuration; pub use consensus_common::SyncOracle; type AuthorityId
<P> = <P as Pair>::Public; @@ -71,369 +78,384 @@ type Signature<P> = <P as Pair>
::Signature; /// /// Intended to be a lightweight handle such as an `Arc`. pub trait Network: Clone { - /// A stream of input messages for a topic. - type In: Stream<Item=Vec<u8>,Error=()>; + /// A stream of input messages for a topic. + type In: Stream<Item = Vec<u8>, Error = ()>; - /// Send a message at a specific round out. - fn send_message(&self, slot: u64, message: Vec<u8>); + /// Send a message at a specific round out. + fn send_message(&self, slot: u64, message: Vec<u8>); } /// Get slot author for given block along with authorities. fn slot_author<P: Pair>(slot_num: u64, authorities: &[AuthorityId
<P>
]) -> Option> - where P::Public: Clone, +where + P::Public: Clone, { - if authorities.is_empty() { return None } - - let idx = slot_num % (authorities.len() as u64); - assert!(idx <= usize::max_value() as u64, - "It is impossible to have a vector with length beyond the address space; qed"); - - let current_author = authorities.get(idx as usize) - .expect("authorities not empty; index constrained to list length;\ - this is a valid index; qed") - .clone(); - - Some(current_author.clone()) + if authorities.is_empty() { + return None; + } + + let idx = slot_num % (authorities.len() as u64); + assert!( + idx <= usize::max_value() as u64, + "It is impossible to have a vector with length beyond the address space; qed" + ); + + let current_author = authorities + .get(idx as usize) + .expect( + "authorities not empty; index constrained to list length;\ + this is a valid index; qed", + ) + .clone(); + + Some(current_author.clone()) } fn duration_now() -> Option { - use std::time::SystemTime; - - let now = SystemTime::now(); - now.duration_since(SystemTime::UNIX_EPOCH).map_err(|e| { - warn!("Current time {:?} is before unix epoch. Something is wrong: {:?}", now, e); - }).ok() + use std::time::SystemTime; + + let now = SystemTime::now(); + now.duration_since(SystemTime::UNIX_EPOCH) + .map_err(|e| { + warn!( + "Current time {:?} is before unix epoch. Something is wrong: {:?}", + now, e + ); + }) + .ok() } /// Get the slot for now. fn slot_now(slot_duration: u64) -> Option { - duration_now().map(|s| s.as_secs() / slot_duration) + duration_now().map(|s| s.as_secs() / slot_duration) } fn inherent_to_common_error(err: RuntimeString) -> consensus_common::Error { - consensus_common::ErrorKind::InherentData(err.into()).into() + consensus_common::ErrorKind::InherentData(err.into()).into() } /// A digest item which is usable with aura consensus. pub trait CompatibleDigestItem: Sized { - /// Construct a digest item which contains a slot number and a signature on the - /// hash. - fn aura_seal(slot_num: u64, signature: Signature) -> Self; + /// Construct a digest item which contains a slot number and a signature on the + /// hash. + fn aura_seal(slot_num: u64, signature: Signature) -> Self; - /// If this item is an Aura seal, return the slot number and signature. - fn as_aura_seal(&self) -> Option<(u64, Signature)>; + /// If this item is an Aura seal, return the slot number and signature. + fn as_aura_seal(&self) -> Option<(u64, Signature)>; - /// Return `true` if this seal type is deprecated. Otherwise, return - /// `false`. - fn is_deprecated(&self) -> bool; + /// Return `true` if this seal type is deprecated. Otherwise, return + /// `false`. + fn is_deprecated(&self) -> bool; } impl CompatibleDigestItem
<P>
for generic::DigestItem - where P: Pair, P::Signature: Clone + Encode + Decode, +where + P: Pair, + P::Signature: Clone + Encode + Decode, { - /// Construct a digest item which is a slot number and a signature on the - /// hash. - fn aura_seal(slot_number: u64, signature: Signature
<P>
) -> Self { - generic::DigestItem::Consensus(AURA_ENGINE_ID, (slot_number, signature).encode()) - } - - /// If this item is an Aura seal, return the slot number and signature. - #[allow(deprecated)] - fn as_aura_seal(&self) -> Option<(u64, Signature
<P>
)> { - match self { - generic::DigestItem::Seal(slot, ref sig) => Some((*slot, (*sig).clone())), - generic::DigestItem::Consensus(AURA_ENGINE_ID, seal) => Decode::decode(&mut &seal[..]), - _ => None, - } - } - - #[allow(deprecated)] - fn is_deprecated(&self) -> bool { - match self { - generic::DigestItem::Seal(_, _) => true, - _ => false, - } - } + /// Construct a digest item which is a slot number and a signature on the + /// hash. + fn aura_seal(slot_number: u64, signature: Signature
<P>
) -> Self { + generic::DigestItem::Consensus(AURA_ENGINE_ID, (slot_number, signature).encode()) + } + + /// If this item is an Aura seal, return the slot number and signature. + #[allow(deprecated)] + fn as_aura_seal(&self) -> Option<(u64, Signature
<P>
)> { + match self { + generic::DigestItem::Seal(slot, ref sig) => Some((*slot, (*sig).clone())), + generic::DigestItem::Consensus(AURA_ENGINE_ID, seal) => Decode::decode(&mut &seal[..]), + _ => None, + } + } + + #[allow(deprecated)] + fn is_deprecated(&self) -> bool { + match self { + generic::DigestItem::Seal(_, _) => true, + _ => false, + } + } } struct AuraSlotCompatible; impl SlotCompatible for AuraSlotCompatible { - fn extract_timestamp_and_slot( - data: &InherentData - ) -> Result<(TimestampInherent, AuraInherent), consensus_common::Error> { - data.timestamp_inherent_data() - .and_then(|t| data.aura_inherent_data().map(|a| (t, a))) - .map_err(inherent_to_common_error) - } + fn extract_timestamp_and_slot( + data: &InherentData, + ) -> Result<(TimestampInherent, AuraInherent), consensus_common::Error> { + data.timestamp_inherent_data() + .and_then(|t| data.aura_inherent_data().map(|a| (t, a))) + .map_err(inherent_to_common_error) + } } /// Start the aura worker in a separate thread. pub fn start_aura_thread( - slot_duration: SlotDuration, - local_key: Arc
<P>
, - client: Arc, - block_import: Arc, - env: Arc, - sync_oracle: SO, - on_exit: OnExit, - inherent_data_providers: InherentDataProviders, - force_authoring: bool, -) -> Result<(), consensus_common::Error> where - B: Block + 'static, - C: ChainHead + ProvideRuntimeApi + ProvideCache + Send + Sync + 'static, - C::Api: AuthoritiesApi, - E: Environment + Send + Sync + 'static, - E::Proposer: Proposer + Send + 'static, - <>::Create as IntoFuture>::Future: Send + 'static, - I: BlockImport + Send + Sync + 'static, - Error: From + 'static, - P: Pair + Send + Sync + 'static, - P::Public: Encode + Decode + Eq + Clone + Debug + Hash + Send + Sync + 'static, - P::Signature: Encode, - SO: SyncOracle + Send + Sync + Clone + 'static, - OnExit: Future + Send + 'static, - DigestItemFor: CompatibleDigestItem
<P>
+ DigestItem> + 'static, - Error: ::std::error::Error + Send + From<::consensus_common::Error> + 'static, + slot_duration: SlotDuration, + local_key: Arc
<P>
, + client: Arc, + block_import: Arc, + env: Arc, + sync_oracle: SO, + on_exit: OnExit, + inherent_data_providers: InherentDataProviders, + force_authoring: bool, +) -> Result<(), consensus_common::Error> +where + B: Block + 'static, + C: ChainHead + ProvideRuntimeApi + ProvideCache + Send + Sync + 'static, + C::Api: AuthoritiesApi, + E: Environment + Send + Sync + 'static, + E::Proposer: Proposer + Send + 'static, + <>::Create as IntoFuture>::Future: Send + 'static, + I: BlockImport + Send + Sync + 'static, + Error: From + 'static, + P: Pair + Send + Sync + 'static, + P::Public: Encode + Decode + Eq + Clone + Debug + Hash + Send + Sync + 'static, + P::Signature: Encode, + SO: SyncOracle + Send + Sync + Clone + 'static, + OnExit: Future + Send + 'static, + DigestItemFor: CompatibleDigestItem
<P>
+ DigestItem> + 'static, + Error: ::std::error::Error + Send + From<::consensus_common::Error> + 'static, { - let worker = AuraWorker { - client: client.clone(), - block_import, - env, - local_key, - inherent_data_providers: inherent_data_providers.clone(), - sync_oracle: sync_oracle.clone(), - force_authoring, - }; - - aura_slots::start_slot_worker_thread::<_, _, _, _, AuraSlotCompatible, _>( - slot_duration, - client, - Arc::new(worker), - sync_oracle, - on_exit, - inherent_data_providers - ) + let worker = AuraWorker { + client: client.clone(), + block_import, + env, + local_key, + inherent_data_providers: inherent_data_providers.clone(), + sync_oracle: sync_oracle.clone(), + force_authoring, + }; + + aura_slots::start_slot_worker_thread::<_, _, _, _, AuraSlotCompatible, _>( + slot_duration, + client, + Arc::new(worker), + sync_oracle, + on_exit, + inherent_data_providers, + ) } /// Start the aura worker. The returned future should be run in a tokio runtime. pub fn start_aura( - slot_duration: SlotDuration, - local_key: Arc
<P>
, - client: Arc, - block_import: Arc, - env: Arc, - sync_oracle: SO, - on_exit: OnExit, - inherent_data_providers: InherentDataProviders, - force_authoring: bool, -) -> Result, consensus_common::Error> where - B: Block, - C: ChainHead + ProvideRuntimeApi + ProvideCache, - C::Api: AuthoritiesApi, - E: Environment, - E::Proposer: Proposer, - <>::Create as IntoFuture>::Future: Send + 'static, - I: BlockImport + Send + Sync + 'static, - Error: From, - P: Pair + Send + Sync + 'static, - P::Public: Hash + Eq + Send + Sync + Clone + Debug + Encode + Decode + 'static, - P::Signature: Encode, - SO: SyncOracle + Send + Sync + Clone, - DigestItemFor: CompatibleDigestItem
<P>
+ DigestItem>, - Error: ::std::error::Error + Send + 'static + From<::consensus_common::Error>, - OnExit: Future, + slot_duration: SlotDuration, + local_key: Arc
<P>
, + client: Arc, + block_import: Arc, + env: Arc, + sync_oracle: SO, + on_exit: OnExit, + inherent_data_providers: InherentDataProviders, + force_authoring: bool, +) -> Result, consensus_common::Error> +where + B: Block, + C: ChainHead + ProvideRuntimeApi + ProvideCache, + C::Api: AuthoritiesApi, + E: Environment, + E::Proposer: Proposer, + <>::Create as IntoFuture>::Future: Send + 'static, + I: BlockImport + Send + Sync + 'static, + Error: From, + P: Pair + Send + Sync + 'static, + P::Public: Hash + Eq + Send + Sync + Clone + Debug + Encode + Decode + 'static, + P::Signature: Encode, + SO: SyncOracle + Send + Sync + Clone, + DigestItemFor: CompatibleDigestItem
<P>
+ DigestItem>, + Error: ::std::error::Error + Send + 'static + From<::consensus_common::Error>, + OnExit: Future, { - let worker = AuraWorker { - client: client.clone(), - block_import, - env, - local_key, - inherent_data_providers: inherent_data_providers.clone(), - sync_oracle: sync_oracle.clone(), - force_authoring, - }; - aura_slots::start_slot_worker::<_, _, _, _, AuraSlotCompatible, _>( - slot_duration, - client, - Arc::new(worker), - sync_oracle, - on_exit, - inherent_data_providers - ) + let worker = AuraWorker { + client: client.clone(), + block_import, + env, + local_key, + inherent_data_providers: inherent_data_providers.clone(), + sync_oracle: sync_oracle.clone(), + force_authoring, + }; + aura_slots::start_slot_worker::<_, _, _, _, AuraSlotCompatible, _>( + slot_duration, + client, + Arc::new(worker), + sync_oracle, + on_exit, + inherent_data_providers, + ) } struct AuraWorker { - client: Arc, - block_import: Arc, - env: Arc, - local_key: Arc
<P>
, - sync_oracle: SO, - inherent_data_providers: InherentDataProviders, - force_authoring: bool, + client: Arc, + block_import: Arc, + env: Arc, + local_key: Arc
<P>
, + sync_oracle: SO, + inherent_data_providers: InherentDataProviders, + force_authoring: bool, } -impl SlotWorker for AuraWorker where - C: ProvideRuntimeApi + ProvideCache, - C::Api: AuthoritiesApi, - E: Environment, - E::Proposer: Proposer, - <>::Create as IntoFuture>::Future: Send + 'static, - I: BlockImport + Send + Sync + 'static, - P: Pair + Send + Sync + 'static, - P::Public: Hash + Eq + Send + Sync + Clone + Debug + Encode + Decode + 'static, - P::Signature: Encode, - Error: From, - SO: SyncOracle + Send + Clone, - DigestItemFor: CompatibleDigestItem
<P>
+ DigestItem>, - Error: ::std::error::Error + Send + 'static + From<::consensus_common::Error>, +impl SlotWorker for AuraWorker +where + C: ProvideRuntimeApi + ProvideCache, + C::Api: AuthoritiesApi, + E: Environment, + E::Proposer: Proposer, + <>::Create as IntoFuture>::Future: Send + 'static, + I: BlockImport + Send + Sync + 'static, + P: Pair + Send + Sync + 'static, + P::Public: Hash + Eq + Send + Sync + Clone + Debug + Encode + Decode + 'static, + P::Signature: Encode, + Error: From, + SO: SyncOracle + Send + Clone, + DigestItemFor: CompatibleDigestItem
<P>
+ DigestItem>, + Error: ::std::error::Error + Send + 'static + From<::consensus_common::Error>, { - type OnSlot = Box + Send>; - - fn on_start( - &self, - slot_duration: u64 - ) -> Result<(), consensus_common::Error> { - register_aura_inherent_data_provider(&self.inherent_data_providers, slot_duration) - } - - fn on_slot( - &self, - chain_head: B::Header, - slot_info: SlotInfo, - ) -> Self::OnSlot { - let pair = self.local_key.clone(); - let public_key = self.local_key.public(); - let client = self.client.clone(); - let block_import = self.block_import.clone(); - let env = self.env.clone(); - - let (timestamp, slot_num, slot_duration) = - (slot_info.timestamp, slot_info.number, slot_info.duration); - - let authorities = match authorities(client.as_ref(), &BlockId::Hash(chain_head.hash())) { - Ok(authorities) => authorities, - Err(e) => { - warn!( - "Unable to fetch authorities at block {:?}: {:?}", - chain_head.hash(), - e - ); - telemetry!(CONSENSUS_WARN; "aura.unable_fetching_authorities"; - "slot" => ?chain_head.hash(), "err" => ?e - ); - return Box::new(future::ok(())); - } - }; - - if !self.force_authoring && self.sync_oracle.is_offline() && authorities.len() > 1 { - debug!(target: "aura", "Skipping proposal slot. Waiting for the network."); - telemetry!(CONSENSUS_DEBUG; "aura.skipping_proposal_slot"; - "authorities_len" => authorities.len() - ); - return Box::new(future::ok(())); - } - let maybe_author = slot_author::
<P>
(slot_num, &authorities); - let proposal_work = match maybe_author { - None => return Box::new(future::ok(())), - Some(author) => if author == public_key { - debug!( - target: "aura", "Starting authorship at slot {}; timestamp = {}", - slot_num, - timestamp - ); - telemetry!(CONSENSUS_DEBUG; "aura.starting_authorship"; - "slot_num" => slot_num, "timestamp" => timestamp - ); - - // we are the slot author. make a block and sign it. - let proposer = match env.init(&chain_head, &authorities) { - Ok(p) => p, - Err(e) => { - warn!("Unable to author block in slot {:?}: {:?}", slot_num, e); - telemetry!(CONSENSUS_WARN; "aura.unable_authoring_block"; - "slot" => slot_num, "err" => ?e - ); - return Box::new(future::ok(())) - } - }; - - let remaining_duration = slot_info.remaining_duration(); - // deadline our production to approx. the end of the - // slot - Timeout::new( - proposer.propose(slot_info.inherent_data, remaining_duration).into_future(), - remaining_duration, - ) - } else { - return Box::new(future::ok(())); - } - }; - - Box::new( - proposal_work - .map(move |b| { - // minor hack since we don't have access to the timestamp - // that is actually set by the proposer. - let slot_after_building = slot_now(slot_duration); - if slot_after_building != Some(slot_num) { - info!( - "Discarding proposal for slot {}; block production took too long", - slot_num - ); - telemetry!(CONSENSUS_INFO; "aura.discarding_proposal_took_too_long"; - "slot" => slot_num - ); - return - } - - let (header, body) = b.deconstruct(); - let header_num = header.number().clone(); - let pre_hash = header.hash(); - let parent_hash = header.parent_hash().clone(); - - // sign the pre-sealed hash of the block and then - // add it to a digest item. - let to_sign = (slot_num, pre_hash).encode(); - let signature = pair.sign(&to_sign[..]); - let item = as CompatibleDigestItem
<P>
>::aura_seal( - slot_num, - signature, - ); - - let import_block: ImportBlock = ImportBlock { - origin: BlockOrigin::Own, - header, - justification: None, - post_digests: vec![item], - body: Some(body), - finalized: false, - auxiliary: Vec::new(), - fork_choice: ForkChoiceStrategy::LongestChain, - }; - - info!("Pre-sealed block for proposal at {}. Hash now {:?}, previously {:?}.", - header_num, - import_block.post_header().hash(), - pre_hash - ); - telemetry!(CONSENSUS_INFO; "aura.pre_sealed_block"; - "header_num" => ?header_num, - "hash_now" => ?import_block.post_header().hash(), - "hash_previously" => ?pre_hash - ); - - if let Err(e) = block_import.import_block(import_block, Default::default()) { - warn!(target: "aura", "Error with block built on {:?}: {:?}", + type OnSlot = Box + Send>; + + fn on_start(&self, slot_duration: u64) -> Result<(), consensus_common::Error> { + register_aura_inherent_data_provider(&self.inherent_data_providers, slot_duration) + } + + fn on_slot(&self, chain_head: B::Header, slot_info: SlotInfo) -> Self::OnSlot { + let pair = self.local_key.clone(); + let public_key = self.local_key.public(); + let client = self.client.clone(); + let block_import = self.block_import.clone(); + let env = self.env.clone(); + + let (timestamp, slot_num, slot_duration) = + (slot_info.timestamp, slot_info.number, slot_info.duration); + + let authorities = match authorities(client.as_ref(), &BlockId::Hash(chain_head.hash())) { + Ok(authorities) => authorities, + Err(e) => { + warn!( + "Unable to fetch authorities at block {:?}: {:?}", + chain_head.hash(), + e + ); + telemetry!(CONSENSUS_WARN; "aura.unable_fetching_authorities"; + "slot" => ?chain_head.hash(), "err" => ?e + ); + return Box::new(future::ok(())); + } + }; + + if !self.force_authoring && self.sync_oracle.is_offline() && authorities.len() > 1 { + debug!(target: "aura", "Skipping proposal slot. Waiting for the network."); + telemetry!(CONSENSUS_DEBUG; "aura.skipping_proposal_slot"; + "authorities_len" => authorities.len() + ); + return Box::new(future::ok(())); + } + let maybe_author = slot_author::
<P>
(slot_num, &authorities); + let proposal_work = match maybe_author { + None => return Box::new(future::ok(())), + Some(author) => { + if author == public_key { + debug!( + target: "aura", "Starting authorship at slot {}; timestamp = {}", + slot_num, + timestamp + ); + telemetry!(CONSENSUS_DEBUG; "aura.starting_authorship"; + "slot_num" => slot_num, "timestamp" => timestamp + ); + + // we are the slot author. make a block and sign it. + let proposer = match env.init(&chain_head, &authorities) { + Ok(p) => p, + Err(e) => { + warn!("Unable to author block in slot {:?}: {:?}", slot_num, e); + telemetry!(CONSENSUS_WARN; "aura.unable_authoring_block"; + "slot" => slot_num, "err" => ?e + ); + return Box::new(future::ok(())); + } + }; + + let remaining_duration = slot_info.remaining_duration(); + // deadline our production to approx. the end of the + // slot + Timeout::new( + proposer + .propose(slot_info.inherent_data, remaining_duration) + .into_future(), + remaining_duration, + ) + } else { + return Box::new(future::ok(())); + } + } + }; + + Box::new( + proposal_work + .map(move |b| { + // minor hack since we don't have access to the timestamp + // that is actually set by the proposer. + let slot_after_building = slot_now(slot_duration); + if slot_after_building != Some(slot_num) { + info!( + "Discarding proposal for slot {}; block production took too long", + slot_num + ); + telemetry!(CONSENSUS_INFO; "aura.discarding_proposal_took_too_long"; + "slot" => slot_num + ); + return; + } + + let (header, body) = b.deconstruct(); + let header_num = header.number().clone(); + let pre_hash = header.hash(); + let parent_hash = header.parent_hash().clone(); + + // sign the pre-sealed hash of the block and then + // add it to a digest item. + let to_sign = (slot_num, pre_hash).encode(); + let signature = pair.sign(&to_sign[..]); + let item = as CompatibleDigestItem
<P>
>::aura_seal( + slot_num, signature, + ); + + let import_block: ImportBlock = ImportBlock { + origin: BlockOrigin::Own, + header, + justification: None, + post_digests: vec![item], + body: Some(body), + finalized: false, + auxiliary: Vec::new(), + fork_choice: ForkChoiceStrategy::LongestChain, + }; + + info!( + "Pre-sealed block for proposal at {}. Hash now {:?}, previously {:?}.", + header_num, + import_block.post_header().hash(), + pre_hash + ); + telemetry!(CONSENSUS_INFO; "aura.pre_sealed_block"; + "header_num" => ?header_num, + "hash_now" => ?import_block.post_header().hash(), + "hash_previously" => ?pre_hash + ); + + if let Err(e) = block_import.import_block(import_block, Default::default()) { + warn!(target: "aura", "Error with block built on {:?}: {:?}", parent_hash, e); - telemetry!(CONSENSUS_WARN; "aura.err_with_block_built_on"; - "hash" => ?parent_hash, "err" => ?e - ); - } - }) - .map_err(|e| consensus_common::ErrorKind::ClientImport(format!("{:?}", e)).into()) - ) - } + telemetry!(CONSENSUS_WARN; "aura.err_with_block_built_on"; + "hash" => ?parent_hash, "err" => ?e + ); + } + }) + .map_err(|e| consensus_common::ErrorKind::ClientImport(format!("{:?}", e)).into()), + ) + } } /// check a header has been signed by the right key. If the slot is too far in the future, an error will be returned. @@ -442,126 +464,126 @@ impl SlotWorker for AuraWorker( - slot_now: u64, - mut header: B::Header, - hash: B::Hash, - authorities: &[AuthorityId
<P>
], - allow_old_seals: bool, + slot_now: u64, + mut header: B::Header, + hash: B::Hash, + authorities: &[AuthorityId
<P>
], + allow_old_seals: bool, ) -> Result, String> - where DigestItemFor: CompatibleDigestItem
<P>
, - P::Public: Clone + AsRef, - P::Signature: Decode, +where + DigestItemFor: CompatibleDigestItem
<P>
, + P::Public: Clone + AsRef, + P::Signature: Decode, { - let digest_item = match header.digest_mut().pop() { - Some(x) => x, - None => return Err(format!("Header {:?} is unsealed", hash)), - }; - - if !allow_old_seals && digest_item.is_deprecated() { - debug!(target: "aura", "Header {:?} uses old seal format, rejecting", hash); - return Err(format!("Header {:?} uses old seal format, rejecting", hash)) - } - - let (slot_num, sig) = digest_item.as_aura_seal().ok_or_else(|| { - debug!(target: "aura", "Header {:?} is unsealed", hash); - format!("Header {:?} is unsealed", hash) - })?; - - if slot_num > slot_now { - header.digest_mut().push(digest_item); - Ok(CheckedHeader::Deferred(header, slot_num)) - } else { - // check the signature is valid under the expected authority and - // chain state. - let expected_author = match slot_author::
<P>
(slot_num, &authorities) { - None => return Err("Slot Author not found".to_string()), - Some(author) => author - }; - - let pre_hash = header.hash(); - let to_sign = (slot_num, pre_hash).encode(); - let public = expected_author; - - if P::verify(&sig, &to_sign[..], public) { - Ok(CheckedHeader::Checked(header, slot_num, sig)) - } else { - Err(format!("Bad signature on {:?}", hash)) - } - } + let digest_item = match header.digest_mut().pop() { + Some(x) => x, + None => return Err(format!("Header {:?} is unsealed", hash)), + }; + + if !allow_old_seals && digest_item.is_deprecated() { + debug!(target: "aura", "Header {:?} uses old seal format, rejecting", hash); + return Err(format!("Header {:?} uses old seal format, rejecting", hash)); + } + + let (slot_num, sig) = digest_item.as_aura_seal().ok_or_else(|| { + debug!(target: "aura", "Header {:?} is unsealed", hash); + format!("Header {:?} is unsealed", hash) + })?; + + if slot_num > slot_now { + header.digest_mut().push(digest_item); + Ok(CheckedHeader::Deferred(header, slot_num)) + } else { + // check the signature is valid under the expected authority and + // chain state. + let expected_author = match slot_author::
<P>
(slot_num, &authorities) { + None => return Err("Slot Author not found".to_string()), + Some(author) => author, + }; + + let pre_hash = header.hash(); + let to_sign = (slot_num, pre_hash).encode(); + let public = expected_author; + + if P::verify(&sig, &to_sign[..], public) { + Ok(CheckedHeader::Checked(header, slot_num, sig)) + } else { + Err(format!("Bad signature on {:?}", hash)) + } + } } /// Extra verification for Aura blocks. pub trait ExtraVerification: Send + Sync { - /// Future that resolves when the block is verified or fails with error if not. - type Verified: IntoFuture; - - /// Do additional verification for this block. - fn verify( - &self, - header: &B::Header, - body: Option<&[B::Extrinsic]>, - ) -> Self::Verified; + /// Future that resolves when the block is verified or fails with error if not. + type Verified: IntoFuture; + + /// Do additional verification for this block. + fn verify(&self, header: &B::Header, body: Option<&[B::Extrinsic]>) -> Self::Verified; } /// A verifier for Aura blocks. pub struct AuraVerifier { - client: Arc, - extra: E, - phantom: PhantomData
<P>
, - inherent_data_providers: inherents::InherentDataProviders, - allow_old_seals: bool, + client: Arc, + extra: E, + phantom: PhantomData
<P>
, + inherent_data_providers: inherents::InherentDataProviders, + allow_old_seals: bool, } impl AuraVerifier - where P: Send + Sync + 'static +where + P: Send + Sync + 'static, { - fn check_inherents( - &self, - block: B, - block_id: BlockId, - inherent_data: InherentData, - timestamp_now: u64, - ) -> Result<(), String> - where C: ProvideRuntimeApi, C::Api: BlockBuilderApi - { - const MAX_TIMESTAMP_DRIFT_SECS: u64 = 60; - - let inherent_res = self.client.runtime_api().check_inherents( - &block_id, - block, - inherent_data, - ).map_err(|e| format!("{:?}", e))?; - - if !inherent_res.ok() { - inherent_res - .into_errors() - .try_for_each(|(i, e)| match TIError::try_from(&i, &e) { - Some(TIError::ValidAtTimestamp(timestamp)) => { - // halt import until timestamp is valid. - // reject when too far ahead. - if timestamp > timestamp_now + MAX_TIMESTAMP_DRIFT_SECS { - return Err("Rejecting block too far in future".into()); - } - - let diff = timestamp.saturating_sub(timestamp_now); - info!( - target: "aura", - "halting for block {} seconds in the future", - diff - ); - telemetry!(CONSENSUS_INFO; "aura.halting_for_future_block"; - "diff" => ?diff - ); - thread::sleep(Duration::from_secs(diff)); - Ok(()) - }, - Some(TIError::Other(e)) => Err(e.into()), - None => Err(self.inherent_data_providers.error_to_string(&i, &e)), - }) - } else { - Ok(()) - } - } + fn check_inherents( + &self, + block: B, + block_id: BlockId, + inherent_data: InherentData, + timestamp_now: u64, + ) -> Result<(), String> + where + C: ProvideRuntimeApi, + C::Api: BlockBuilderApi, + { + const MAX_TIMESTAMP_DRIFT_SECS: u64 = 60; + + let inherent_res = self + .client + .runtime_api() + .check_inherents(&block_id, block, inherent_data) + .map_err(|e| format!("{:?}", e))?; + + if !inherent_res.ok() { + inherent_res + .into_errors() + .try_for_each(|(i, e)| match TIError::try_from(&i, &e) { + Some(TIError::ValidAtTimestamp(timestamp)) => { + // halt import until timestamp is valid. + // reject when too far ahead. + if timestamp > timestamp_now + MAX_TIMESTAMP_DRIFT_SECS { + return Err("Rejecting block too far in future".into()); + } + + let diff = timestamp.saturating_sub(timestamp_now); + info!( + target: "aura", + "halting for block {} seconds in the future", + diff + ); + telemetry!(CONSENSUS_INFO; "aura.halting_for_future_block"; + "diff" => ?diff + ); + thread::sleep(Duration::from_secs(diff)); + Ok(()) + } + Some(TIError::Other(e)) => Err(e.into()), + None => Err(self.inherent_data_providers.error_to_string(&i, &e)), + }) + } else { + Ok(()) + } + } } /// No-op extra verification. @@ -569,135 +591,147 @@ impl AuraVerifier pub struct NothingExtra; impl ExtraVerification for NothingExtra { - type Verified = Result<(), String>; + type Verified = Result<(), String>; - fn verify(&self, _: &B::Header, _: Option<&[B::Extrinsic]>) -> Self::Verified { - Ok(()) - } + fn verify(&self, _: &B::Header, _: Option<&[B::Extrinsic]>) -> Self::Verified { + Ok(()) + } } #[forbid(deprecated)] -impl Verifier for AuraVerifier where - C: ProvideRuntimeApi + Send + Sync, - C::Api: BlockBuilderApi, - DigestItemFor: CompatibleDigestItem
<P>
+ DigestItem>, - E: ExtraVerification, - P: Pair + Send + Sync + 'static, - P::Public: Send + Sync + Hash + Eq + Clone + Decode + Encode + Debug + AsRef + 'static, - P::Signature: Encode + Decode, - Self: Authorities, +impl Verifier for AuraVerifier +where + C: ProvideRuntimeApi + Send + Sync, + C::Api: BlockBuilderApi, + DigestItemFor: CompatibleDigestItem
<P>
+ DigestItem>, + E: ExtraVerification, + P: Pair + Send + Sync + 'static, + P::Public: + Send + Sync + Hash + Eq + Clone + Decode + Encode + Debug + AsRef + 'static, + P::Signature: Encode + Decode, + Self: Authorities, { - fn verify( - &self, - origin: BlockOrigin, - header: B::Header, - justification: Option, - mut body: Option>, - ) -> Result<(ImportBlock, Option>>), String> { - let mut inherent_data = self.inherent_data_providers.create_inherent_data().map_err(String::from)?; - let (timestamp_now, slot_now) = AuraSlotCompatible::extract_timestamp_and_slot(&inherent_data) - .map_err(|e| format!("Could not extract timestamp and slot: {:?}", e))?; - let hash = header.hash(); - let parent_hash = *header.parent_hash(); - let authorities = self.authorities(&BlockId::Hash(parent_hash)) - .map_err(|e| format!("Could not fetch authorities at {:?}: {:?}", parent_hash, e))?; - - let extra_verification = self.extra.verify( - &header, - body.as_ref().map(|x| &x[..]), - ); - - // we add one to allow for some small drift. - // FIXME #1019 in the future, alter this queue to allow deferring of headers - let checked_header = check_header::( - slot_now + 1, - header, - hash, - &authorities[..], - self.allow_old_seals, - )?; - match checked_header { - CheckedHeader::Checked(pre_header, slot_num, sig) => { - let item = >::aura_seal(slot_num, sig); - - // if the body is passed through, we need to use the runtime - // to check that the internally-set timestamp in the inherents - // actually matches the slot set in the seal. - if let Some(inner_body) = body.take() { - inherent_data.aura_replace_inherent_data(slot_num); - let block = B::new(pre_header.clone(), inner_body); - - // skip the inherents verification if the runtime API is old. - if self.client - .runtime_api() - .has_api_with::, _>(&BlockId::Hash(parent_hash), |v| v >= 2) - .map_err(|e| format!("{:?}", e))? 
- { - self.check_inherents( - block.clone(), - BlockId::Hash(parent_hash), - inherent_data, - timestamp_now, - )?; - } - - let (_, inner_body) = block.deconstruct(); - body = Some(inner_body); - } - - trace!(target: "aura", "Checked {:?}; importing.", pre_header); - telemetry!(CONSENSUS_TRACE; "aura.checked_and_importing"; "pre_header" => ?pre_header); - - extra_verification.into_future().wait()?; - - let import_block = ImportBlock { - origin, - header: pre_header, - post_digests: vec![item], - body, - finalized: false, - justification, - auxiliary: Vec::new(), - fork_choice: ForkChoiceStrategy::LongestChain, - }; - - // FIXME #1019 extract authorities - Ok((import_block, None)) - } - CheckedHeader::Deferred(a, b) => { - debug!(target: "aura", "Checking {:?} failed; {:?}, {:?}.", hash, a, b); - telemetry!(CONSENSUS_DEBUG; "aura.header_too_far_in_future"; - "hash" => ?hash, "a" => ?a, "b" => ?b - ); - Err(format!("Header {:?} rejected: too far in the future", hash)) - } - } - } + fn verify( + &self, + origin: BlockOrigin, + header: B::Header, + justification: Option, + mut body: Option>, + ) -> Result<(ImportBlock, Option>>), String> { + let mut inherent_data = self + .inherent_data_providers + .create_inherent_data() + .map_err(String::from)?; + let (timestamp_now, slot_now) = + AuraSlotCompatible::extract_timestamp_and_slot(&inherent_data) + .map_err(|e| format!("Could not extract timestamp and slot: {:?}", e))?; + let hash = header.hash(); + let parent_hash = *header.parent_hash(); + let authorities = self + .authorities(&BlockId::Hash(parent_hash)) + .map_err(|e| format!("Could not fetch authorities at {:?}: {:?}", parent_hash, e))?; + + let extra_verification = self.extra.verify(&header, body.as_ref().map(|x| &x[..])); + + // we add one to allow for some small drift. + // FIXME #1019 in the future, alter this queue to allow deferring of headers + let checked_header = check_header::( + slot_now + 1, + header, + hash, + &authorities[..], + self.allow_old_seals, + )?; + match checked_header { + CheckedHeader::Checked(pre_header, slot_num, sig) => { + let item = >::aura_seal(slot_num, sig); + + // if the body is passed through, we need to use the runtime + // to check that the internally-set timestamp in the inherents + // actually matches the slot set in the seal. + if let Some(inner_body) = body.take() { + inherent_data.aura_replace_inherent_data(slot_num); + let block = B::new(pre_header.clone(), inner_body); + + // skip the inherents verification if the runtime API is old. + if self + .client + .runtime_api() + .has_api_with::, _>(&BlockId::Hash(parent_hash), |v| { + v >= 2 + }) + .map_err(|e| format!("{:?}", e))? 
+ { + self.check_inherents( + block.clone(), + BlockId::Hash(parent_hash), + inherent_data, + timestamp_now, + )?; + } + + let (_, inner_body) = block.deconstruct(); + body = Some(inner_body); + } + + trace!(target: "aura", "Checked {:?}; importing.", pre_header); + telemetry!(CONSENSUS_TRACE; "aura.checked_and_importing"; "pre_header" => ?pre_header); + + extra_verification.into_future().wait()?; + + let import_block = ImportBlock { + origin, + header: pre_header, + post_digests: vec![item], + body, + finalized: false, + justification, + auxiliary: Vec::new(), + fork_choice: ForkChoiceStrategy::LongestChain, + }; + + // FIXME #1019 extract authorities + Ok((import_block, None)) + } + CheckedHeader::Deferred(a, b) => { + debug!(target: "aura", "Checking {:?} failed; {:?}, {:?}.", hash, a, b); + telemetry!(CONSENSUS_DEBUG; "aura.header_too_far_in_future"; + "hash" => ?hash, "a" => ?a, "b" => ?b + ); + Err(format!("Header {:?} rejected: too far in the future", hash)) + } + } + } } -impl Authorities for AuraVerifier where - B: Block, - C: ProvideRuntimeApi + ProvideCache, - C::Api: AuthoritiesApi, +impl Authorities for AuraVerifier +where + B: Block, + C: ProvideRuntimeApi + ProvideCache, + C::Api: AuthoritiesApi, { - type Error = ConsensusError; + type Error = ConsensusError; - fn authorities(&self, at: &BlockId) -> Result>, Self::Error> { - authorities(self.client.as_ref(), at) - } + fn authorities(&self, at: &BlockId) -> Result>, Self::Error> { + authorities(self.client.as_ref(), at) + } } -fn authorities(client: &C, at: &BlockId) -> Result>, ConsensusError> where - B: Block, - C: ProvideRuntimeApi + ProvideCache, - C::Api: AuthoritiesApi, +fn authorities(client: &C, at: &BlockId) -> Result>, ConsensusError> +where + B: Block, + C: ProvideRuntimeApi + ProvideCache, + C::Api: AuthoritiesApi, { - client - .cache() - .and_then(|cache| cache.get_at(&well_known_cache_keys::AUTHORITIES, at) - .and_then(|v| Decode::decode(&mut &v[..]))) - .or_else(|| client.runtime_api().authorities(at).ok()) - .ok_or_else(|| consensus_common::ErrorKind::InvalidAuthoritiesSet.into()) + client + .cache() + .and_then(|cache| { + cache + .get_at(&well_known_cache_keys::AUTHORITIES, at) + .and_then(|v| Decode::decode(&mut &v[..])) + }) + .or_else(|| client.runtime_api().authorities(at).ok()) + .ok_or_else(|| consensus_common::ErrorKind::InvalidAuthoritiesSet.into()) } /// The Aura import queue type. @@ -705,231 +739,250 @@ pub type AuraImportQueue = BasicQueue; /// Register the aura inherent data provider, if not registered already. fn register_aura_inherent_data_provider( - inherent_data_providers: &InherentDataProviders, - slot_duration: u64, + inherent_data_providers: &InherentDataProviders, + slot_duration: u64, ) -> Result<(), consensus_common::Error> { - if !inherent_data_providers.has_provider(&srml_aura::INHERENT_IDENTIFIER) { - inherent_data_providers - .register_provider(srml_aura::InherentDataProvider::new(slot_duration)) - .map_err(inherent_to_common_error) - } else { - Ok(()) - } + if !inherent_data_providers.has_provider(&srml_aura::INHERENT_IDENTIFIER) { + inherent_data_providers + .register_provider(srml_aura::InherentDataProvider::new(slot_duration)) + .map_err(inherent_to_common_error) + } else { + Ok(()) + } } /// Start an import queue for the Aura consensus algorithm. 
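`register_aura_inherent_data_provider`, reformatted just above, is deliberately idempotent: it checks `has_provider` before registering, so a repeated call is a no-op rather than a duplicate-registration error. A hedged sketch of that behaviour, written as if called from inside this crate; `wire_up` and the 6-second slot duration are illustrative assumptions, and `import_queue` below relies on the same call:

```rust
use inherents::InherentDataProviders;

fn wire_up() -> Result<(), consensus_common::Error> {
    let providers = InherentDataProviders::new();

    // The first call registers the aura inherent data provider; the second
    // sees `has_provider(&srml_aura::INHERENT_IDENTIFIER)` return true and
    // returns Ok(()) without attempting to register again.
    register_aura_inherent_data_provider(&providers, 6)?;
    register_aura_inherent_data_provider(&providers, 6)?;
    Ok(())
}
```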
pub fn import_queue( - slot_duration: SlotDuration, - block_import: SharedBlockImport, - justification_import: Option>, - client: Arc, - extra: E, - inherent_data_providers: InherentDataProviders, - allow_old_seals: bool, -) -> Result, consensus_common::Error> where - B: Block, - C: 'static + ProvideRuntimeApi + ProvideCache + Send + Sync, - C::Api: BlockBuilderApi + AuthoritiesApi, - DigestItemFor: CompatibleDigestItem
<P>
+ DigestItem>, - E: 'static + ExtraVerification, - P: Pair + Send + Sync + 'static, - P::Public: Clone + Eq + Send + Sync + Hash + Debug + Encode + Decode + AsRef, - P::Signature: Encode + Decode, + slot_duration: SlotDuration, + block_import: SharedBlockImport, + justification_import: Option>, + client: Arc, + extra: E, + inherent_data_providers: InherentDataProviders, + allow_old_seals: bool, +) -> Result, consensus_common::Error> +where + B: Block, + C: 'static + ProvideRuntimeApi + ProvideCache + Send + Sync, + C::Api: BlockBuilderApi + AuthoritiesApi, + DigestItemFor: CompatibleDigestItem
<P>
+ DigestItem>, + E: 'static + ExtraVerification, + P: Pair + Send + Sync + 'static, + P::Public: Clone + Eq + Send + Sync + Hash + Debug + Encode + Decode + AsRef, + P::Signature: Encode + Decode, { - register_aura_inherent_data_provider(&inherent_data_providers, slot_duration.get())?; - - let verifier = Arc::new( - AuraVerifier { - client: client.clone(), - extra, - inherent_data_providers, - phantom: PhantomData, - allow_old_seals, - } - ); - Ok(BasicQueue::new(verifier, block_import, justification_import)) + register_aura_inherent_data_provider(&inherent_data_providers, slot_duration.get())?; + + let verifier = Arc::new(AuraVerifier { + client: client.clone(), + extra, + inherent_data_providers, + phantom: PhantomData, + allow_old_seals, + }); + Ok(BasicQueue::new( + verifier, + block_import, + justification_import, + )) } #[cfg(test)] mod tests { - use super::*; - use consensus_common::NoNetwork as DummyOracle; - use network::test::*; - use network::test::{Block as TestBlock, PeersClient}; - use runtime_primitives::traits::Block as BlockT; - use network::config::ProtocolConfig; - use parking_lot::Mutex; - use tokio::runtime::current_thread; - use keyring::ed25519::Keyring; - use primitives::ed25519; - use client::BlockchainEvents; - use test_client; - - type Error = ::client::error::Error; - - type TestClient = ::client::Client; - - struct DummyFactory(Arc); - struct DummyProposer(u64, Arc); - - impl Environment for DummyFactory { - type Proposer = DummyProposer; - type Error = Error; - - fn init(&self, parent_header: &::Header, _authorities: &[AuthorityId]) - -> Result - { - Ok(DummyProposer(parent_header.number + 1, self.0.clone())) - } - } - - impl Proposer for DummyProposer { - type Error = Error; - type Create = Result; - - fn propose(&self, _: InherentData, _: Duration) -> Result { - self.1.new_block().unwrap().bake().map_err(|e| e.into()) - } - } - - const SLOT_DURATION: u64 = 1; - const TEST_ROUTING_INTERVAL: Duration = Duration::from_millis(50); - - pub struct AuraTestNet { - peers: Vec>>, - started: bool, - } - - impl TestNetFactory for AuraTestNet { - type Specialization = DummySpecialization; - type Verifier = AuraVerifier; - type PeerData = (); - - /// Create new test network with peers and given config. 
- fn from_config(_config: &ProtocolConfig) -> Self { - AuraTestNet { - peers: Vec::new(), - started: false, - } - } - - fn make_verifier(&self, client: Arc, _cfg: &ProtocolConfig) - -> Arc - { - let slot_duration = SlotDuration::get_or_compute(&*client) - .expect("slot duration available"); - let inherent_data_providers = InherentDataProviders::new(); - register_aura_inherent_data_provider( - &inherent_data_providers, - slot_duration.get() - ).expect("Registers aura inherent data provider"); - - assert_eq!(slot_duration.get(), SLOT_DURATION); - Arc::new(AuraVerifier { - client, - extra: NothingExtra, - inherent_data_providers, - phantom: Default::default(), - allow_old_seals: false, - }) - } - - fn peer(&self, i: usize) -> &Peer { - &self.peers[i] - } - - fn peers(&self) -> &Vec>> { - &self.peers - } - - fn mut_peers>>)>(&mut self, closure: F) { - closure(&mut self.peers); - } - - fn started(&self) -> bool { - self.started - } - - fn set_started(&mut self, new: bool) { - self.started = new; - } - } - - #[test] - fn authoring_blocks() { - let _ = ::env_logger::try_init(); - let mut net = AuraTestNet::new(3); - - net.start(); - - let peers = &[ - (0, Keyring::Alice), - (1, Keyring::Bob), - (2, Keyring::Charlie), - ]; - - let net = Arc::new(Mutex::new(net)); - let mut import_notifications = Vec::new(); - - let mut runtime = current_thread::Runtime::new().unwrap(); - for (peer_id, key) in peers { - let client = net.lock().peer(*peer_id).client().clone(); - let environ = Arc::new(DummyFactory(client.clone())); - import_notifications.push( - client.import_notification_stream() - .take_while(|n| Ok(!(n.origin != BlockOrigin::Own && n.header.number() < &5))) - .for_each(move |_| Ok(())) - ); - - let slot_duration = SlotDuration::get_or_compute(&*client) - .expect("slot duration available"); - - let inherent_data_providers = InherentDataProviders::new(); - register_aura_inherent_data_provider( - &inherent_data_providers, slot_duration.get() - ).expect("Registers aura inherent data provider"); - - let aura = start_aura::<_, _, _, _, ed25519::Pair, _, _, _>( - slot_duration, - Arc::new(key.clone().into()), - client.clone(), - client, - environ.clone(), - DummyOracle, - futures::empty(), - inherent_data_providers, - false, - ).expect("Starts aura"); - - runtime.spawn(aura); - } - - // wait for all finalized on each. 
- let wait_for = ::futures::future::join_all(import_notifications) - .map(|_| ()) - .map_err(|_| ()); - - let drive_to_completion = ::tokio::timer::Interval::new_interval(TEST_ROUTING_INTERVAL) - .for_each(move |_| { - net.lock().send_import_notifications(); - net.lock().route_fast(); - Ok(()) - }) - .map(|_| ()) - .map_err(|_| ()); - - runtime.block_on(wait_for.select(drive_to_completion).map_err(|_| ())).unwrap(); - } - - #[test] - fn authorities_call_works() { - let client = test_client::new(); - - assert_eq!(client.info().unwrap().chain.best_number, 0); - assert_eq!(authorities(&client, &BlockId::Number(0)).unwrap(), vec![ - Keyring::Alice.into(), - Keyring::Bob.into(), - Keyring::Charlie.into() - ]); - } + use super::*; + use client::BlockchainEvents; + use consensus_common::NoNetwork as DummyOracle; + use keyring::ed25519::Keyring; + use network::config::ProtocolConfig; + use network::test::*; + use network::test::{Block as TestBlock, PeersClient}; + use parking_lot::Mutex; + use primitives::ed25519; + use runtime_primitives::traits::Block as BlockT; + use test_client; + use tokio::runtime::current_thread; + + type Error = ::client::error::Error; + + type TestClient = ::client::Client< + test_client::Backend, + test_client::Executor, + TestBlock, + test_client::runtime::RuntimeApi, + >; + + struct DummyFactory(Arc); + struct DummyProposer(u64, Arc); + + impl Environment for DummyFactory { + type Proposer = DummyProposer; + type Error = Error; + + fn init( + &self, + parent_header: &::Header, + _authorities: &[AuthorityId], + ) -> Result { + Ok(DummyProposer(parent_header.number + 1, self.0.clone())) + } + } + + impl Proposer for DummyProposer { + type Error = Error; + type Create = Result; + + fn propose(&self, _: InherentData, _: Duration) -> Result { + self.1.new_block().unwrap().bake().map_err(|e| e.into()) + } + } + + const SLOT_DURATION: u64 = 1; + const TEST_ROUTING_INTERVAL: Duration = Duration::from_millis(50); + + pub struct AuraTestNet { + peers: Vec>>, + started: bool, + } + + impl TestNetFactory for AuraTestNet { + type Specialization = DummySpecialization; + type Verifier = AuraVerifier; + type PeerData = (); + + /// Create new test network with peers and given config. 
+ fn from_config(_config: &ProtocolConfig) -> Self { + AuraTestNet { + peers: Vec::new(), + started: false, + } + } + + fn make_verifier( + &self, + client: Arc, + _cfg: &ProtocolConfig, + ) -> Arc { + let slot_duration = + SlotDuration::get_or_compute(&*client).expect("slot duration available"); + let inherent_data_providers = InherentDataProviders::new(); + register_aura_inherent_data_provider(&inherent_data_providers, slot_duration.get()) + .expect("Registers aura inherent data provider"); + + assert_eq!(slot_duration.get(), SLOT_DURATION); + Arc::new(AuraVerifier { + client, + extra: NothingExtra, + inherent_data_providers, + phantom: Default::default(), + allow_old_seals: false, + }) + } + + fn peer(&self, i: usize) -> &Peer { + &self.peers[i] + } + + fn peers(&self) -> &Vec>> { + &self.peers + } + + fn mut_peers>>)>( + &mut self, + closure: F, + ) { + closure(&mut self.peers); + } + + fn started(&self) -> bool { + self.started + } + + fn set_started(&mut self, new: bool) { + self.started = new; + } + } + + #[test] + fn authoring_blocks() { + let _ = ::env_logger::try_init(); + let mut net = AuraTestNet::new(3); + + net.start(); + + let peers = &[ + (0, Keyring::Alice), + (1, Keyring::Bob), + (2, Keyring::Charlie), + ]; + + let net = Arc::new(Mutex::new(net)); + let mut import_notifications = Vec::new(); + + let mut runtime = current_thread::Runtime::new().unwrap(); + for (peer_id, key) in peers { + let client = net.lock().peer(*peer_id).client().clone(); + let environ = Arc::new(DummyFactory(client.clone())); + import_notifications.push( + client + .import_notification_stream() + .take_while(|n| Ok(!(n.origin != BlockOrigin::Own && n.header.number() < &5))) + .for_each(move |_| Ok(())), + ); + + let slot_duration = + SlotDuration::get_or_compute(&*client).expect("slot duration available"); + + let inherent_data_providers = InherentDataProviders::new(); + register_aura_inherent_data_provider(&inherent_data_providers, slot_duration.get()) + .expect("Registers aura inherent data provider"); + + let aura = start_aura::<_, _, _, _, ed25519::Pair, _, _, _>( + slot_duration, + Arc::new(key.clone().into()), + client.clone(), + client, + environ.clone(), + DummyOracle, + futures::empty(), + inherent_data_providers, + false, + ) + .expect("Starts aura"); + + runtime.spawn(aura); + } + + // wait for all finalized on each. 
+ let wait_for = ::futures::future::join_all(import_notifications) + .map(|_| ()) + .map_err(|_| ()); + + let drive_to_completion = ::tokio::timer::Interval::new_interval(TEST_ROUTING_INTERVAL) + .for_each(move |_| { + net.lock().send_import_notifications(); + net.lock().route_fast(); + Ok(()) + }) + .map(|_| ()) + .map_err(|_| ()); + + runtime + .block_on(wait_for.select(drive_to_completion).map_err(|_| ())) + .unwrap(); + } + + #[test] + fn authorities_call_works() { + let client = test_client::new(); + + assert_eq!(client.info().unwrap().chain.best_number, 0); + assert_eq!( + authorities(&client, &BlockId::Number(0)).unwrap(), + vec![ + Keyring::Alice.into(), + Keyring::Bob.into(), + Keyring::Charlie.into() + ] + ); + } } diff --git a/core/consensus/authorities/src/lib.rs b/core/consensus/authorities/src/lib.rs index a5ad974f5b..affc730904 100644 --- a/core/consensus/authorities/src/lib.rs +++ b/core/consensus/authorities/src/lib.rs @@ -18,14 +18,14 @@ #![cfg_attr(not(feature = "std"), no_std)] -use substrate_client::decl_runtime_apis; -use runtime_primitives::traits::AuthorityIdFor; use rstd::vec::Vec; +use runtime_primitives::traits::AuthorityIdFor; +use substrate_client::decl_runtime_apis; decl_runtime_apis! { - /// Authorities API. - pub trait AuthoritiesApi { - /// Returns the authorities at the given block. - fn authorities() -> Vec>; - } + /// Authorities API. + pub trait AuthoritiesApi { + /// Returns the authorities at the given block. + fn authorities() -> Vec>; + } } diff --git a/core/consensus/common/src/block_import.rs b/core/consensus/common/src/block_import.rs index 7debe1acfe..82a5841d33 100644 --- a/core/consensus/common/src/block_import.rs +++ b/core/consensus/common/src/block_import.rs @@ -16,189 +16,187 @@ //! Block import helpers. +use crate::well_known_cache_keys; use runtime_primitives::traits::{Block as BlockT, DigestItemFor, Header as HeaderT, NumberFor}; use runtime_primitives::Justification; use std::borrow::Cow; use std::collections::HashMap; -use crate::well_known_cache_keys; /// Block import result. #[derive(Debug, PartialEq, Eq)] pub enum ImportResult { - /// Block imported. - Imported(ImportedAux), - /// Already in the blockchain. - AlreadyInChain, - /// Block or parent is known to be bad. - KnownBad, - /// Block parent is not in the chain. - UnknownParent, + /// Block imported. + Imported(ImportedAux), + /// Already in the blockchain. + AlreadyInChain, + /// Block or parent is known to be bad. + KnownBad, + /// Block parent is not in the chain. + UnknownParent, } /// Auxiliary data associated with an imported block result. #[derive(Debug, PartialEq, Eq)] pub struct ImportedAux { - /// Clear all pending justification requests. - pub clear_justification_requests: bool, - /// Request a justification for the given block. - pub needs_justification: bool, - /// Received a bad justification. - pub bad_justification: bool, + /// Clear all pending justification requests. + pub clear_justification_requests: bool, + /// Request a justification for the given block. + pub needs_justification: bool, + /// Received a bad justification. 
+ pub bad_justification: bool, } impl Default for ImportedAux { - fn default() -> ImportedAux { - ImportedAux { - clear_justification_requests: false, - needs_justification: false, - bad_justification: false, - } - } + fn default() -> ImportedAux { + ImportedAux { + clear_justification_requests: false, + needs_justification: false, + bad_justification: false, + } + } } impl ImportResult { - /// Returns default value for `ImportResult::Imported` with both - /// `clear_justification_requests` and `needs_justification` set to false. - pub fn imported() -> ImportResult { - ImportResult::Imported(ImportedAux::default()) - } + /// Returns default value for `ImportResult::Imported` with both + /// `clear_justification_requests` and `needs_justification` set to false. + pub fn imported() -> ImportResult { + ImportResult::Imported(ImportedAux::default()) + } } /// Block data origin. #[derive(Debug, PartialEq, Eq, Clone, Copy)] pub enum BlockOrigin { - /// Genesis block built into the client. - Genesis, - /// Block is part of the initial sync with the network. - NetworkInitialSync, - /// Block was broadcasted on the network. - NetworkBroadcast, - /// Block that was received from the network and validated in the consensus process. - ConsensusBroadcast, - /// Block that was collated by this node. - Own, - /// Block was imported from a file. - File, + /// Genesis block built into the client. + Genesis, + /// Block is part of the initial sync with the network. + NetworkInitialSync, + /// Block was broadcasted on the network. + NetworkBroadcast, + /// Block that was received from the network and validated in the consensus process. + ConsensusBroadcast, + /// Block that was collated by this node. + Own, + /// Block was imported from a file. + File, } /// Fork choice strategy. #[derive(Debug, PartialEq, Eq, Clone, Copy)] pub enum ForkChoiceStrategy { - /// Longest chain fork choice. - LongestChain, - /// Custom fork choice rule, where true indicates the new block should be the best block. - Custom(bool), + /// Longest chain fork choice. + LongestChain, + /// Custom fork choice rule, where true indicates the new block should be the best block. + Custom(bool), } /// Data required to import a Block pub struct ImportBlock { - /// Origin of the Block - pub origin: BlockOrigin, - /// The header, without consensus post-digests applied. This should be in the same - /// state as it comes out of the runtime. - /// - /// Consensus engines which alter the header (by adding post-runtime digests) - /// should strip those off in the initial verification process and pass them - /// via the `post_digests` field. During block authorship, they should - /// not be pushed to the header directly. - /// - /// The reason for this distinction is so the header can be directly - /// re-executed in a runtime that checks digest equivalence -- the - /// post-runtime digests are pushed back on after. - pub header: Block::Header, - /// Justification provided for this block from the outside. - pub justification: Option, - /// Digest items that have been added after the runtime for external - /// work, like a consensus signature. - pub post_digests: Vec>, - /// Block's body - pub body: Option>, - /// Is this block finalized already? - /// `true` implies instant finality. - pub finalized: bool, - /// Auxiliary consensus data produced by the block. - /// Contains a list of key-value pairs. If values are `None`, the keys - /// will be deleted. - pub auxiliary: Vec<(Vec, Option>)>, - /// Fork choice strategy of this import. 
- pub fork_choice: ForkChoiceStrategy, + /// Origin of the Block + pub origin: BlockOrigin, + /// The header, without consensus post-digests applied. This should be in the same + /// state as it comes out of the runtime. + /// + /// Consensus engines which alter the header (by adding post-runtime digests) + /// should strip those off in the initial verification process and pass them + /// via the `post_digests` field. During block authorship, they should + /// not be pushed to the header directly. + /// + /// The reason for this distinction is so the header can be directly + /// re-executed in a runtime that checks digest equivalence -- the + /// post-runtime digests are pushed back on after. + pub header: Block::Header, + /// Justification provided for this block from the outside. + pub justification: Option, + /// Digest items that have been added after the runtime for external + /// work, like a consensus signature. + pub post_digests: Vec>, + /// Block's body + pub body: Option>, + /// Is this block finalized already? + /// `true` implies instant finality. + pub finalized: bool, + /// Auxiliary consensus data produced by the block. + /// Contains a list of key-value pairs. If values are `None`, the keys + /// will be deleted. + pub auxiliary: Vec<(Vec, Option>)>, + /// Fork choice strategy of this import. + pub fork_choice: ForkChoiceStrategy, } impl ImportBlock { - /// Deconstruct the justified header into parts. - pub fn into_inner(self) - -> ( - BlockOrigin, - ::Header, - Option, - Vec>, - Option::Extrinsic>>, - bool, - Vec<(Vec, Option>)>, - ) { - ( - self.origin, - self.header, - self.justification, - self.post_digests, - self.body, - self.finalized, - self.auxiliary, - ) - } - - /// Get a handle to full header (with post-digests applied). - pub fn post_header(&self) -> Cow { - use runtime_primitives::traits::Digest; - - if self.post_digests.is_empty() { - Cow::Borrowed(&self.header) - } else { - Cow::Owned({ - let mut hdr = self.header.clone(); - for digest_item in &self.post_digests { - hdr.digest_mut().push(digest_item.clone()); - } - - hdr - }) - } - } + /// Deconstruct the justified header into parts. + pub fn into_inner( + self, + ) -> ( + BlockOrigin, + ::Header, + Option, + Vec>, + Option::Extrinsic>>, + bool, + Vec<(Vec, Option>)>, + ) { + ( + self.origin, + self.header, + self.justification, + self.post_digests, + self.body, + self.finalized, + self.auxiliary, + ) + } + + /// Get a handle to full header (with post-digests applied). + pub fn post_header(&self) -> Cow { + use runtime_primitives::traits::Digest; + + if self.post_digests.is_empty() { + Cow::Borrowed(&self.header) + } else { + Cow::Owned({ + let mut hdr = self.header.clone(); + for digest_item in &self.post_digests { + hdr.digest_mut().push(digest_item.clone()); + } + + hdr + }) + } + } } /// Block import trait. pub trait BlockImport { - type Error: ::std::error::Error + Send + 'static; - - /// Check block preconditions. - fn check_block( - &self, - hash: B::Hash, - parent_hash: B::Hash, - ) -> Result; - - /// Import a block. - /// - /// Cached data can be accessed through the blockchain cache. - fn import_block( - &self, - block: ImportBlock, - cache: HashMap>, - ) -> Result; + type Error: ::std::error::Error + Send + 'static; + + /// Check block preconditions. + fn check_block(&self, hash: B::Hash, parent_hash: B::Hash) + -> Result; + + /// Import a block. + /// + /// Cached data can be accessed through the blockchain cache. 
+ fn import_block( + &self, + block: ImportBlock, + cache: HashMap>, + ) -> Result; } /// Justification import trait pub trait JustificationImport { - type Error: ::std::error::Error + Send + 'static; - - /// Called by the import queue when it is started. - fn on_start(&self, _link: &crate::import_queue::Link) { } - - /// Import a Block justification and finalize the given block. - fn import_justification( - &self, - hash: B::Hash, - number: NumberFor, - justification: Justification, - ) -> Result<(), Self::Error>; + type Error: ::std::error::Error + Send + 'static; + + /// Called by the import queue when it is started. + fn on_start(&self, _link: &crate::import_queue::Link) {} + + /// Import a Block justification and finalize the given block. + fn import_justification( + &self, + hash: B::Hash, + number: NumberFor, + justification: Justification, + ) -> Result<(), Self::Error>; } diff --git a/core/consensus/common/src/error.rs b/core/consensus/common/src/error.rs index 0f1914087b..f5bcae9e69 100644 --- a/core/consensus/common/src/error.rs +++ b/core/consensus/common/src/error.rs @@ -15,95 +15,97 @@ // along with Substrate. If not, see . //! Error types in Consensus -use runtime_version::RuntimeVersion; -use error_chain::{error_chain, error_chain_processing, impl_error_chain_processed, - impl_extract_backtrace, impl_error_chain_kind}; +use error_chain::{ + error_chain, error_chain_processing, impl_error_chain_kind, impl_error_chain_processed, + impl_extract_backtrace, +}; use primitives::ed25519::{Public, Signature}; +use runtime_version::RuntimeVersion; error_chain! { - errors { - /// Missing state at block with given descriptor. - StateUnavailable(b: String) { - description("State missing at given block."), - display("State unavailable at block {}", b), - } - - /// I/O terminated unexpectedly - IoTerminated { - description("I/O terminated unexpectedly."), - display("I/O terminated unexpectedly."), - } - - /// Unable to schedule wakeup. - FaultyTimer(e: ::tokio::timer::Error) { - description("Timer error"), - display("Timer error: {}", e), - } - - /// Error while working with inherent data. - InherentData(e: String) { - description("InherentData error"), - display("InherentData error: {}", e), - } - - /// Unable to propose a block. - CannotPropose { - description("Unable to create block proposal."), - display("Unable to create block proposal."), - } - - /// Error checking signature - InvalidSignature(s: Signature, a: Public) { - description("Message signature is invalid"), - display("Message signature {:?} by {:?} is invalid.", s, a), - } - - /// Invalid authorities set received from the runtime. - InvalidAuthoritiesSet { - description("authorities set is invalid"), - display("Current state of blockchain has invalid authorities set"), - } - - /// Account is not an authority. - InvalidAuthority(a: Public) { - description("Message sender is not a valid authority"), - display("Message sender {:?} is not a valid authority.", a), - } - - /// Authoring interface does not match the runtime. - IncompatibleAuthoringRuntime(native: RuntimeVersion, on_chain: RuntimeVersion) { - description("Authoring for current runtime is not supported"), - display("Authoring for current runtime is not supported. Native ({}) cannot author for on-chain ({}).", native, on_chain), - } - - /// Authoring interface does not match the runtime. 
- RuntimeVersionMissing { - description("Current runtime has no version"), - display("Authoring for current runtime is not supported since it has no version."), - } - - /// Authoring interface does not match the runtime. - NativeRuntimeMissing { - description("This build has no native runtime"), - display("Authoring in current build is not supported since it has no runtime."), - } - - /// Justification requirements not met. - InvalidJustification { - description("Invalid justification"), - display("Invalid justification."), - } - - /// Some other error. - Other(e: Box<::std::error::Error + Send>) { - description("Other error") - display("Other error: {}", e.description()) - } - - /// Error from the client while importing - ClientImport(reason: String) { - description("Import failed"), - display("Import failed: {}", reason), - } - } + errors { + /// Missing state at block with given descriptor. + StateUnavailable(b: String) { + description("State missing at given block."), + display("State unavailable at block {}", b), + } + + /// I/O terminated unexpectedly + IoTerminated { + description("I/O terminated unexpectedly."), + display("I/O terminated unexpectedly."), + } + + /// Unable to schedule wakeup. + FaultyTimer(e: ::tokio::timer::Error) { + description("Timer error"), + display("Timer error: {}", e), + } + + /// Error while working with inherent data. + InherentData(e: String) { + description("InherentData error"), + display("InherentData error: {}", e), + } + + /// Unable to propose a block. + CannotPropose { + description("Unable to create block proposal."), + display("Unable to create block proposal."), + } + + /// Error checking signature + InvalidSignature(s: Signature, a: Public) { + description("Message signature is invalid"), + display("Message signature {:?} by {:?} is invalid.", s, a), + } + + /// Invalid authorities set received from the runtime. + InvalidAuthoritiesSet { + description("authorities set is invalid"), + display("Current state of blockchain has invalid authorities set"), + } + + /// Account is not an authority. + InvalidAuthority(a: Public) { + description("Message sender is not a valid authority"), + display("Message sender {:?} is not a valid authority.", a), + } + + /// Authoring interface does not match the runtime. + IncompatibleAuthoringRuntime(native: RuntimeVersion, on_chain: RuntimeVersion) { + description("Authoring for current runtime is not supported"), + display("Authoring for current runtime is not supported. Native ({}) cannot author for on-chain ({}).", native, on_chain), + } + + /// Authoring interface does not match the runtime. + RuntimeVersionMissing { + description("Current runtime has no version"), + display("Authoring for current runtime is not supported since it has no version."), + } + + /// Authoring interface does not match the runtime. + NativeRuntimeMissing { + description("This build has no native runtime"), + display("Authoring in current build is not supported since it has no runtime."), + } + + /// Justification requirements not met. + InvalidJustification { + description("Invalid justification"), + display("Invalid justification."), + } + + /// Some other error. 
+ Other(e: Box<::std::error::Error + Send>) {
+ description("Other error")
+ display("Other error: {}", e.description())
+ }
+
+ /// Error from the client while importing
+ ClientImport(reason: String) {
+ description("Import failed"),
+ display("Import failed: {}", reason),
+ }
+ }
}
diff --git a/core/consensus/common/src/evaluation.rs b/core/consensus/common/src/evaluation.rs
index 48016b1e94..8820b50f59 100644
--- a/core/consensus/common/src/evaluation.rs
+++ b/core/consensus/common/src/evaluation.rs
@@ -18,63 +18,66 @@
use super::MAX_BLOCK_SIZE;

+use error_chain::{
+ bail, error_chain, error_chain_processing, impl_error_chain_kind, impl_error_chain_processed,
+ impl_extract_backtrace,
+};
use parity_codec::Encode;
-use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, As};
-use error_chain::{error_chain, error_chain_processing, impl_error_chain_processed,
- impl_extract_backtrace, impl_error_chain_kind, bail};
+use runtime_primitives::traits::{As, Block as BlockT, Header as HeaderT};

type BlockNumber = u64;

error_chain! {
- errors {
- BadProposalFormat {
- description("Proposal provided not a block."),
- display("Proposal provided not a block."),
- }
- WrongParentHash(expected: String, got: String) {
- description("Proposal had wrong parent hash."),
- display("Proposal had wrong parent hash. Expected {:?}, got {:?}", expected, got),
- }
- WrongNumber(expected: BlockNumber, got: BlockNumber) {
- description("Proposal had wrong number."),
- display("Proposal had wrong number. Expected {}, got {}", expected, got),
- }
- ProposalTooLarge(size: usize) {
- description("Proposal exceeded the maximum size."),
- display(
- "Proposal exceeded the maximum size of {} by {} bytes.",
- MAX_BLOCK_SIZE, size.saturating_sub(MAX_BLOCK_SIZE)
- ),
- }
- }
+ errors {
+ BadProposalFormat {
+ description("Proposal provided is not a block."),
+ display("Proposal provided is not a block."),
+ }
+ WrongParentHash(expected: String, got: String) {
+ description("Proposal had wrong parent hash."),
+ display("Proposal had wrong parent hash. Expected {:?}, got {:?}", expected, got),
+ }
+ WrongNumber(expected: BlockNumber, got: BlockNumber) {
+ description("Proposal had wrong number."),
+ display("Proposal had wrong number. Expected {}, got {}", expected, got),
+ }
+ ProposalTooLarge(size: usize) {
+ description("Proposal exceeded the maximum size."),
+ display(
+ "Proposal exceeded the maximum size of {} by {} bytes.",
+ MAX_BLOCK_SIZE, size.saturating_sub(MAX_BLOCK_SIZE)
+ ),
+ }
+ }
}

/// Attempt to evaluate a substrate block as a node block, returning an error
/// if any initial validity check fails.
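///
/// A hedged usage sketch (`proposal` and `parent_hash` are assumed): a
/// proposal built on block 41 must carry number 42, reference the parent's
/// hash, re-decode from its own encoding, and stay within `MAX_BLOCK_SIZE`:
///
/// ```ignore
/// evaluate_initial(&proposal, &parent_hash, 41)?;
/// ```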
pub fn evaluate_initial( - proposal: &Block, - parent_hash: &::Hash, - parent_number: <::Header as HeaderT>::Number, + proposal: &Block, + parent_hash: &::Hash, + parent_number: <::Header as HeaderT>::Number, ) -> Result<()> { + let encoded = Encode::encode(proposal); + let proposal = Block::decode(&mut &encoded[..]).ok_or_else(|| ErrorKind::BadProposalFormat)?; - let encoded = Encode::encode(proposal); - let proposal = Block::decode(&mut &encoded[..]) - .ok_or_else(|| ErrorKind::BadProposalFormat)?; + if encoded.len() > MAX_BLOCK_SIZE { + bail!(ErrorKind::ProposalTooLarge(encoded.len())) + } - if encoded.len() > MAX_BLOCK_SIZE { - bail!(ErrorKind::ProposalTooLarge(encoded.len())) - } + if *parent_hash != *proposal.header().parent_hash() { + bail!(ErrorKind::WrongParentHash( + format!("{:?}", *parent_hash), + format!("{:?}", proposal.header().parent_hash()) + )); + } - if *parent_hash != *proposal.header().parent_hash() { - bail!(ErrorKind::WrongParentHash( - format!("{:?}", *parent_hash), - format!("{:?}", proposal.header().parent_hash()) - )); - } + if parent_number.as_() + 1 != proposal.header().number().as_() { + bail!(ErrorKind::WrongNumber( + parent_number.as_() + 1, + proposal.header().number().as_() + )); + } - if parent_number.as_() + 1 != proposal.header().number().as_() { - bail!(ErrorKind::WrongNumber(parent_number.as_() + 1, proposal.header().number().as_())); - } - - Ok(()) + Ok(()) } diff --git a/core/consensus/common/src/import_queue.rs b/core/consensus/common/src/import_queue.rs index 7a418ae9f4..409284dd2c 100644 --- a/core/consensus/common/src/import_queue.rs +++ b/core/consensus/common/src/import_queue.rs @@ -25,7 +25,7 @@ //! instantiated simply. use crate::block_import::{ - BlockImport, BlockOrigin, ImportBlock, ImportedAux, ImportResult, JustificationImport, + BlockImport, BlockOrigin, ImportBlock, ImportResult, ImportedAux, JustificationImport, }; use crossbeam_channel::{self as channel, Receiver, Sender}; use parity_codec::Encode; @@ -33,9 +33,7 @@ use parity_codec::Encode; use std::sync::Arc; use std::thread; -use runtime_primitives::traits::{ - AuthorityIdFor, Block as BlockT, Header as HeaderT, NumberFor -}; +use runtime_primitives::traits::{AuthorityIdFor, Block as BlockT, Header as HeaderT, NumberFor}; use runtime_primitives::Justification; use crate::error::Error as ConsensusError; @@ -45,7 +43,8 @@ use parity_codec::alloc::collections::hash_map::HashMap; pub type SharedBlockImport = Arc + Send + Sync>; /// Shared justification import struct used by the queue. -pub type SharedJustificationImport = Arc + Send + Sync>; +pub type SharedJustificationImport = + Arc + Send + Sync>; /// Maps to the Origin used by the network. pub type Origin = libp2p::PeerId; @@ -53,70 +52,76 @@ pub type Origin = libp2p::PeerId; /// Block data used by the queue. #[derive(Debug, PartialEq, Eq, Clone)] pub struct IncomingBlock { - /// Block header hash. - pub hash: ::Hash, - /// Block header if requested. - pub header: Option<::Header>, - /// Block body if requested. - pub body: Option::Extrinsic>>, - /// Justification if requested. - pub justification: Option, - /// The peer, we received this from - pub origin: Option, + /// Block header hash. + pub hash: ::Hash, + /// Block header if requested. + pub header: Option<::Header>, + /// Block body if requested. + pub body: Option::Extrinsic>>, + /// Justification if requested. 
+ pub justification: Option,
+ /// The peer we received this from.
+ pub origin: Option,
}

/// Verify a justification of a block
pub trait Verifier: Send + Sync + Sized {
- /// Verify the given data and return the ImportBlock and an optional
- /// new set of validators to import. If not, err with an Error-Message
- /// presented to the User in the logs.
- fn verify(
- &self,
- origin: BlockOrigin,
- header: B::Header,
- justification: Option,
- body: Option>,
- ) -> Result<(ImportBlock, Option>>), String>;
+ /// Verify the given data and return the ImportBlock and an optional
+ /// new set of validators to import. If not, err with a message that is
+ /// presented to the user in the logs.
+ fn verify(
+ &self,
+ origin: BlockOrigin,
+ header: B::Header,
+ justification: Option,
+ body: Option>,
+ ) -> Result<(ImportBlock, Option>>), String>;
}

/// Blocks import queue API.
pub trait ImportQueue: Send + Sync + ImportQueueClone {
- /// Start background work for the queue as necessary.
- ///
- /// This is called automatically by the network service when synchronization
- /// begins.
- fn start(&self, _link: Box>) -> Result<(), std::io::Error> {
- Ok(())
- }
- /// Clears the import queue and stops importing.
- fn stop(&self);
- /// Import bunch of blocks.
- fn import_blocks(&self, origin: BlockOrigin, blocks: Vec>);
- /// Import a block justification.
- fn import_justification(&self, who: Origin, hash: B::Hash, number: NumberFor, justification: Justification);
+ /// Start background work for the queue as necessary.
+ ///
+ /// This is called automatically by the network service when synchronization
+ /// begins.
+ fn start(&self, _link: Box>) -> Result<(), std::io::Error> {
+ Ok(())
+ }
+ /// Clears the import queue and stops importing.
+ fn stop(&self);
+ /// Import a batch of blocks.
+ fn import_blocks(&self, origin: BlockOrigin, blocks: Vec>);
+ /// Import a block justification.
+ fn import_justification(
+ &self,
+ who: Origin,
+ hash: B::Hash,
+ number: NumberFor,
+ justification: Justification,
+ );
}

pub trait ImportQueueClone {
- fn clone_box(&self) -> Box>;
+ fn clone_box(&self) -> Box>;
}

impl Clone for Box> {
- fn clone(&self) -> Box> {
- self.clone_box()
- }
+ fn clone(&self) -> Box> {
+ self.clone_box()
+ }
}

/// Interface to a basic block import queue that imports blocks sequentially in a separate thread,
/// with pluggable verification.
#[derive(Clone)]
pub struct BasicQueue {
- sender: Sender>,
+ sender: Sender>,
}

impl ImportQueueClone for BasicQueue {
- fn clone_box(&self) -> Box> {
- Box::new(self.clone())
- }
+ fn clone_box(&self) -> Box> {
+ Box::new(self.clone())
+ }
}

/// "BasicQueue" is a wrapper around a channel sender to the "BlockImporter".
@@ -135,229 +140,247 @@ impl ImportQueueClone for BasicQueue {
/// As long as the "BasicQueue" is not dropped, the "BlockImporter" will keep running.
/// The "BlockImporter" owns a sender to the "BlockImportWorker", ensuring that the worker is kept alive until that sender is dropped.
impl BasicQueue {
- /// Instantiate a new basic queue, with given verifier.
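+ /// Instantiate a new basic queue, with given verifier.
+ ///
+ /// A hedged construction sketch (the `verifier` and `block_import` handles
+ /// are assumed; passing `None` skips justification import):
+ ///
+ /// ```ignore
+ /// let queue = BasicQueue::new(verifier, block_import, None);
+ /// queue.import_blocks(BlockOrigin::NetworkBroadcast, incoming_blocks);
+ /// ```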
+ pub fn new>( + verifier: Arc, + block_import: SharedBlockImport, + justification_import: Option>, + ) -> Self { + let (result_sender, result_port) = channel::unbounded(); + let worker_sender = BlockImportWorker::new(result_sender, verifier, block_import); + let importer_sender = BlockImporter::new(result_port, worker_sender, justification_import); + + Self { + sender: importer_sender, + } + } } impl ImportQueue for BasicQueue { - fn start(&self, link: Box>) -> Result<(), std::io::Error> { - let (sender, port) = channel::unbounded(); - let _ = self + fn start(&self, link: Box>) -> Result<(), std::io::Error> { + let (sender, port) = channel::unbounded(); + let _ = self .sender .send(BlockImportMsg::Start(link, sender)) .expect("1. self is holding a sender to the Importer, 2. Importer should handle messages while there are senders around; qed"); - port.recv().expect("1. self is holding a sender to the Importer, 2. Importer should handle messages while there are senders around; qed") - } + port.recv().expect("1. self is holding a sender to the Importer, 2. Importer should handle messages while there are senders around; qed") + } - fn stop(&self) { - let _ = self + fn stop(&self) { + let _ = self .sender .send(BlockImportMsg::Stop) .expect("1. self is holding a sender to the Importer, 2. Importer should handle messages while there are senders around; qed"); - } + } - fn import_blocks(&self, origin: BlockOrigin, blocks: Vec>) { - if blocks.is_empty() { - return; - } - let _ = self + fn import_blocks(&self, origin: BlockOrigin, blocks: Vec>) { + if blocks.is_empty() { + return; + } + let _ = self .sender .send(BlockImportMsg::ImportBlocks(origin, blocks)) .expect("1. self is holding a sender to the Importer, 2. Importer should handle messages while there are senders around; qed"); - } - - fn import_justification(&self, who: Origin, hash: B::Hash, number: NumberFor, justification: Justification) { - let _ = self + } + + fn import_justification( + &self, + who: Origin, + hash: B::Hash, + number: NumberFor, + justification: Justification, + ) { + let _ = self .sender .send(BlockImportMsg::ImportJustification(who.clone(), hash, number, justification)) .expect("1. self is holding a sender to the Importer, 2. 
Importer should handle messages while there are senders around; qed"); - } + } } pub enum BlockImportMsg { - ImportBlocks(BlockOrigin, Vec>), - ImportJustification(Origin, B::Hash, NumberFor, Justification), - Start(Box>, Sender>), - Stop, + ImportBlocks(BlockOrigin, Vec>), + ImportJustification(Origin, B::Hash, NumberFor, Justification), + Start(Box>, Sender>), + Stop, } pub enum BlockImportWorkerMsg { - ImportBlocks(BlockOrigin, Vec>), - Imported( - Vec<( - Result>, BlockImportError>, - B::Hash, - )>, - ), + ImportBlocks(BlockOrigin, Vec>), + Imported( + Vec<( + Result>, BlockImportError>, + B::Hash, + )>, + ), } enum ImportMsgType { - FromWorker(BlockImportWorkerMsg), - FromNetwork(BlockImportMsg), + FromWorker(BlockImportWorkerMsg), + FromNetwork(BlockImportMsg), } struct BlockImporter { - port: Receiver>, - result_port: Receiver>, - worker_sender: Sender>, - link: Option>>, - justification_import: Option>, + port: Receiver>, + result_port: Receiver>, + worker_sender: Sender>, + link: Option>>, + justification_import: Option>, } impl BlockImporter { - fn new( - result_port: Receiver>, - worker_sender: Sender>, - justification_import: Option>, - ) -> Sender> { - let (sender, port) = channel::bounded(4); - let _ = thread::Builder::new() - .name("ImportQueue".into()) - .spawn(move || { - let mut importer = BlockImporter { - port, - result_port, - worker_sender, - link: None, - justification_import, - }; - while importer.run() { - // Importing until all senders have been dropped... - } - }) - .expect("ImportQueue thread spawning failed"); - sender - } - - fn run(&mut self) -> bool { - let msg = select! { - recv(self.port) -> msg => { - match msg { - // Our sender has been dropped, quitting. - Err(_) => return false, - Ok(msg) => ImportMsgType::FromNetwork(msg) - } - }, - recv(self.result_port) -> msg => { - match msg { - Err(_) => unreachable!("1. We hold a sender to the Worker, 2. 
it should not quit until that sender is dropped; qed"), - Ok(msg) => ImportMsgType::FromWorker(msg), - } - } - }; - match msg { - ImportMsgType::FromNetwork(msg) => self.handle_network_msg(msg), - ImportMsgType::FromWorker(msg) => self.handle_worker_msg(msg), - } - } - - fn handle_network_msg(&mut self, msg: BlockImportMsg) -> bool { - match msg { - BlockImportMsg::ImportBlocks(origin, incoming_blocks) => { - self.handle_import_blocks(origin, incoming_blocks) - }, - BlockImportMsg::ImportJustification(who, hash, number, justification) => { - self.handle_import_justification(who, hash, number, justification) - }, - BlockImportMsg::Start(link, sender) => { - if let Some(justification_import) = self.justification_import.as_ref() { - justification_import.on_start(&*link); - } - self.link = Some(link); - let _ = sender.send(Ok(())); - }, - BlockImportMsg::Stop => return false, - } - true - } - - fn handle_worker_msg(&mut self, msg: BlockImportWorkerMsg) -> bool { - let results = match msg { - BlockImportWorkerMsg::Imported(results) => (results), - _ => unreachable!("Import Worker does not send ImportBlocks message; qed"), - }; - let mut has_error = false; - let mut hashes = vec![]; - for (result, hash) in results { - hashes.push(hash); - - if has_error { - continue; - } - - if result.is_err() { - has_error = true; - } - - let link = match self.link.as_ref() { - Some(link) => link, - None => { - trace!(target: "sync", "Received import result for {} while import-queue has no link", hash); - return true; - }, - }; - - match result { - Ok(BlockImportResult::ImportedKnown(number)) => link.block_imported(&hash, number), - Ok(BlockImportResult::ImportedUnknown(number, aux, who)) => { - link.block_imported(&hash, number); - - if aux.clear_justification_requests { - trace!(target: "sync", "Block imported clears all pending justification requests {}: {:?}", number, hash); - link.clear_justification_requests(); - } - - if aux.needs_justification { - trace!(target: "sync", "Block imported but requires justification {}: {:?}", number, hash); - link.request_justification(&hash, number); - } - - if aux.bad_justification { - if let Some(peer) = who { - link.useless_peer(peer, "Sent block with bad justification to import"); - } - } - }, - Err(BlockImportError::IncompleteHeader(who)) => { - if let Some(peer) = who { - link.note_useless_and_restart_sync(peer, "Sent block with incomplete header to import"); - } - }, - Err(BlockImportError::VerificationFailed(who, e)) => { - if let Some(peer) = who { - link.note_useless_and_restart_sync(peer, &format!("Verification failed: {}", e)); - } - }, - Err(BlockImportError::BadBlock(who)) => { - if let Some(peer) = who { - link.note_useless_and_restart_sync(peer, "Sent us a bad block"); - } - }, - Err(BlockImportError::UnknownParent) | Err(BlockImportError::Error) => { - link.restart(); - }, - }; - } - if let Some(link) = self.link.as_ref() { - link.blocks_processed(hashes, has_error); - } - true - } - - fn handle_import_justification(&self, who: Origin, hash: B::Hash, number: NumberFor, justification: Justification) { - let success = self.justification_import.as_ref().map(|justification_import| { + fn new( + result_port: Receiver>, + worker_sender: Sender>, + justification_import: Option>, + ) -> Sender> { + let (sender, port) = channel::bounded(4); + let _ = thread::Builder::new() + .name("ImportQueue".into()) + .spawn(move || { + let mut importer = BlockImporter { + port, + result_port, + worker_sender, + link: None, + justification_import, + }; + while importer.run() { 
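+ // (run() also returns false after an explicit Stop message;
+ // see handle_network_msg below.)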
+ // Importing until all senders have been dropped... + } + }) + .expect("ImportQueue thread spawning failed"); + sender + } + + fn run(&mut self) -> bool { + let msg = select! { + recv(self.port) -> msg => { + match msg { + // Our sender has been dropped, quitting. + Err(_) => return false, + Ok(msg) => ImportMsgType::FromNetwork(msg) + } + }, + recv(self.result_port) -> msg => { + match msg { + Err(_) => unreachable!("1. We hold a sender to the Worker, 2. it should not quit until that sender is dropped; qed"), + Ok(msg) => ImportMsgType::FromWorker(msg), + } + } + }; + match msg { + ImportMsgType::FromNetwork(msg) => self.handle_network_msg(msg), + ImportMsgType::FromWorker(msg) => self.handle_worker_msg(msg), + } + } + + fn handle_network_msg(&mut self, msg: BlockImportMsg) -> bool { + match msg { + BlockImportMsg::ImportBlocks(origin, incoming_blocks) => { + self.handle_import_blocks(origin, incoming_blocks) + } + BlockImportMsg::ImportJustification(who, hash, number, justification) => { + self.handle_import_justification(who, hash, number, justification) + } + BlockImportMsg::Start(link, sender) => { + if let Some(justification_import) = self.justification_import.as_ref() { + justification_import.on_start(&*link); + } + self.link = Some(link); + let _ = sender.send(Ok(())); + } + BlockImportMsg::Stop => return false, + } + true + } + + fn handle_worker_msg(&mut self, msg: BlockImportWorkerMsg) -> bool { + let results = match msg { + BlockImportWorkerMsg::Imported(results) => (results), + _ => unreachable!("Import Worker does not send ImportBlocks message; qed"), + }; + let mut has_error = false; + let mut hashes = vec![]; + for (result, hash) in results { + hashes.push(hash); + + if has_error { + continue; + } + + if result.is_err() { + has_error = true; + } + + let link = match self.link.as_ref() { + Some(link) => link, + None => { + trace!(target: "sync", "Received import result for {} while import-queue has no link", hash); + return true; + } + }; + + match result { + Ok(BlockImportResult::ImportedKnown(number)) => link.block_imported(&hash, number), + Ok(BlockImportResult::ImportedUnknown(number, aux, who)) => { + link.block_imported(&hash, number); + + if aux.clear_justification_requests { + trace!(target: "sync", "Block imported clears all pending justification requests {}: {:?}", number, hash); + link.clear_justification_requests(); + } + + if aux.needs_justification { + trace!(target: "sync", "Block imported but requires justification {}: {:?}", number, hash); + link.request_justification(&hash, number); + } + + if aux.bad_justification { + if let Some(peer) = who { + link.useless_peer(peer, "Sent block with bad justification to import"); + } + } + } + Err(BlockImportError::IncompleteHeader(who)) => { + if let Some(peer) = who { + link.note_useless_and_restart_sync( + peer, + "Sent block with incomplete header to import", + ); + } + } + Err(BlockImportError::VerificationFailed(who, e)) => { + if let Some(peer) = who { + link.note_useless_and_restart_sync( + peer, + &format!("Verification failed: {}", e), + ); + } + } + Err(BlockImportError::BadBlock(who)) => { + if let Some(peer) = who { + link.note_useless_and_restart_sync(peer, "Sent us a bad block"); + } + } + Err(BlockImportError::UnknownParent) | Err(BlockImportError::Error) => { + link.restart(); + } + }; + } + if let Some(link) = self.link.as_ref() { + link.blocks_processed(hashes, has_error); + } + true + } + + fn handle_import_justification( + &self, + who: Origin, + hash: B::Hash, + number: NumberFor, + 
justification: Justification, + ) { + let success = self.justification_import.as_ref().map(|justification_import| { justification_import.import_justification(hash, number, justification) .map_err(|e| { debug!(target: "sync", "Justification import failed with {:?} for hash: {:?} number: {:?} coming from node: {:?}", e, hash, number, who); @@ -365,200 +388,215 @@ impl BlockImporter { }).is_ok() }).unwrap_or(false); - if let Some(link) = self.link.as_ref() { - link.justification_imported(who, &hash, number, success); - } - } + if let Some(link) = self.link.as_ref() { + link.justification_imported(who, &hash, number, success); + } + } - fn handle_import_blocks(&mut self, origin: BlockOrigin, blocks: Vec>) { - trace!(target: "sync", "Scheduling {} blocks for import", blocks.len()); - self.worker_sender + fn handle_import_blocks(&mut self, origin: BlockOrigin, blocks: Vec>) { + trace!(target: "sync", "Scheduling {} blocks for import", blocks.len()); + self.worker_sender .send(BlockImportWorkerMsg::ImportBlocks(origin, blocks)) .expect("1. This is holding a sender to the worker, 2. the worker should not quit while a sender is still held; qed"); - } + } } struct BlockImportWorker> { - result_sender: Sender>, - block_import: SharedBlockImport, - verifier: Arc, + result_sender: Sender>, + block_import: SharedBlockImport, + verifier: Arc, } impl> BlockImportWorker { - pub fn new( - result_sender: Sender>, - verifier: Arc, - block_import: SharedBlockImport, - ) -> Sender> { - let (sender, port) = channel::bounded(4); - let _ = thread::Builder::new() - .name("ImportQueueWorker".into()) - .spawn(move || { - let worker = BlockImportWorker { - result_sender, - verifier, - block_import, - }; - for msg in port.iter() { - // Working until all senders have been dropped... - match msg { - BlockImportWorkerMsg::ImportBlocks(origin, blocks) => { - worker.import_a_batch_of_blocks(origin, blocks) - } - _ => unreachable!("Import Worker does not receive the Imported message; qed"), - } - } - }) - .expect("ImportQueueWorker thread spawning failed"); - sender - } - - fn import_a_batch_of_blocks(&self, origin: BlockOrigin, blocks: Vec>) { - let count = blocks.len(); - let mut imported = 0; - - let blocks_range = match ( - blocks.first().and_then(|b| b.header.as_ref().map(|h| h.number())), - blocks.last().and_then(|b| b.header.as_ref().map(|h| h.number())), - ) { - (Some(first), Some(last)) if first != last => format!(" ({}..{})", first, last), - (Some(first), Some(_)) => format!(" ({})", first), - _ => Default::default(), - }; - - trace!(target: "sync", "Starting import of {} blocks {}", count, blocks_range); - - let mut results = vec![]; - - let mut has_error = false; - - // Blocks in the response/drain should be in ascending order. 
- for block in blocks { - let import_result = if has_error { - Err(BlockImportError::Error) - } else { - import_single_block( - &*self.block_import, - origin.clone(), - block.clone(), - self.verifier.clone(), - ) - }; - let was_ok = import_result.is_ok(); - results.push((import_result, block.hash)); - if was_ok { - imported += 1; - } else { - has_error = true; - } - } - - let _ = self - .result_sender - .send(BlockImportWorkerMsg::Imported(results)); - - trace!(target: "sync", "Imported {} of {}", imported, count); - } + pub fn new( + result_sender: Sender>, + verifier: Arc, + block_import: SharedBlockImport, + ) -> Sender> { + let (sender, port) = channel::bounded(4); + let _ = thread::Builder::new() + .name("ImportQueueWorker".into()) + .spawn(move || { + let worker = BlockImportWorker { + result_sender, + verifier, + block_import, + }; + for msg in port.iter() { + // Working until all senders have been dropped... + match msg { + BlockImportWorkerMsg::ImportBlocks(origin, blocks) => { + worker.import_a_batch_of_blocks(origin, blocks) + } + _ => { + unreachable!("Import Worker does not receive the Imported message; qed") + } + } + } + }) + .expect("ImportQueueWorker thread spawning failed"); + sender + } + + fn import_a_batch_of_blocks(&self, origin: BlockOrigin, blocks: Vec>) { + let count = blocks.len(); + let mut imported = 0; + + let blocks_range = match ( + blocks + .first() + .and_then(|b| b.header.as_ref().map(|h| h.number())), + blocks + .last() + .and_then(|b| b.header.as_ref().map(|h| h.number())), + ) { + (Some(first), Some(last)) if first != last => format!(" ({}..{})", first, last), + (Some(first), Some(_)) => format!(" ({})", first), + _ => Default::default(), + }; + + trace!(target: "sync", "Starting import of {} blocks {}", count, blocks_range); + + let mut results = vec![]; + + let mut has_error = false; + + // Blocks in the response/drain should be in ascending order. + for block in blocks { + let import_result = if has_error { + Err(BlockImportError::Error) + } else { + import_single_block( + &*self.block_import, + origin.clone(), + block.clone(), + self.verifier.clone(), + ) + }; + let was_ok = import_result.is_ok(); + results.push((import_result, block.hash)); + if was_ok { + imported += 1; + } else { + has_error = true; + } + } + + let _ = self + .result_sender + .send(BlockImportWorkerMsg::Imported(results)); + + trace!(target: "sync", "Imported {} of {}", imported, count); + } } /// Hooks that the verification queue can use to influence the synchronization /// algorithm. pub trait Link: Send { - /// Block imported. - fn block_imported(&self, _hash: &B::Hash, _number: NumberFor) {} - /// Batch of blocks imported, with or without error. - fn blocks_processed(&self, _processed_blocks: Vec, _has_error: bool) {} - /// Justification import result. - fn justification_imported(&self, _who: Origin, _hash: &B::Hash, _number: NumberFor, _success: bool) {} - /// Clear all pending justification requests. - fn clear_justification_requests(&self) {} - /// Request a justification for the given block. - fn request_justification(&self, _hash: &B::Hash, _number: NumberFor) {} - /// Disconnect from peer. - fn useless_peer(&self, _who: Origin, _reason: &str) {} - /// Disconnect from peer and restart sync. - fn note_useless_and_restart_sync(&self, _who: Origin, _reason: &str) {} - /// Restart sync. - fn restart(&self) {} + /// Block imported. + fn block_imported(&self, _hash: &B::Hash, _number: NumberFor) {} + /// Batch of blocks imported, with or without error. 
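+ ///
+ /// `_has_error` is true when at least one block in the batch failed to
+ /// import; the hashes arrive in the order the blocks were scheduled (this
+ /// mirrors how `handle_worker_msg` above assembles its results).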
+ fn blocks_processed(&self, _processed_blocks: Vec, _has_error: bool) {}
+ /// Justification import result.
+ fn justification_imported(
+ &self,
+ _who: Origin,
+ _hash: &B::Hash,
+ _number: NumberFor,
+ _success: bool,
+ ) {
+ }
+ /// Clear all pending justification requests.
+ fn clear_justification_requests(&self) {}
+ /// Request a justification for the given block.
+ fn request_justification(&self, _hash: &B::Hash, _number: NumberFor) {}
+ /// Disconnect from peer.
+ fn useless_peer(&self, _who: Origin, _reason: &str) {}
+ /// Disconnect from peer and restart sync.
+ fn note_useless_and_restart_sync(&self, _who: Origin, _reason: &str) {}
+ /// Restart sync.
+ fn restart(&self) {}
}

/// Block import successful result.
#[derive(Debug, PartialEq)]
pub enum BlockImportResult {
- /// Imported known block.
- ImportedKnown(N),
- /// Imported unknown block.
- ImportedUnknown(N, ImportedAux, Option),
+ /// Imported known block.
+ ImportedKnown(N),
+ /// Imported unknown block.
+ ImportedUnknown(N, ImportedAux, Option),
}

/// Block import error.
#[derive(Debug, PartialEq)]
pub enum BlockImportError {
- /// Block missed header, can't be imported
- IncompleteHeader(Option),
- /// Block verification failed, can't be imported
- VerificationFailed(Option, String),
- /// Block is known to be Bad
- BadBlock(Option),
- /// Block has an unknown parent
- UnknownParent,
- /// Other Error.
- Error,
+ /// Block is missing its header and can't be imported.
+ IncompleteHeader(Option),
+ /// Block verification failed and the block can't be imported.
+ VerificationFailed(Option, String),
+ /// Block is known to be bad.
+ BadBlock(Option),
+ /// Block has an unknown parent.
+ UnknownParent,
+ /// Other error.
+ Error,
}

/// Single block import function.
pub fn import_single_block>(
- import_handle: &BlockImport,
- block_origin: BlockOrigin,
- block: IncomingBlock,
- verifier: Arc,
+ import_handle: &BlockImport,
+ block_origin: BlockOrigin,
+ block: IncomingBlock,
+ verifier: Arc,
) -> Result>, BlockImportError> {
- let peer = block.origin;
-
- let (header, justification) = match (block.header, block.justification) {
- (Some(header), justification) => (header, justification),
- (None, _) => {
- if let Some(ref peer) = peer {
- debug!(target: "sync", "Header {} was not provided by {} ", block.hash, peer);
- } else {
- debug!(target: "sync", "Header {} was not provided ", block.hash);
- }
- return Err(BlockImportError::IncompleteHeader(peer))
- },
- };
-
- let number = header.number().clone();
- let hash = header.hash();
- let parent = header.parent_hash().clone();
-
- let import_error = |e| {
- match e {
- Ok(ImportResult::AlreadyInChain) => {
- trace!(target: "sync", "Block already in chain {}: {:?}", number, hash);
- Ok(BlockImportResult::ImportedKnown(number))
- },
- Ok(ImportResult::Imported(aux)) => Ok(BlockImportResult::ImportedUnknown(number, aux, peer.clone())),
- Ok(ImportResult::UnknownParent) => {
- debug!(target: "sync", "Block with unknown parent {}: {:?}, parent: {:?}", number, hash, parent);
- Err(BlockImportError::UnknownParent)
- },
- Ok(ImportResult::KnownBad) => {
- debug!(target: "sync", "Peer gave us a bad block {}: {:?}", number, hash);
- Err(BlockImportError::BadBlock(peer.clone()))
- },
- Err(e) => {
- debug!(target: "sync", "Error importing block {}: {:?}: {:?}", number, hash, e);
- Err(BlockImportError::Error)
- }
- }
- };
-
- match import_error(import_handle.check_block(hash, parent))? {
- BlockImportResult::ImportedUnknown { .. } => (),
- r @ _ => return Ok(r), // Any other successfull result means that the block is already imported.
- }
-
- let (import_block, new_authorities) = verifier.verify(block_origin, header, justification, block.body)
+ let peer = block.origin;
+
+ let (header, justification) = match (block.header, block.justification) {
+ (Some(header), justification) => (header, justification),
+ (None, _) => {
+ if let Some(ref peer) = peer {
+ debug!(target: "sync", "Header {} was not provided by {} ", block.hash, peer);
+ } else {
+ debug!(target: "sync", "Header {} was not provided ", block.hash);
+ }
+ return Err(BlockImportError::IncompleteHeader(peer));
+ }
+ };
+
+ let number = header.number().clone();
+ let hash = header.hash();
+ let parent = header.parent_hash().clone();
+
+ let import_error = |e| match e {
+ Ok(ImportResult::AlreadyInChain) => {
+ trace!(target: "sync", "Block already in chain {}: {:?}", number, hash);
+ Ok(BlockImportResult::ImportedKnown(number))
+ }
+ Ok(ImportResult::Imported(aux)) => Ok(BlockImportResult::ImportedUnknown(
+ number,
+ aux,
+ peer.clone(),
+ )),
+ Ok(ImportResult::UnknownParent) => {
+ debug!(target: "sync", "Block with unknown parent {}: {:?}, parent: {:?}", number, hash, parent);
+ Err(BlockImportError::UnknownParent)
+ }
+ Ok(ImportResult::KnownBad) => {
+ debug!(target: "sync", "Peer gave us a bad block {}: {:?}", number, hash);
+ Err(BlockImportError::BadBlock(peer.clone()))
+ }
+ Err(e) => {
+ debug!(target: "sync", "Error importing block {}: {:?}: {:?}", number, hash, e);
+ Err(BlockImportError::Error)
+ }
+ };
+
+ match import_error(import_handle.check_block(hash, parent))? {
+ BlockImportResult::ImportedUnknown { .. } => (),
+ r @ _ => return Ok(r), // Any other successful result means that the block is already imported.
+ } + + let (import_block, new_authorities) = verifier.verify(block_origin, header, justification, block.body) .map_err(|msg| { if let Some(ref peer) = peer { trace!(target: "sync", "Verifying {}({}) from {} failed: {}", number, hash, peer, msg); @@ -568,117 +606,172 @@ pub fn import_single_block>( BlockImportError::VerificationFailed(peer.clone(), msg) })?; - let mut cache = HashMap::new(); - if let Some(authorities) = new_authorities { - cache.insert(crate::well_known_cache_keys::AUTHORITIES, authorities.encode()); - } + let mut cache = HashMap::new(); + if let Some(authorities) = new_authorities { + cache.insert( + crate::well_known_cache_keys::AUTHORITIES, + authorities.encode(), + ); + } - import_error(import_handle.import_block(import_block, cache)) + import_error(import_handle.import_block(import_block, cache)) } #[cfg(test)] mod tests { - use super::*; - use libp2p::PeerId; - use test_client::runtime::{Block, Hash}; - - #[derive(Debug, PartialEq)] - enum LinkMsg { - BlockImported, - Disconnected, - Restarted, - } - - #[derive(Clone)] - struct TestLink { - sender: Sender, - } - - impl TestLink { - fn new(sender: Sender) -> TestLink { - TestLink { - sender, - } - } - } - - impl Link for TestLink { - fn block_imported(&self, _hash: &Hash, _number: NumberFor) { - let _ = self.sender.send(LinkMsg::BlockImported); - } - fn useless_peer(&self, _: Origin, _: &str) { - let _ = self.sender.send(LinkMsg::Disconnected); - } - fn note_useless_and_restart_sync(&self, id: Origin, r: &str) { - self.useless_peer(id, r); - self.restart(); - } - fn restart(&self) { - let _ = self.sender.send(LinkMsg::Restarted); - } - } - - #[test] - fn process_import_result_works() { - let (result_sender, result_port) = channel::unbounded(); - let (worker_sender, _) = channel::unbounded(); - let (link_sender, link_port) = channel::unbounded(); - let importer_sender = BlockImporter::::new(result_port, worker_sender, None); - let link = TestLink::new(link_sender); - let (ack_sender, start_ack_port) = channel::bounded(4); - let _ = importer_sender.send(BlockImportMsg::Start(Box::new(link.clone()), ack_sender)); - - // Ensure the importer handles Start before any result messages. 
- let _ = start_ack_port.recv(); - - // Send a known - let results = vec![(Ok(BlockImportResult::ImportedKnown(Default::default())), Default::default())]; - let _ = result_sender.send(BlockImportWorkerMsg::Imported(results)).ok().unwrap(); - assert_eq!(link_port.recv(), Ok(LinkMsg::BlockImported)); - - // Send a second known - let results = vec![(Ok(BlockImportResult::ImportedKnown(Default::default())), Default::default())]; - let _ = result_sender.send(BlockImportWorkerMsg::Imported(results)).ok().unwrap(); - assert_eq!(link_port.recv(), Ok(LinkMsg::BlockImported)); - - // Send an unknown - let results = vec![(Ok(BlockImportResult::ImportedUnknown(Default::default(), Default::default(), None)), Default::default())]; - let _ = result_sender.send(BlockImportWorkerMsg::Imported(results)).ok().unwrap(); - assert_eq!(link_port.recv(), Ok(LinkMsg::BlockImported)); - - // Send an unknown with peer and bad justification - let peer_id = PeerId::random(); - let results = vec![(Ok(BlockImportResult::ImportedUnknown(Default::default(), - ImportedAux { needs_justification: true, clear_justification_requests: false, bad_justification: true }, - Some(peer_id.clone()))), Default::default())]; - let _ = result_sender.send(BlockImportWorkerMsg::Imported(results)).ok().unwrap(); - assert_eq!(link_port.recv(), Ok(LinkMsg::BlockImported)); - assert_eq!(link_port.recv(), Ok(LinkMsg::Disconnected)); - - // Send an incomplete header - let results = vec![(Err(BlockImportError::IncompleteHeader(Some(peer_id.clone()))), Default::default())]; - let _ = result_sender.send(BlockImportWorkerMsg::Imported(results)).ok().unwrap(); - assert_eq!(link_port.recv(), Ok(LinkMsg::Disconnected)); - assert_eq!(link_port.recv(), Ok(LinkMsg::Restarted)); - - // Send an unknown parent - let results = vec![(Err(BlockImportError::UnknownParent), Default::default())]; - let _ = result_sender.send(BlockImportWorkerMsg::Imported(results)).ok().unwrap(); - assert_eq!(link_port.recv(), Ok(LinkMsg::Restarted)); - - // Send a verification failed - let results = vec![(Err(BlockImportError::VerificationFailed(Some(peer_id.clone()), String::new())), Default::default())]; - let _ = result_sender.send(BlockImportWorkerMsg::Imported(results)).ok().unwrap(); - assert_eq!(link_port.recv(), Ok(LinkMsg::Disconnected)); - assert_eq!(link_port.recv(), Ok(LinkMsg::Restarted)); - - // Send an error - let results = vec![(Err(BlockImportError::Error), Default::default())]; - let _ = result_sender.send(BlockImportWorkerMsg::Imported(results)).ok().unwrap(); - assert_eq!(link_port.recv(), Ok(LinkMsg::Restarted)); - - // Drop the importer sender first, ensuring graceful shutdown. 
- drop(importer_sender); - } + use super::*; + use libp2p::PeerId; + use test_client::runtime::{Block, Hash}; + + #[derive(Debug, PartialEq)] + enum LinkMsg { + BlockImported, + Disconnected, + Restarted, + } + + #[derive(Clone)] + struct TestLink { + sender: Sender, + } + + impl TestLink { + fn new(sender: Sender) -> TestLink { + TestLink { sender } + } + } + + impl Link for TestLink { + fn block_imported(&self, _hash: &Hash, _number: NumberFor) { + let _ = self.sender.send(LinkMsg::BlockImported); + } + fn useless_peer(&self, _: Origin, _: &str) { + let _ = self.sender.send(LinkMsg::Disconnected); + } + fn note_useless_and_restart_sync(&self, id: Origin, r: &str) { + self.useless_peer(id, r); + self.restart(); + } + fn restart(&self) { + let _ = self.sender.send(LinkMsg::Restarted); + } + } + + #[test] + fn process_import_result_works() { + let (result_sender, result_port) = channel::unbounded(); + let (worker_sender, _) = channel::unbounded(); + let (link_sender, link_port) = channel::unbounded(); + let importer_sender = BlockImporter::::new(result_port, worker_sender, None); + let link = TestLink::new(link_sender); + let (ack_sender, start_ack_port) = channel::bounded(4); + let _ = importer_sender.send(BlockImportMsg::Start(Box::new(link.clone()), ack_sender)); + + // Ensure the importer handles Start before any result messages. + let _ = start_ack_port.recv(); + + // Send a known + let results = vec![( + Ok(BlockImportResult::ImportedKnown(Default::default())), + Default::default(), + )]; + let _ = result_sender + .send(BlockImportWorkerMsg::Imported(results)) + .ok() + .unwrap(); + assert_eq!(link_port.recv(), Ok(LinkMsg::BlockImported)); + + // Send a second known + let results = vec![( + Ok(BlockImportResult::ImportedKnown(Default::default())), + Default::default(), + )]; + let _ = result_sender + .send(BlockImportWorkerMsg::Imported(results)) + .ok() + .unwrap(); + assert_eq!(link_port.recv(), Ok(LinkMsg::BlockImported)); + + // Send an unknown + let results = vec![( + Ok(BlockImportResult::ImportedUnknown( + Default::default(), + Default::default(), + None, + )), + Default::default(), + )]; + let _ = result_sender + .send(BlockImportWorkerMsg::Imported(results)) + .ok() + .unwrap(); + assert_eq!(link_port.recv(), Ok(LinkMsg::BlockImported)); + + // Send an unknown with peer and bad justification + let peer_id = PeerId::random(); + let results = vec![( + Ok(BlockImportResult::ImportedUnknown( + Default::default(), + ImportedAux { + needs_justification: true, + clear_justification_requests: false, + bad_justification: true, + }, + Some(peer_id.clone()), + )), + Default::default(), + )]; + let _ = result_sender + .send(BlockImportWorkerMsg::Imported(results)) + .ok() + .unwrap(); + assert_eq!(link_port.recv(), Ok(LinkMsg::BlockImported)); + assert_eq!(link_port.recv(), Ok(LinkMsg::Disconnected)); + + // Send an incomplete header + let results = vec![( + Err(BlockImportError::IncompleteHeader(Some(peer_id.clone()))), + Default::default(), + )]; + let _ = result_sender + .send(BlockImportWorkerMsg::Imported(results)) + .ok() + .unwrap(); + assert_eq!(link_port.recv(), Ok(LinkMsg::Disconnected)); + assert_eq!(link_port.recv(), Ok(LinkMsg::Restarted)); + + // Send an unknown parent + let results = vec![(Err(BlockImportError::UnknownParent), Default::default())]; + let _ = result_sender + .send(BlockImportWorkerMsg::Imported(results)) + .ok() + .unwrap(); + assert_eq!(link_port.recv(), Ok(LinkMsg::Restarted)); + + // Send a verification failed + let results = vec![( + 
Err(BlockImportError::VerificationFailed( + Some(peer_id.clone()), + String::new(), + )), + Default::default(), + )]; + let _ = result_sender + .send(BlockImportWorkerMsg::Imported(results)) + .ok() + .unwrap(); + assert_eq!(link_port.recv(), Ok(LinkMsg::Disconnected)); + assert_eq!(link_port.recv(), Ok(LinkMsg::Restarted)); + + // Send an error + let results = vec![(Err(BlockImportError::Error), Default::default())]; + let _ = result_sender + .send(BlockImportWorkerMsg::Imported(results)) + .ok() + .unwrap(); + assert_eq!(link_port.recv(), Ok(LinkMsg::Restarted)); + + // Drop the importer sender first, ensuring graceful shutdown. + drop(importer_sender); + } } - diff --git a/core/consensus/common/src/lib.rs b/core/consensus/common/src/lib.rs index 134a34454e..39a61d5836 100644 --- a/core/consensus/common/src/lib.rs +++ b/core/consensus/common/src/lib.rs @@ -22,54 +22,59 @@ // This provides "unused" building blocks to other crates #![allow(dead_code)] - // our error-chain could potentially blow up otherwise -#![recursion_limit="128"] +#![recursion_limit = "128"] -#[macro_use] extern crate crossbeam_channel; -#[macro_use] extern crate log; +#[macro_use] +extern crate crossbeam_channel; +#[macro_use] +extern crate log; use std::sync::Arc; use std::time::Duration; -use runtime_primitives::generic::BlockId; -use runtime_primitives::traits::{AuthorityIdFor, Block}; use futures::prelude::*; pub use inherents::InherentData; +use runtime_primitives::generic::BlockId; +use runtime_primitives::traits::{AuthorityIdFor, Block}; -pub mod offline_tracker; -pub mod error; mod block_import; -pub mod import_queue; +pub mod error; pub mod evaluation; +pub mod import_queue; +pub mod offline_tracker; // block size limit. const MAX_BLOCK_SIZE: usize = 4 * 1024 * 1024 + 512; pub use self::error::{Error, ErrorKind}; pub use block_import::{ - BlockImport, BlockOrigin, ForkChoiceStrategy, ImportedAux, ImportBlock, ImportResult, JustificationImport, + BlockImport, BlockOrigin, ForkChoiceStrategy, ImportBlock, ImportResult, ImportedAux, + JustificationImport, }; /// Trait for getting the authorities at a given block. pub trait Authorities { - type Error: std::error::Error + Send + 'static; + type Error: std::error::Error + Send + 'static; - /// Get the authorities at the given block. - fn authorities(&self, at: &BlockId) -> Result>, Self::Error>; + /// Get the authorities at the given block. + fn authorities(&self, at: &BlockId) -> Result>, Self::Error>; } /// Environment producer for a Consensus instance. Creates proposer instance and communication streams. pub trait Environment { - /// The proposer type this creates. - type Proposer: Proposer; - /// Error which can occur upon creation. - type Error: From; - - /// Initialize the proposal logic on top of a specific header. Provide - /// the authorities at that header. - fn init(&self, parent_header: &B::Header, authorities: &[AuthorityIdFor]) - -> Result; + /// The proposer type this creates. + type Proposer: Proposer; + /// Error which can occur upon creation. + type Error: From; + + /// Initialize the proposal logic on top of a specific header. Provide + /// the authorities at that header. + fn init( + &self, + parent_header: &B::Header, + authorities: &[AuthorityIdFor], + ) -> Result; } /// Logic for a proposer. @@ -79,12 +84,12 @@ pub trait Environment { /// /// Proposers are generic over bits of "consensus data" which are engine-specific. pub trait Proposer { - /// Error type which can occur when proposing or evaluating. 
- type Error: From + ::std::fmt::Debug + 'static; - /// Future that resolves to a committed proposal. - type Create: IntoFuture; - /// Create a proposal. - fn propose(&self, inherent_data: InherentData, max_duration: Duration) -> Self::Create; + /// Error type which can occur when proposing or evaluating. + type Error: From + ::std::fmt::Debug + 'static; + /// Future that resolves to a committed proposal. + type Create: IntoFuture; + /// Create a proposal. + fn propose(&self, inherent_data: InherentData, max_duration: Duration) -> Self::Create; } /// An oracle for when major synchronization work is being undertaken. @@ -92,12 +97,12 @@ pub trait Proposer { /// Generally, consensus authoring work isn't undertaken while well behind /// the head of the chain. pub trait SyncOracle { - /// Whether the synchronization service is undergoing major sync. - /// Returns true if so. - fn is_major_syncing(&self) -> bool; - /// Whether the synchronization service is offline. - /// Returns true if so. - fn is_offline(&self) -> bool; + /// Whether the synchronization service is undergoing major sync. + /// Returns true if so. + fn is_major_syncing(&self) -> bool; + /// Whether the synchronization service is offline. + /// Returns true if so. + fn is_offline(&self) -> bool; } /// A synchronization oracle for when there is no network. @@ -105,24 +110,28 @@ pub trait SyncOracle { pub struct NoNetwork; impl SyncOracle for NoNetwork { - fn is_major_syncing(&self) -> bool { false } - fn is_offline(&self) -> bool { false } + fn is_major_syncing(&self) -> bool { + false + } + fn is_offline(&self) -> bool { + false + } } impl SyncOracle for Arc { - fn is_major_syncing(&self) -> bool { - T::is_major_syncing(&*self) - } - fn is_offline(&self) -> bool { - T::is_offline(&*self) - } + fn is_major_syncing(&self) -> bool { + T::is_major_syncing(&*self) + } + fn is_offline(&self) -> bool { + T::is_offline(&*self) + } } /// A list of all well known keys in the cache. pub mod well_known_cache_keys { - /// The type representing cache keys. - pub type Id = [u8; 4]; + /// The type representing cache keys. + pub type Id = [u8; 4]; - /// A list of authorities. - pub const AUTHORITIES: Id = *b"auth"; + /// A list of authorities. + pub const AUTHORITIES: Id = *b"auth"; } diff --git a/core/consensus/common/src/offline_tracker.rs b/core/consensus/common/src/offline_tracker.rs index 3c6755d941..2d0b5a0da3 100644 --- a/core/consensus/common/src/offline_tracker.rs +++ b/core/consensus/common/src/offline_tracker.rs @@ -17,120 +17,134 @@ //! Tracks offline validators. use std::collections::HashMap; -use std::time::{Instant, Duration}; +use std::time::{Duration, Instant}; // time before we report a validator. 
const REPORT_TIME: Duration = Duration::from_secs(60 * 5); struct Observed { - last_round_end: Instant, - offline_since: Instant, + last_round_end: Instant, + offline_since: Instant, } impl Observed { - fn new() -> Observed { - let now = Instant::now(); - Observed { - last_round_end: now, - offline_since: now, - } - } - - fn note_round_end(&mut self, was_online: bool) { - let now = Instant::now(); - - self.last_round_end = now; - if was_online { - self.offline_since = now; - } - } - - fn is_active(&self) -> bool { - // can happen if clocks are not monotonic - if self.offline_since > self.last_round_end { return true } - self.last_round_end.duration_since(self.offline_since) < REPORT_TIME - } + fn new() -> Observed { + let now = Instant::now(); + Observed { + last_round_end: now, + offline_since: now, + } + } + + fn note_round_end(&mut self, was_online: bool) { + let now = Instant::now(); + + self.last_round_end = now; + if was_online { + self.offline_since = now; + } + } + + fn is_active(&self) -> bool { + // can happen if clocks are not monotonic + if self.offline_since > self.last_round_end { + return true; + } + self.last_round_end.duration_since(self.offline_since) < REPORT_TIME + } } /// Tracks offline validators and can issue a report for those offline. pub struct OfflineTracker { - observed: HashMap, + observed: HashMap, } impl OfflineTracker { - /// Create a new tracker. - pub fn new() -> Self { - OfflineTracker { observed: HashMap::new() } - } - - /// Note new consensus is starting with the given set of validators. - pub fn note_new_block(&mut self, validators: &[AuthorityId]) { - use std::collections::HashSet; - - let set: HashSet<_> = validators.iter().cloned().collect(); - self.observed.retain(|k, _| set.contains(k)); - } - - /// Note that a round has ended. - pub fn note_round_end(&mut self, validator: AuthorityId, was_online: bool) { - self.observed.entry(validator) - .or_insert_with(Observed::new) - .note_round_end(was_online); - } - - /// Generate a vector of indices for offline account IDs. - pub fn reports(&self, validators: &[AuthorityId]) -> Vec { - validators.iter() - .enumerate() - .filter_map(|(i, v)| if self.is_online(v) { - None - } else { - Some(i as u32) - }) - .collect() - } - - /// Whether reports on a validator set are consistent with our view of things. - pub fn check_consistency(&self, validators: &[AuthorityId], reports: &[u32]) -> bool { - reports.iter().cloned().all(|r| { - let v = match validators.get(r as usize) { - Some(v) => v, - None => return false, - }; - - // we must think all validators reported externally are offline. - let thinks_online = self.is_online(v); - !thinks_online - }) - } - - fn is_online(&self, v: &AuthorityId) -> bool { - self.observed.get(v).map(Observed::is_active).unwrap_or(true) - } + /// Create a new tracker. + pub fn new() -> Self { + OfflineTracker { + observed: HashMap::new(), + } + } + + /// Note new consensus is starting with the given set of validators. + pub fn note_new_block(&mut self, validators: &[AuthorityId]) { + use std::collections::HashSet; + + let set: HashSet<_> = validators.iter().cloned().collect(); + self.observed.retain(|k, _| set.contains(k)); + } + + /// Note that a round has ended. + pub fn note_round_end(&mut self, validator: AuthorityId, was_online: bool) { + self.observed + .entry(validator) + .or_insert_with(Observed::new) + .note_round_end(was_online); + } + + /// Generate a vector of indices for offline account IDs. 
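+ ///
+ /// A worked sketch with hypothetical data: for validators `[a, b, c]`
+ /// where only `b` is still considered online, this returns `vec![0, 2]`.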

 /// Tracks offline validators and can issue a report for those offline.
 pub struct OfflineTracker<AuthorityId> {
-	observed: HashMap<AuthorityId, Observed>,
+    observed: HashMap<AuthorityId, Observed>,
 }

 impl<AuthorityId: Eq + Clone + std::hash::Hash> OfflineTracker<AuthorityId> {
-	/// Create a new tracker.
-	pub fn new() -> Self {
-		OfflineTracker { observed: HashMap::new() }
-	}
-
-	/// Note new consensus is starting with the given set of validators.
-	pub fn note_new_block(&mut self, validators: &[AuthorityId]) {
-		use std::collections::HashSet;
-
-		let set: HashSet<_> = validators.iter().cloned().collect();
-		self.observed.retain(|k, _| set.contains(k));
-	}
-
-	/// Note that a round has ended.
-	pub fn note_round_end(&mut self, validator: AuthorityId, was_online: bool) {
-		self.observed.entry(validator)
-			.or_insert_with(Observed::new)
-			.note_round_end(was_online);
-	}
-
-	/// Generate a vector of indices for offline account IDs.
-	pub fn reports(&self, validators: &[AuthorityId]) -> Vec<u32> {
-		validators.iter()
-			.enumerate()
-			.filter_map(|(i, v)| if self.is_online(v) {
-				None
-			} else {
-				Some(i as u32)
-			})
-			.collect()
-	}
-
-	/// Whether reports on a validator set are consistent with our view of things.
-	pub fn check_consistency(&self, validators: &[AuthorityId], reports: &[u32]) -> bool {
-		reports.iter().cloned().all(|r| {
-			let v = match validators.get(r as usize) {
-				Some(v) => v,
-				None => return false,
-			};
-
-			// we must think all validators reported externally are offline.
-			let thinks_online = self.is_online(v);
-			!thinks_online
-		})
-	}
-
-	fn is_online(&self, v: &AuthorityId) -> bool {
-		self.observed.get(v).map(Observed::is_active).unwrap_or(true)
-	}
+    /// Create a new tracker.
+    pub fn new() -> Self {
+        OfflineTracker {
+            observed: HashMap::new(),
+        }
+    }
+
+    /// Note new consensus is starting with the given set of validators.
+    pub fn note_new_block(&mut self, validators: &[AuthorityId]) {
+        use std::collections::HashSet;
+
+        let set: HashSet<_> = validators.iter().cloned().collect();
+        self.observed.retain(|k, _| set.contains(k));
+    }
+
+    /// Note that a round has ended.
+    pub fn note_round_end(&mut self, validator: AuthorityId, was_online: bool) {
+        self.observed
+            .entry(validator)
+            .or_insert_with(Observed::new)
+            .note_round_end(was_online);
+    }
+
+    /// Generate a vector of indices for offline account IDs.
+    pub fn reports(&self, validators: &[AuthorityId]) -> Vec<u32> {
+        validators
+            .iter()
+            .enumerate()
+            .filter_map(|(i, v)| {
+                if self.is_online(v) {
+                    None
+                } else {
+                    Some(i as u32)
+                }
+            })
+            .collect()
+    }
+
+    /// Whether reports on a validator set are consistent with our view of things.
+    pub fn check_consistency(&self, validators: &[AuthorityId], reports: &[u32]) -> bool {
+        reports.iter().cloned().all(|r| {
+            let v = match validators.get(r as usize) {
+                Some(v) => v,
+                None => return false,
+            };

+            // we must think all validators reported externally are offline.
+            let thinks_online = self.is_online(v);
+            !thinks_online
+        })
+    }
+
+    fn is_online(&self, v: &AuthorityId) -> bool {
+        self.observed
+            .get(v)
+            .map(Observed::is_active)
+            .unwrap_or(true)
+    }
 }

 #[cfg(test)]
 mod tests {
-	use super::*;
-	use primitives::ed25519::Public as AuthorityId;
-
-	#[test]
-	fn validator_offline() {
-		let mut tracker = OfflineTracker::<AuthorityId>::new();
-		let v = AuthorityId::from_raw([0; 32]);
-		let v2 = AuthorityId::from_raw([1; 32]);
-		let v3 = AuthorityId::from_raw([2; 32]);
-		tracker.note_round_end(v.clone(), true);
-		tracker.note_round_end(v2.clone(), true);
-		tracker.note_round_end(v3.clone(), true);
-
-		let slash_time = REPORT_TIME + Duration::from_secs(5);
-		tracker.observed.get_mut(&v).unwrap().offline_since -= slash_time;
-		tracker.observed.get_mut(&v2).unwrap().offline_since -= slash_time;
-
-		assert_eq!(tracker.reports(&[v.clone(), v2.clone(), v3.clone()]), vec![0, 1]);
-
-		tracker.note_new_block(&[v.clone(), v3.clone()]);
-		assert_eq!(tracker.reports(&[v, v2, v3]), vec![0]);
-	}
+    use super::*;
+    use primitives::ed25519::Public as AuthorityId;
+
+    #[test]
+    fn validator_offline() {
+        let mut tracker = OfflineTracker::<AuthorityId>::new();
+        let v = AuthorityId::from_raw([0; 32]);
+        let v2 = AuthorityId::from_raw([1; 32]);
+        let v3 = AuthorityId::from_raw([2; 32]);
+        tracker.note_round_end(v.clone(), true);
+        tracker.note_round_end(v2.clone(), true);
+        tracker.note_round_end(v3.clone(), true);
+
+        let slash_time = REPORT_TIME + Duration::from_secs(5);
+        tracker.observed.get_mut(&v).unwrap().offline_since -= slash_time;
+        tracker.observed.get_mut(&v2).unwrap().offline_since -= slash_time;
+
+        assert_eq!(
+            tracker.reports(&[v.clone(), v2.clone(), v3.clone()]),
+            vec![0, 1]
+        );
+
+        tracker.note_new_block(&[v.clone(), v3.clone()]);
+        assert_eq!(tracker.reports(&[v, v2, v3]), vec![0]);
+    }
 }
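// Illustration, not part of the patched sources: a hypothetical driver for
// the `OfflineTracker<AuthorityId>` above, mirroring the unit test. The
// bounds follow the impl above; in real use `was_online` comes from round
// participation rather than being passed in.

fn collect_reports<AuthorityId: Eq + Clone + std::hash::Hash>(
    tracker: &mut OfflineTracker<AuthorityId>,
    validators: &[AuthorityId],
    online: &[bool],
) -> Vec<u32> {
    // drop state for validators that left the set
    tracker.note_new_block(validators);
    for (v, was_online) in validators.iter().zip(online) {
        tracker.note_round_end(v.clone(), *was_online);
    }
    // indices of validators whose offline window exceeded REPORT_TIME
    tracker.reports(validators)
}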
diff --git a/core/consensus/rhd/src/error.rs b/core/consensus/rhd/src/error.rs
index 3808110975..563370a888 100644
--- a/core/consensus/rhd/src/error.rs
+++ b/core/consensus/rhd/src/error.rs
@@ -15,45 +15,47 @@
 // along with Substrate. If not, see <http://www.gnu.org/licenses/>.

 //! Error types in the rhododendron Consensus service.

+use client;
 use consensus::error::{Error as CommonError, ErrorKind as CommonErrorKind};
+use error_chain::{
+    error_chain, error_chain_processing, impl_error_chain_kind, impl_error_chain_processed,
+    impl_extract_backtrace,
+};
 use primitives::AuthorityId;
-use client;
-use error_chain::{error_chain, error_chain_processing, impl_error_chain_processed,
-	impl_extract_backtrace, impl_error_chain_kind};

 error_chain! {
-	links {
-		Client(client::error::Error, client::error::ErrorKind);
-		Common(CommonError, CommonErrorKind);
-	}
-	errors {
-		NotValidator(id: AuthorityId) {
-			description("Local account ID not a validator at this block."),
-			display("Local account ID ({:?}) not a validator at this block.", id),
-		}
-		PrematureDestruction {
-			description("Proposer destroyed before finishing proposing or evaluating"),
-			display("Proposer destroyed before finishing proposing or evaluating"),
-		}
-		Timer(e: ::tokio::timer::Error) {
-			description("Failed to register or resolve async timer."),
-			display("Timer failed: {}", e),
-		}
-		Executor(e: ::futures::future::ExecuteErrorKind) {
-			description("Unable to dispatch agreement future"),
-			display("Unable to dispatch agreement future: {:?}", e),
-		}
-	}
+    links {
+        Client(client::error::Error, client::error::ErrorKind);
+        Common(CommonError, CommonErrorKind);
+    }
+    errors {
+        NotValidator(id: AuthorityId) {
+            description("Local account ID not a validator at this block."),
+            display("Local account ID ({:?}) not a validator at this block.", id),
+        }
+        PrematureDestruction {
+            description("Proposer destroyed before finishing proposing or evaluating"),
+            display("Proposer destroyed before finishing proposing or evaluating"),
+        }
+        Timer(e: ::tokio::timer::Error) {
+            description("Failed to register or resolve async timer."),
+            display("Timer failed: {}", e),
+        }
+        Executor(e: ::futures::future::ExecuteErrorKind) {
+            description("Unable to dispatch agreement future"),
+            display("Unable to dispatch agreement future: {:?}", e),
+        }
+    }
 }

 impl From<::rhododendron::InputStreamConcluded> for Error {
-	fn from(_: ::rhododendron::InputStreamConcluded) -> Self {
-		CommonErrorKind::IoTerminated.into()
-	}
+    fn from(_: ::rhododendron::InputStreamConcluded) -> Self {
+        CommonErrorKind::IoTerminated.into()
+    }
 }

 impl From<CommonErrorKind> for Error {
-	fn from(e: CommonErrorKind) -> Self {
-		CommonError::from(e).into()
-	}
+    fn from(e: CommonErrorKind) -> Self {
+        CommonError::from(e).into()
+    }
 }
diff --git a/core/consensus/rhd/src/lib.rs b/core/consensus/rhd/src/lib.rs
index cbdf95d987..d49b5f911e 100644
--- a/core/consensus/rhd/src/lib.rs
+++ b/core/consensus/rhd/src/lib.rs
@@ -30,41 +30,43 @@
 //! In general, this future should be pre-empted by the import of a justification
 //! set for this block height.
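// Illustration, not part of the patched sources: the fault-tolerance
// arithmetic this module relies on (`max_faulty_of` and `bft_threshold`,
// defined further down in this file): of n authorities at most
// f = (n - 1) / 3 may be faulty, and agreement needs n - f signatures.

fn max_faulty_of(n: usize) -> usize {
    n.saturating_sub(1) / 3
}

fn bft_threshold(n: usize) -> usize {
    n - max_faulty_of(n)
}

fn main() {
    // 4 authorities tolerate 1 fault and need 3 signatures;
    // 10 authorities tolerate 3 faults and need 7 signatures.
    assert_eq!((max_faulty_of(4), bft_threshold(4)), (1, 3));
    assert_eq!((max_faulty_of(10), bft_threshold(10)), (3, 7));
}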
-#![cfg(feature="rhd")] +#![cfg(feature = "rhd")] // FIXME #1020 doesn't compile -use std::sync::Arc; use std::sync::atomic::{AtomicUsize, Ordering}; -use std::time::{self, Instant, Duration}; +use std::sync::Arc; +use std::time::{self, Duration, Instant}; -use parity_codec::{Decode, Encode}; +use client::runtime_api::{BlockBuilder as BlockBuilderAPI, BlockBuilderError, Core, OldTxQueue}; +use client::{CallExecutor, Client as SubstrateClient}; +use consensus::error::ErrorKind as CommonErrorKind; use consensus::offline_tracker::OfflineTracker; -use consensus::error::{ErrorKind as CommonErrorKind}; use consensus::{Authorities, BlockImport, Environment, Proposer as BaseProposer}; -use client::{Client as SubstrateClient, CallExecutor}; -use client::runtime_api::{Core, BlockBuilder as BlockBuilderAPI, OldTxQueue, BlockBuilderError}; -use runtime_primitives::generic::{BlockId, Era, ImportResult, ImportBlock, BlockOrigin}; +use parity_codec::{Decode, Encode}; +use primitives::{ed25519, ed25519::LocalizedSignature, AuthorityId, Blake2Hasher}; +use runtime_primitives::generic::{BlockId, BlockOrigin, Era, ImportBlock, ImportResult}; +use runtime_primitives::traits::{ + As, Block as BlockT, BlockNumberToHash, Hash as HashT, Header as HeaderT, +}; use runtime_primitives::traits::{Block, Header}; -use runtime_primitives::traits::{Block as BlockT, Hash as HashT, Header as HeaderT, As, BlockNumberToHash}; use runtime_primitives::Justification; -use primitives::{AuthorityId, ed25519, Blake2Hasher, ed25519::LocalizedSignature}; use srml_system::Trait as SystemT; use node_runtime::Runtime; use transaction_pool::txpool::{self, Pool as TransactionPool}; -use futures::prelude::*; use futures::future; +use futures::prelude::*; use futures::sync::oneshot; +use parking_lot::{Mutex, RwLock}; use tokio::runtime::TaskExecutor; use tokio::timer::Delay; -use parking_lot::{RwLock, Mutex}; +pub use self::error::{Error, ErrorKind}; pub use rhododendron::{ - self, InputStreamConcluded, AdvanceRoundReason, Message as RhdMessage, - Vote as RhdMessageVote, Communication as RhdCommunication, + self, AdvanceRoundReason, Communication as RhdCommunication, InputStreamConcluded, + Message as RhdMessage, Vote as RhdMessageVote, }; -pub use self::error::{Error, ErrorKind}; // pub mod misbehavior_check; mod error; @@ -72,9 +74,9 @@ mod service; // statuses for an agreement mod status { - pub const LIVE: usize = 0; - pub const BAD: usize = 1; - pub const GOOD: usize = 2; + pub const LIVE: usize = 0; + pub const BAD: usize = 1; + pub const GOOD: usize = 2; } pub type Timestamp = u64; @@ -82,12 +84,8 @@ pub type Timestamp = u64; pub type AccountId = ::primitives::H256; /// Localized message type. -pub type LocalizedMessage = rhododendron::LocalizedMessage< - B, - ::Hash, - AuthorityId, - LocalizedSignature ->; +pub type LocalizedMessage = + rhododendron::LocalizedMessage::Hash, AuthorityId, LocalizedSignature>; /// Justification of some hash. pub struct RhdJustification(rhododendron::Justification); @@ -100,42 +98,46 @@ pub struct PrepareJustification(rhododendron::PrepareJustification(rhododendron::UncheckedJustification); impl UncheckedJustification { - /// Create a new, unchecked justification. - pub fn new(digest: H, signatures: Vec, round_number: u32) -> Self { - UncheckedJustification(rhododendron::UncheckedJustification { - digest, - signatures, - round_number, - }) - } + /// Create a new, unchecked justification. 
+ pub fn new(digest: H, signatures: Vec, round_number: u32) -> Self { + UncheckedJustification(rhododendron::UncheckedJustification { + digest, + signatures, + round_number, + }) + } } impl UncheckedJustification { - /// Decode a justification. - pub fn decode_justification(justification: Justification) -> Option { - let inner: rhododendron::UncheckedJustification<_, _> = Decode::decode(&mut &justification[..])?; + /// Decode a justification. + pub fn decode_justification(justification: Justification) -> Option { + let inner: rhododendron::UncheckedJustification<_, _> = + Decode::decode(&mut &justification[..])?; - Some(UncheckedJustification(inner)) - } + Some(UncheckedJustification(inner)) + } } impl Into for UncheckedJustification { - fn into(self) -> Justification { - self.0.encode() - } + fn into(self) -> Justification { + self.0.encode() + } } -impl From> for UncheckedJustification { - fn from(inner: rhododendron::UncheckedJustification) -> Self { - UncheckedJustification(inner) - } +impl From> + for UncheckedJustification +{ + fn from(inner: rhododendron::UncheckedJustification) -> Self { + UncheckedJustification(inner) + } } /// Result of a committed round of BFT pub type Committed = rhododendron::Committed::Hash, LocalizedSignature>; /// Communication between BFT participants. -pub type Communication = rhododendron::Communication::Hash, AuthorityId, LocalizedSignature>; +pub type Communication = + rhododendron::Communication::Hash, AuthorityId, LocalizedSignature>; /// Misbehavior observed from BFT participants. pub type Misbehavior = rhododendron::Misbehavior; @@ -144,68 +146,74 @@ pub type Misbehavior = rhododendron::Misbehavior; pub type SharedOfflineTracker = Arc>; /// A proposer for a rhododendron instance. This must implement the base proposer logic. -pub trait LocalProposer: BaseProposer { - /// Import witnessed rhododendron misbehavior. - fn import_misbehavior(&self, misbehavior: Vec<(AuthorityId, Misbehavior)>); +pub trait LocalProposer: BaseProposer { + /// Import witnessed rhododendron misbehavior. + fn import_misbehavior(&self, misbehavior: Vec<(AuthorityId, Misbehavior)>); - /// Determine the proposer for a given round. This should be a deterministic function - /// with consistent results across all authorities. - fn round_proposer(&self, round_number: u32, authorities: &[AuthorityId]) -> AuthorityId; + /// Determine the proposer for a given round. This should be a deterministic function + /// with consistent results across all authorities. + fn round_proposer(&self, round_number: u32, authorities: &[AuthorityId]) -> AuthorityId; - /// Hook called when a BFT round advances without a proposal. - fn on_round_end(&self, _round_number: u32, _proposed: bool) { } + /// Hook called when a BFT round advances without a proposal. + fn on_round_end(&self, _round_number: u32, _proposed: bool) {} } - /// Build new blocks. pub trait BlockBuilder { - /// Push an extrinsic onto the block. Fails if the extrinsic is invalid. - fn push_extrinsic(&mut self, extrinsic: ::Extrinsic) -> Result<(), Error>; + /// Push an extrinsic onto the block. Fails if the extrinsic is invalid. + fn push_extrinsic(&mut self, extrinsic: ::Extrinsic) -> Result<(), Error>; } /// Local client abstraction for the consensus. 
-pub trait AuthoringApi: - Send - + Sync - + BlockBuilderAPI<::Block, InherentData, Error=::Error> - + Core<::Block, AuthorityId, Error=::Error> - + OldTxQueue<::Block, Error=::Error> +pub trait AuthoringApi: Send + + Sync + + BlockBuilderAPI< + ::Block, + InherentData, + Error = ::Error, + > + Core< + ::Block, + AuthorityId, + Error = ::Error, + > + OldTxQueue< + ::Block, + Error = ::Error, + > { - /// The block used for this API type. - type Block: BlockT; - /// The error used by this API type. - type Error: std::error::Error; - - /// Build a block on top of the given, with inherent extrinsics pre-pushed. - fn build_block) -> ()>( - &self, - at: &BlockId, - inherent_data: InherentData, - build_ctx: F, - ) -> Result; + /// The block used for this API type. + type Block: BlockT; + /// The error used by this API type. + type Error: std::error::Error; + + /// Build a block on top of the given, with inherent extrinsics pre-pushed. + fn build_block) -> ()>( + &self, + at: &BlockId, + inherent_data: InherentData, + build_ctx: F, + ) -> Result; } /// A long-lived network which can create BFT message routing processes on demand. pub trait Network { - /// The block used for this API type. - type Block: BlockT; - /// The input stream of BFT messages. Should never logically conclude. - type Input: Stream,Error=Error>; - /// The output sink of BFT messages. Messages sent here should eventually pass to all - /// current authorities. - type Output: Sink,SinkError=Error>; - - /// Instantiate input and output streams. - fn communication_for( - &self, - validators: &[AuthorityId], - local_id: AuthorityId, - parent_hash: ::Hash, - task_executor: TaskExecutor - ) -> (Self::Input, Self::Output); + /// The block used for this API type. + type Block: BlockT; + /// The input stream of BFT messages. Should never logically conclude. + type Input: Stream, Error = Error>; + /// The output sink of BFT messages. Messages sent here should eventually pass to all + /// current authorities. + type Output: Sink, SinkError = Error>; + + /// Instantiate input and output streams. + fn communication_for( + &self, + validators: &[AuthorityId], + local_id: AuthorityId, + parent_hash: ::Hash, + task_executor: TaskExecutor, + ) -> (Self::Input, Self::Output); } - // caches the round number to start at if we end up with BFT consensus on the same // parent hash more than once (happens if block is bad). // @@ -213,246 +221,266 @@ pub trait Network { // a round advancement vote. #[derive(Debug)] struct RoundCache { - hash: Option, - start_round: u32, + hash: Option, + start_round: u32, } /// Instance of BFT agreement. struct BftInstance { - key: Arc, - authorities: Vec, - parent_hash: B::Hash, - round_timeout_multiplier: u64, - cache: Arc>>, - proposer: P, + key: Arc, + authorities: Vec, + parent_hash: B::Hash, + round_timeout_multiplier: u64, + cache: Arc>>, + proposer: P, } impl> BftInstance - where - B: Clone + Eq, - B::Hash: ::std::hash::Hash - +where + B: Clone + Eq, + B::Hash: ::std::hash::Hash, { - fn round_timeout_duration(&self, round: u32) -> Duration { - // 2^(min(6, x/8)) * 10 - // Grows exponentially starting from 10 seconds, capped at 640 seconds. 
- const ROUND_INCREMENT_STEP: u32 = 8; - - let round = round / ROUND_INCREMENT_STEP; - let round = ::std::cmp::min(6, round); - - let timeout = 1u64.checked_shl(round) - .unwrap_or_else(u64::max_value) - .saturating_mul(self.round_timeout_multiplier); - - Duration::from_secs(timeout) - } - - fn update_round_cache(&self, current_round: u32) { - let mut cache = self.cache.lock(); - if cache.hash.as_ref() == Some(&self.parent_hash) { - cache.start_round = current_round + 1; - } - } + fn round_timeout_duration(&self, round: u32) -> Duration { + // 2^(min(6, x/8)) * 10 + // Grows exponentially starting from 10 seconds, capped at 640 seconds. + const ROUND_INCREMENT_STEP: u32 = 8; + + let round = round / ROUND_INCREMENT_STEP; + let round = ::std::cmp::min(6, round); + + let timeout = 1u64 + .checked_shl(round) + .unwrap_or_else(u64::max_value) + .saturating_mul(self.round_timeout_multiplier); + + Duration::from_secs(timeout) + } + + fn update_round_cache(&self, current_round: u32) { + let mut cache = self.cache.lock(); + if cache.hash.as_ref() == Some(&self.parent_hash) { + cache.start_round = current_round + 1; + } + } } impl> rhododendron::Context for BftInstance - where - B: Clone + Eq, - B::Hash: ::std::hash::Hash, +where + B: Clone + Eq, + B::Hash: ::std::hash::Hash, { - type Error = P::Error; - type AuthorityId = AuthorityId; - type Digest = B::Hash; - type Signature = LocalizedSignature; - type Candidate = B; - type RoundTimeout = Box>; - type CreateProposal = ::Future; - type EvaluateProposal = ::Future; - - fn local_id(&self) -> AuthorityId { - self.key.public().into() - } - - fn proposal(&self) -> Self::CreateProposal { - self.proposer.propose().into_future() - } - - fn candidate_digest(&self, proposal: &B) -> B::Hash { - proposal.hash() - } - - fn sign_local(&self, message: RhdMessage) -> LocalizedMessage { - sign_message(message, &*self.key, self.parent_hash.clone()) - } - - fn round_proposer(&self, round: u32) -> AuthorityId { - self.proposer.round_proposer(round, &self.authorities[..]) - } - - fn proposal_valid(&self, proposal: &B) -> Self::EvaluateProposal { - self.proposer.evaluate(proposal).into_future() - } - - fn begin_round_timeout(&self, round: u32) -> Self::RoundTimeout { - let timeout = self.round_timeout_duration(round); - let fut = Delay::new(Instant::now() + timeout) - .map_err(|e| Error::from(CommonErrorKind::FaultyTimer(e))) - .map_err(Into::into); - - Box::new(fut) - } - - fn on_advance_round( - &self, - accumulator: &rhododendron::Accumulator, - round: u32, - next_round: u32, - reason: AdvanceRoundReason, - ) { - use std::collections::HashSet; - - let collect_pubkeys = |participants: HashSet<&Self::AuthorityId>| participants.into_iter() - .map(|p| ::ed25519::Public::from_raw(p.0)) - .collect::>(); - - let round_timeout = self.round_timeout_duration(next_round); - debug!(target: "rhd", "Advancing to round {} from {}", next_round, round); - debug!(target: "rhd", "Participating authorities: {:?}", + type Error = P::Error; + type AuthorityId = AuthorityId; + type Digest = B::Hash; + type Signature = LocalizedSignature; + type Candidate = B; + type RoundTimeout = Box>; + type CreateProposal = ::Future; + type EvaluateProposal = ::Future; + + fn local_id(&self) -> AuthorityId { + self.key.public().into() + } + + fn proposal(&self) -> Self::CreateProposal { + self.proposer.propose().into_future() + } + + fn candidate_digest(&self, proposal: &B) -> B::Hash { + proposal.hash() + } + + fn sign_local(&self, message: RhdMessage) -> LocalizedMessage { + sign_message(message, 
&*self.key, self.parent_hash.clone()) + } + + fn round_proposer(&self, round: u32) -> AuthorityId { + self.proposer.round_proposer(round, &self.authorities[..]) + } + + fn proposal_valid(&self, proposal: &B) -> Self::EvaluateProposal { + self.proposer.evaluate(proposal).into_future() + } + + fn begin_round_timeout(&self, round: u32) -> Self::RoundTimeout { + let timeout = self.round_timeout_duration(round); + let fut = Delay::new(Instant::now() + timeout) + .map_err(|e| Error::from(CommonErrorKind::FaultyTimer(e))) + .map_err(Into::into); + + Box::new(fut) + } + + fn on_advance_round( + &self, + accumulator: &rhododendron::Accumulator, + round: u32, + next_round: u32, + reason: AdvanceRoundReason, + ) { + use std::collections::HashSet; + + let collect_pubkeys = |participants: HashSet<&Self::AuthorityId>| { + participants + .into_iter() + .map(|p| ::ed25519::Public::from_raw(p.0)) + .collect::>() + }; + + let round_timeout = self.round_timeout_duration(next_round); + debug!(target: "rhd", "Advancing to round {} from {}", next_round, round); + debug!(target: "rhd", "Participating authorities: {:?}", collect_pubkeys(accumulator.participants())); - debug!(target: "rhd", "Voting authorities: {:?}", + debug!(target: "rhd", "Voting authorities: {:?}", collect_pubkeys(accumulator.voters())); - debug!(target: "rhd", "Round {} should end in at most {} seconds from now", next_round, round_timeout.as_secs()); + debug!(target: "rhd", "Round {} should end in at most {} seconds from now", next_round, round_timeout.as_secs()); - self.update_round_cache(next_round); + self.update_round_cache(next_round); - if let AdvanceRoundReason::Timeout = reason { - self.proposer.on_round_end(round, accumulator.proposal().is_some()); - } - } + if let AdvanceRoundReason::Timeout = reason { + self.proposer + .on_round_end(round, accumulator.proposal().is_some()); + } + } } /// A future that resolves either when canceled (witnessing a block from the network at same height) /// or when agreement completes. -pub struct BftFuture where - B: Block + Clone + Eq, - B::Hash: ::std::hash::Hash, - P: LocalProposer, - P: BaseProposer, - InStream: Stream, Error=Error>, - OutSink: Sink, SinkError=Error>, +pub struct BftFuture +where + B: Block + Clone + Eq, + B::Hash: ::std::hash::Hash, + P: LocalProposer, + P: BaseProposer, + InStream: Stream, Error = Error>, + OutSink: Sink, SinkError = Error>, { - inner: rhododendron::Agreement, InStream, OutSink>, - status: Arc, - cancel: oneshot::Receiver<()>, - import: Arc, + inner: rhododendron::Agreement, InStream, OutSink>, + status: Arc, + cancel: oneshot::Receiver<()>, + import: Arc, } -impl Future for BftFuture where - B: Block + Clone + Eq, - B::Hash: ::std::hash::Hash, - P: LocalProposer, - P: BaseProposer, - I: BlockImport, - InStream: Stream, Error=Error>, - OutSink: Sink, SinkError=Error>, +impl Future for BftFuture +where + B: Block + Clone + Eq, + B::Hash: ::std::hash::Hash, + P: LocalProposer, + P: BaseProposer, + I: BlockImport, + InStream: Stream, Error = Error>, + OutSink: Sink, SinkError = Error>, { - type Item = (); - type Error = (); - - fn poll(&mut self) -> ::futures::Poll<(), ()> { - // service has canceled the future. 
bail - let cancel = match self.cancel.poll() { - Ok(Async::Ready(())) | Err(_) => true, - Ok(Async::NotReady) => false, - }; - - let committed = match self.inner.poll().map_err(|_| ()) { - Ok(Async::Ready(x)) => x, - Ok(Async::NotReady) => - return Ok(if cancel { Async::Ready(()) } else { Async::NotReady }), - Err(()) => return Err(()), - }; - - // if something was committed, the round leader must have proposed. - self.inner.context().proposer.on_round_end(committed.round_number, true); - - // If we didn't see the proposal (very unlikely), - // we will get the block from the network later. - if let Some(justified_block) = committed.candidate { - let hash = justified_block.hash(); - info!(target: "rhd", "Importing block #{} ({}) directly from BFT consensus", + type Item = (); + type Error = (); + + fn poll(&mut self) -> ::futures::Poll<(), ()> { + // service has canceled the future. bail + let cancel = match self.cancel.poll() { + Ok(Async::Ready(())) | Err(_) => true, + Ok(Async::NotReady) => false, + }; + + let committed = match self.inner.poll().map_err(|_| ()) { + Ok(Async::Ready(x)) => x, + Ok(Async::NotReady) => { + return Ok(if cancel { + Async::Ready(()) + } else { + Async::NotReady + }); + } + Err(()) => return Err(()), + }; + + // if something was committed, the round leader must have proposed. + self.inner + .context() + .proposer + .on_round_end(committed.round_number, true); + + // If we didn't see the proposal (very unlikely), + // we will get the block from the network later. + if let Some(justified_block) = committed.candidate { + let hash = justified_block.hash(); + info!(target: "rhd", "Importing block #{} ({}) directly from BFT consensus", justified_block.header().number(), hash); - let just: Justification = UncheckedJustification(committed.justification.uncheck()).into(); - let (header, body) = justified_block.deconstruct(); - let import_block = ImportBlock { - origin: BlockOrigin::ConsensusBroadcast, - header: header, - justification: Some(just), - body: Some(body), - finalized: true, - post_digests: Default::default(), - auxiliary: Default::default() - }; - - let new_status = match self.import.import_block(import_block, None) { - Err(e) => { - warn!(target: "rhd", "Error importing block {:?} in round #{}: {:?}", + let just: Justification = + UncheckedJustification(committed.justification.uncheck()).into(); + let (header, body) = justified_block.deconstruct(); + let import_block = ImportBlock { + origin: BlockOrigin::ConsensusBroadcast, + header: header, + justification: Some(just), + body: Some(body), + finalized: true, + post_digests: Default::default(), + auxiliary: Default::default(), + }; + + let new_status = match self.import.import_block(import_block, None) { + Err(e) => { + warn!(target: "rhd", "Error importing block {:?} in round #{}: {:?}", hash, committed.round_number, e); - status::BAD - } - Ok(ImportResult::KnownBad) => { - warn!(target: "rhd", "{:?} was bad block agreed on in round #{}", + status::BAD + } + Ok(ImportResult::KnownBad) => { + warn!(target: "rhd", "{:?} was bad block agreed on in round #{}", hash, committed.round_number); - status::BAD - } - _ => status::GOOD - }; - - self.status.store(new_status, Ordering::Release); - - } else { - // assume good unless we received the proposal. 
- self.status.store(status::GOOD, Ordering::Release); - } - - self.inner.context().update_round_cache(committed.round_number); - - Ok(Async::Ready(())) - } + status::BAD + } + _ => status::GOOD, + }; + + self.status.store(new_status, Ordering::Release); + } else { + // assume good unless we received the proposal. + self.status.store(status::GOOD, Ordering::Release); + } + + self.inner + .context() + .update_round_cache(committed.round_number); + + Ok(Async::Ready(())) + } } -impl Drop for BftFuture where - B: Block + Clone + Eq, - B::Hash: ::std::hash::Hash, - P: LocalProposer, - P: BaseProposer, - InStream: Stream, Error=Error>, - OutSink: Sink, SinkError=Error>, +impl Drop for BftFuture +where + B: Block + Clone + Eq, + B::Hash: ::std::hash::Hash, + P: LocalProposer, + P: BaseProposer, + InStream: Stream, Error = Error>, + OutSink: Sink, SinkError = Error>, { - fn drop(&mut self) { - let misbehavior = self.inner.drain_misbehavior().collect::>(); - self.inner.context().proposer.import_misbehavior(misbehavior); - } + fn drop(&mut self) { + let misbehavior = self.inner.drain_misbehavior().collect::>(); + self.inner + .context() + .proposer + .import_misbehavior(misbehavior); + } } struct AgreementHandle { - status: Arc, - send_cancel: Option>, + status: Arc, + send_cancel: Option>, } impl AgreementHandle { - fn status(&self) -> usize { - self.status.load(Ordering::Acquire) - } + fn status(&self) -> usize { + self.status.load(Ordering::Acquire) + } } impl Drop for AgreementHandle { - fn drop(&mut self) { - if let Some(sender) = self.send_cancel.take() { - let _ = sender.send(()); - } - } + fn drop(&mut self) { + if let Some(sender) = self.send_cancel.take() { + let _ = sender.send(()); + } + } } /// The BftService kicks off the agreement process on top of any blocks it @@ -460,171 +488,172 @@ impl Drop for AgreementHandle { /// /// This assumes that it is being run in the context of a tokio runtime. pub struct BftService { - client: Arc, - live_agreement: Mutex>, - round_cache: Arc>>, - round_timeout_multiplier: u64, - key: Arc, - factory: P, + client: Arc, + live_agreement: Mutex>, + round_cache: Arc>>, + round_timeout_multiplier: u64, + key: Arc, + factory: P, } impl BftService - where - B: Block + Clone + Eq, - P: Environment, - P::Proposer: LocalProposer, - P::Proposer: BaseProposer, - I: BlockImport + Authorities, +where + B: Block + Clone + Eq, + P: Environment, + P::Proposer: LocalProposer, + P::Proposer: BaseProposer, + I: BlockImport + Authorities, { - /// Create a new service instance. - pub fn new(client: Arc, key: Arc, factory: P) -> BftService { - BftService { - client: client, - live_agreement: Mutex::new(None), - round_cache: Arc::new(Mutex::new(RoundCache { - hash: None, - start_round: 0, - })), - round_timeout_multiplier: 10, - key: key, - factory, - } - } - - /// Get the local Authority ID. - pub fn local_id(&self) -> AuthorityId { - self.key.public().into() - } - - /// Signal that a valid block with the given header has been imported. - /// Provide communication streams that are localized to this block. - /// It's recommended to use the communication primitives provided by this - /// module for signature checking and decoding. See `CheckedStream` and - /// `SigningSink` for more details. - /// - /// Messages received on the stream that don't match the expected format - /// will be dropped. - /// - /// If the local signing key is an authority, this will begin the consensus process to build a - /// block on top of it. 
If the executor fails to run the future, an error will be returned. - /// Returns `None` if the agreement on the block with given parent is already in progress. - pub fn build_upon(&self, header: &B::Header, input: In, output: Out) - -> Result>::Proposer, - I, - In, - Out, - >>, P::Error> - where - In: Stream, Error=Error>, - Out: Sink, SinkError=Error>, - { - let hash = header.hash(); - - let mut live_agreement = self.live_agreement.lock(); - let can_build = live_agreement.as_ref() - .map_or(true, |x| self.can_build_on_inner(header, x)); - - if !can_build { - return Ok(None) - } - - let authorities = self.client.authorities(&BlockId::Hash(hash.clone())) - .map_err(|e| CommonErrorKind::Other(Box::new(e)).into())?; - - let n = authorities.len(); - let max_faulty = max_faulty_of(n); - trace!(target: "rhd", "Initiating agreement on top of #{}, {:?}", header.number(), hash); - trace!(target: "rhd", "max_faulty_of({})={}", n, max_faulty); - - let local_id = self.local_id(); - - if !authorities.contains(&local_id) { - // cancel current agreement - live_agreement.take(); - Err(CommonErrorKind::InvalidAuthority(local_id).into())?; - } - - let proposer = self.factory.init(header, &authorities, self.key.clone())?; - - let bft_instance = BftInstance { - proposer, - parent_hash: hash.clone(), - cache: self.round_cache.clone(), - round_timeout_multiplier: self.round_timeout_multiplier, - key: self.key.clone(), - authorities: authorities, - }; - - let mut agreement = rhododendron::agree( - bft_instance, - n, - max_faulty, - input, - output, - ); - - // fast forward round number if necessary. - { - let mut cache = self.round_cache.lock(); - trace!(target: "rhd", "Round cache: {:?}", &*cache); - if cache.hash.as_ref() == Some(&hash) { - trace!(target: "rhd", "Fast-forwarding to round {}", cache.start_round); - let start_round = cache.start_round; - cache.start_round += 1; - - drop(cache); - agreement.fast_forward(start_round); - } else { - *cache = RoundCache { - hash: Some(hash.clone()), - start_round: 1, - }; - } - } - - let status = Arc::new(AtomicUsize::new(status::LIVE)); - let (tx, rx) = oneshot::channel(); - - // cancel current agreement. - *live_agreement = Some((header.clone(), AgreementHandle { - send_cancel: Some(tx), - status: status.clone(), - })); - - Ok(Some(BftFuture { - inner: agreement, - status: status, - cancel: rx, - import: self.client.clone(), - })) - } - - /// Cancel current agreement if any. - pub fn cancel_agreement(&self) { - self.live_agreement.lock().take(); - } - - /// Whether we can build using the given header. - pub fn can_build_on(&self, header: &B::Header) -> bool { - self.live_agreement.lock().as_ref() - .map_or(true, |x| self.can_build_on_inner(header, x)) - } - - /// Get a reference to the underyling client. - pub fn client(&self) -> &I { &*self.client } - - fn can_build_on_inner(&self, header: &B::Header, live: &(B::Header, AgreementHandle)) -> bool { - let hash = header.hash(); - let &(ref live_header, ref handle) = live; - match handle.status() { - _ if *header != *live_header && *live_header.parent_hash() != hash => true, // can always follow with next block. - status::BAD => hash == live_header.hash(), // bad block can be re-agreed on. - _ => false, // canceled won't appear since we overwrite the handle before returning. - } - } + /// Create a new service instance. 
+ pub fn new(client: Arc, key: Arc, factory: P) -> BftService { + BftService { + client: client, + live_agreement: Mutex::new(None), + round_cache: Arc::new(Mutex::new(RoundCache { + hash: None, + start_round: 0, + })), + round_timeout_multiplier: 10, + key: key, + factory, + } + } + + /// Get the local Authority ID. + pub fn local_id(&self) -> AuthorityId { + self.key.public().into() + } + + /// Signal that a valid block with the given header has been imported. + /// Provide communication streams that are localized to this block. + /// It's recommended to use the communication primitives provided by this + /// module for signature checking and decoding. See `CheckedStream` and + /// `SigningSink` for more details. + /// + /// Messages received on the stream that don't match the expected format + /// will be dropped. + /// + /// If the local signing key is an authority, this will begin the consensus process to build a + /// block on top of it. If the executor fails to run the future, an error will be returned. + /// Returns `None` if the agreement on the block with given parent is already in progress. + pub fn build_upon( + &self, + header: &B::Header, + input: In, + output: Out, + ) -> Result>::Proposer, I, In, Out>>, P::Error> + where + In: Stream, Error = Error>, + Out: Sink, SinkError = Error>, + { + let hash = header.hash(); + + let mut live_agreement = self.live_agreement.lock(); + let can_build = live_agreement + .as_ref() + .map_or(true, |x| self.can_build_on_inner(header, x)); + + if !can_build { + return Ok(None); + } + + let authorities = self + .client + .authorities(&BlockId::Hash(hash.clone())) + .map_err(|e| CommonErrorKind::Other(Box::new(e)).into())?; + + let n = authorities.len(); + let max_faulty = max_faulty_of(n); + trace!(target: "rhd", "Initiating agreement on top of #{}, {:?}", header.number(), hash); + trace!(target: "rhd", "max_faulty_of({})={}", n, max_faulty); + + let local_id = self.local_id(); + + if !authorities.contains(&local_id) { + // cancel current agreement + live_agreement.take(); + Err(CommonErrorKind::InvalidAuthority(local_id).into())?; + } + + let proposer = self.factory.init(header, &authorities, self.key.clone())?; + + let bft_instance = BftInstance { + proposer, + parent_hash: hash.clone(), + cache: self.round_cache.clone(), + round_timeout_multiplier: self.round_timeout_multiplier, + key: self.key.clone(), + authorities: authorities, + }; + + let mut agreement = rhododendron::agree(bft_instance, n, max_faulty, input, output); + + // fast forward round number if necessary. + { + let mut cache = self.round_cache.lock(); + trace!(target: "rhd", "Round cache: {:?}", &*cache); + if cache.hash.as_ref() == Some(&hash) { + trace!(target: "rhd", "Fast-forwarding to round {}", cache.start_round); + let start_round = cache.start_round; + cache.start_round += 1; + + drop(cache); + agreement.fast_forward(start_round); + } else { + *cache = RoundCache { + hash: Some(hash.clone()), + start_round: 1, + }; + } + } + + let status = Arc::new(AtomicUsize::new(status::LIVE)); + let (tx, rx) = oneshot::channel(); + + // cancel current agreement. + *live_agreement = Some(( + header.clone(), + AgreementHandle { + send_cancel: Some(tx), + status: status.clone(), + }, + )); + + Ok(Some(BftFuture { + inner: agreement, + status: status, + cancel: rx, + import: self.client.clone(), + })) + } + + /// Cancel current agreement if any. + pub fn cancel_agreement(&self) { + self.live_agreement.lock().take(); + } + + /// Whether we can build using the given header. 
+ pub fn can_build_on(&self, header: &B::Header) -> bool { + self.live_agreement + .lock() + .as_ref() + .map_or(true, |x| self.can_build_on_inner(header, x)) + } + + /// Get a reference to the underyling client. + pub fn client(&self) -> &I { + &*self.client + } + + fn can_build_on_inner(&self, header: &B::Header, live: &(B::Header, AgreementHandle)) -> bool { + let hash = header.hash(); + let &(ref live_header, ref handle) = live; + match handle.status() { + _ if *header != *live_header && *live_header.parent_hash() != hash => true, // can always follow with next block. + status::BAD => hash == live_header.hash(), // bad block can be re-agreed on. + _ => false, // canceled won't appear since we overwrite the handle before returning. + } + } } /// Stream that decodes rhododendron messages and checks signatures. @@ -633,141 +662,147 @@ impl BftService /// will be signed in a way that accounts for it. When using this with /// `BftService::build_upon`, the user should take care to use the same hash as for that. pub struct CheckedStream { - inner: S, - local_id: AuthorityId, - authorities: Vec, - parent_hash: B::Hash, + inner: S, + local_id: AuthorityId, + authorities: Vec, + parent_hash: B::Hash, } impl CheckedStream { - /// Construct a new checked stream. - pub fn new( - inner: S, - local_id: AuthorityId, - authorities: Vec, - parent_hash: B::Hash, - ) -> Self { - CheckedStream { - inner, - local_id, - authorities, - parent_hash, - } - } + /// Construct a new checked stream. + pub fn new( + inner: S, + local_id: AuthorityId, + authorities: Vec, + parent_hash: B::Hash, + ) -> Self { + CheckedStream { + inner, + local_id, + authorities, + parent_hash, + } + } } -impl>> Stream for CheckedStream - where S::Error: From, +impl>> Stream for CheckedStream +where + S::Error: From, { - type Item = Communication; - type Error = S::Error; - - fn poll(&mut self) -> Poll, Self::Error> { - use rhododendron::LocalizedMessage as RhdLocalized; - loop { - match self.inner.poll()? { - Async::Ready(Some(item)) => { - let comms: Communication = match Decode::decode(&mut &item[..]) { - Some(x) => x, - None => continue, - }; - - match comms { - RhdCommunication::Auxiliary(prepare_just) => { - let checked = check_prepare_justification::( - &self.authorities, - self.parent_hash, - UncheckedJustification(prepare_just.uncheck()), - ); - if let Ok(checked) = checked { - return Ok(Async::Ready( - Some(RhdCommunication::Auxiliary(checked.0)) - )); - } - } - RhdCommunication::Consensus(RhdLocalized::Propose(p)) => { - if p.sender == self.local_id { continue } - - let checked = check_proposal::( - &self.authorities, - &self.parent_hash, - &p, - ); - - if let Ok(()) = checked { - return Ok(Async::Ready( - Some(RhdCommunication::Consensus(RhdLocalized::Propose(p))) - )); - } - } - RhdCommunication::Consensus(RhdLocalized::Vote(v)) => { - if v.sender == self.local_id { continue } - - let checked = check_vote::( - &self.authorities, - &self.parent_hash, - &v, - ); - - if let Ok(()) = checked { - return Ok(Async::Ready( - Some(RhdCommunication::Consensus(RhdLocalized::Vote(v))) - )); - } - } - } - } - Async::Ready(None) => return Ok(Async::Ready(None)), - Async::NotReady => return Ok(Async::NotReady), - } - } - } + type Item = Communication; + type Error = S::Error; + + fn poll(&mut self) -> Poll, Self::Error> { + use rhododendron::LocalizedMessage as RhdLocalized; + loop { + match self.inner.poll()? 
{
+                Async::Ready(Some(item)) => {
+                    let comms: Communication<B> = match Decode::decode(&mut &item[..]) {
+                        Some(x) => x,
+                        None => continue,
+                    };
+
+                    match comms {
+                        RhdCommunication::Auxiliary(prepare_just) => {
+                            let checked = check_prepare_justification::<B>(
+                                &self.authorities,
+                                self.parent_hash,
+                                UncheckedJustification(prepare_just.uncheck()),
+                            );
+                            if let Ok(checked) = checked {
+                                return Ok(Async::Ready(Some(RhdCommunication::Auxiliary(
+                                    checked.0,
+                                ))));
+                            }
+                        }
+                        RhdCommunication::Consensus(RhdLocalized::Propose(p)) => {
+                            if p.sender == self.local_id {
+                                continue;
+                            }
+
+                            let checked =
+                                check_proposal::<B>(&self.authorities, &self.parent_hash, &p);
+
+                            if let Ok(()) = checked {
+                                return Ok(Async::Ready(Some(RhdCommunication::Consensus(
+                                    RhdLocalized::Propose(p),
+                                ))));
+                            }
+                        }
+                        RhdCommunication::Consensus(RhdLocalized::Vote(v)) => {
+                            if v.sender == self.local_id {
+                                continue;
+                            }
+
+                            let checked = check_vote::<B>(&self.authorities, &self.parent_hash, &v);
+
+                            if let Ok(()) = checked {
+                                return Ok(Async::Ready(Some(RhdCommunication::Consensus(
+                                    RhdLocalized::Vote(v),
+                                ))));
+                            }
+                        }
+                    }
+                }
+                Async::Ready(None) => return Ok(Async::Ready(None)),
+                Async::NotReady => return Ok(Async::NotReady),
+            }
+        }
+    }
 }

 /// Given a total number of authorities, yield the maximum faulty that would be allowed.
 /// This will always be under 1/3.
 pub fn max_faulty_of(n: usize) -> usize {
-	n.saturating_sub(1) / 3
+    n.saturating_sub(1) / 3
 }

 /// Given a total number of authorities, yield the minimum required signatures.
 /// This will always be over 2/3.
 pub fn bft_threshold(n: usize) -> usize {
-	n - max_faulty_of(n)
+    n - max_faulty_of(n)
 }

 // actions in the signature scheme.
 #[derive(Encode)]
 enum Action<B, H> {
-	Prepare(u32, H),
-	Commit(u32, H),
-	AdvanceRound(u32),
-	// signatures of header hash and full candidate are both included.
-	ProposeHeader(u32, H),
-	Propose(u32, B),
+    Prepare(u32, H),
+    Commit(u32, H),
+    AdvanceRound(u32),
+    // signatures of header hash and full candidate are both included.
+    ProposeHeader(u32, H),
+    Propose(u32, B),
 }

 // encode something in a way which is localized to a specific parent-hash
 fn localized_encode<H: Encode, E: Encode>(parent_hash: H, value: E) -> Vec<u8> {
-	(parent_hash, value).encode()
+    (parent_hash, value).encode()
 }

 fn check_justification_signed_message<H>(
-	authorities: &[AuthorityId],
-	message: &[u8],
-	just: UncheckedJustification<H>)
--> Result<RhdJustification<H>, UncheckedJustification<H>> {
-	// additional error information could be useful here.
-	just.0.check(authorities.len() - max_faulty_of(authorities.len()), |_, _, sig| {
-		let auth_id = sig.signer.clone().into();
-		if !authorities.contains(&auth_id) { return None }
-
-		if ed25519::Pair::verify(&sig.signature, message, &sig.signer) {
-			Some(sig.signer.0)
-		} else {
-			None
-		}
-	}).map(RhdJustification).map_err(UncheckedJustification)
+    authorities: &[AuthorityId],
+    message: &[u8],
+    just: UncheckedJustification<H>,
+) -> Result<RhdJustification<H>, UncheckedJustification<H>> {
+    // additional error information could be useful here.
+    just.0
+        .check(
+            authorities.len() - max_faulty_of(authorities.len()),
+            |_, _, sig| {
+                let auth_id = sig.signer.clone().into();
+                if !authorities.contains(&auth_id) {
+                    return None;
+                }
+
+                if ed25519::Pair::verify(&sig.signature, message, &sig.signer) {
+                    Some(sig.signer.0)
+                } else {
+                    None
+                }
+            },
+        )
+        .map(RhdJustification)
+        .map_err(UncheckedJustification)
 }

 /// Check a full justification for a header hash.
@@ -775,310 +810,332 @@ fn check_justification_signed_message(
 ///
 /// On failure, returns the justification back.
 pub fn check_justification<B: Block>(
-	authorities: &[AuthorityId],
-	parent: B::Hash,
-	just: UncheckedJustification<B::Hash>
+    authorities: &[AuthorityId],
+    parent: B::Hash,
+    just: UncheckedJustification<B::Hash>,
 ) -> Result<RhdJustification<B::Hash>, UncheckedJustification<B::Hash>> {
-	let vote: Action<B, B::Hash> = Action::Commit(just.0.round_number as u32, just.0.digest.clone());
-	let message = localized_encode(parent, vote);
+    let vote: Action<B, B::Hash> =
+        Action::Commit(just.0.round_number as u32, just.0.digest.clone());
+    let message = localized_encode(parent, vote);

-	check_justification_signed_message(authorities, &message[..], just)
+    check_justification_signed_message(authorities, &message[..], just)
 }

 /// Check a prepare justification for a header hash.
 /// Provide all valid authorities.
 ///
 /// On failure, returns the justification back.
-pub fn check_prepare_justification<B: Block>(authorities: &[AuthorityId], parent: B::Hash, just: UncheckedJustification<B::Hash>)
-	-> Result<PrepareJustification<B::Hash>, UncheckedJustification<B::Hash>>
-{
-	let vote: Action<B, B::Hash> = Action::Prepare(just.0.round_number as u32, just.0.digest.clone());
-	let message = localized_encode(parent, vote);
-
-	check_justification_signed_message(authorities, &message[..], just).map(|e| PrepareJustification(e.0))
+pub fn check_prepare_justification<B: Block>(
+    authorities: &[AuthorityId],
+    parent: B::Hash,
+    just: UncheckedJustification<B::Hash>,
+) -> Result<PrepareJustification<B::Hash>, UncheckedJustification<B::Hash>> {
+    let vote: Action<B, B::Hash> =
+        Action::Prepare(just.0.round_number as u32, just.0.digest.clone());
+    let message = localized_encode(parent, vote);
+
+    check_justification_signed_message(authorities, &message[..], just)
+        .map(|e| PrepareJustification(e.0))
 }

 /// Check proposal message signatures and authority.
 /// Provide all valid authorities.
 pub fn check_proposal<B: Block>(
-	authorities: &[AuthorityId],
-	parent_hash: &B::Hash,
-	propose: &rhododendron::LocalizedProposal<B, B::Hash, AuthorityId, LocalizedSignature>)
-	-> Result<(), Error>
-{
-	if !authorities.contains(&propose.sender) {
-		return Err(CommonErrorKind::InvalidAuthority(propose.sender.into()).into());
-	}
-
-	let action_header = Action::ProposeHeader(propose.round_number as u32, propose.digest.clone());
-	let action_propose = Action::Propose(propose.round_number as u32, propose.proposal.clone());
-	check_action::<B>(action_header, parent_hash, &propose.digest_signature)?;
-	check_action::<B>(action_propose, parent_hash, &propose.full_signature)
+    authorities: &[AuthorityId],
+    parent_hash: &B::Hash,
+    propose: &rhododendron::LocalizedProposal<B, B::Hash, AuthorityId, LocalizedSignature>,
+) -> Result<(), Error> {
+    if !authorities.contains(&propose.sender) {
+        return Err(CommonErrorKind::InvalidAuthority(propose.sender.into()).into());
+    }
+
+    let action_header = Action::ProposeHeader(propose.round_number as u32, propose.digest.clone());
+    let action_propose = Action::Propose(propose.round_number as u32, propose.proposal.clone());
+    check_action::<B>(action_header, parent_hash, &propose.digest_signature)?;
+    check_action::<B>(action_propose, parent_hash, &propose.full_signature)
 }
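// Illustration, not part of the patched sources: why every signable action is
// encoded together with the parent hash (`localized_encode` above). The
// parent hash acts as a domain separator, so a signature produced on one fork
// cannot be replayed under a different parent. `DemoAction` and the
// fixed-size hash are simplified stand-ins for the real `Action` and hash
// types; `parity_codec` with its derive support is assumed, matching the
// imports used by this module.

use parity_codec::Encode;

#[derive(Encode)]
enum DemoAction {
    Prepare(u32, [u8; 32]),
    Commit(u32, [u8; 32]),
}

fn localized_encode_demo(parent_hash: [u8; 32], action: DemoAction) -> Vec<u8> {
    // tupling parent hash and payload, exactly as `localized_encode` does
    (parent_hash, action).encode()
}

fn main() {
    let digest = [7u8; 32];
    let on_parent_a = localized_encode_demo([0u8; 32], DemoAction::Commit(1, digest));
    let on_parent_b = localized_encode_demo([1u8; 32], DemoAction::Commit(1, digest));
    // same vote, different parent => different bytes to sign
    assert_ne!(on_parent_a, on_parent_b);
}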

 /// Check vote message signatures and authority.
 /// Provide all valid authorities.
 pub fn check_vote<B: Block>(
-	authorities: &[AuthorityId],
-	parent_hash: &B::Hash,
-	vote: &rhododendron::LocalizedVote<B::Hash, AuthorityId, LocalizedSignature>)
-	-> Result<(), Error>
-{
-	if !authorities.contains(&vote.sender) {
-		return Err(CommonErrorKind::InvalidAuthority(vote.sender.into()).into());
-	}
-
-	let action = match vote.vote {
-		rhododendron::Vote::Prepare(r, ref h) => Action::Prepare(r as u32, h.clone()),
-		rhododendron::Vote::Commit(r, ref h) => Action::Commit(r as u32, h.clone()),
-		rhododendron::Vote::AdvanceRound(r) => Action::AdvanceRound(r as u32),
-	};
-	check_action::<B>(action, parent_hash, &vote.signature)
+    authorities: &[AuthorityId],
+    parent_hash: &B::Hash,
+    vote: &rhododendron::LocalizedVote<B::Hash, AuthorityId, LocalizedSignature>,
+) -> Result<(), Error> {
+    if !authorities.contains(&vote.sender) {
+        return Err(CommonErrorKind::InvalidAuthority(vote.sender.into()).into());
+    }
+
+    let action = match vote.vote {
+        rhododendron::Vote::Prepare(r, ref h) => Action::Prepare(r as u32, h.clone()),
+        rhododendron::Vote::Commit(r, ref h) => Action::Commit(r as u32, h.clone()),
+        rhododendron::Vote::AdvanceRound(r) => Action::AdvanceRound(r as u32),
+    };
+    check_action::<B>(action, parent_hash, &vote.signature)
 }

-fn check_action<B: Block>(action: Action<B, B::Hash>, parent_hash: &B::Hash, sig: &LocalizedSignature) -> Result<(), Error> {
-	let message = localized_encode(*parent_hash, action);
-	if ed25519::Pair::verify(&sig.signature, &message, &sig.signer) {
-		Ok(())
-	} else {
-		Err(CommonErrorKind::InvalidSignature(sig.signature.into(), sig.signer.clone().into()).into())
-	}
+fn check_action<B: Block>(
+    action: Action<B, B::Hash>,
+    parent_hash: &B::Hash,
+    sig: &LocalizedSignature,
+) -> Result<(), Error> {
+    let message = localized_encode(*parent_hash, action);
+    if ed25519::Pair::verify(&sig.signature, &message, &sig.signer) {
+        Ok(())
+    } else {
+        Err(
+            CommonErrorKind::InvalidSignature(sig.signature.into(), sig.signer.clone().into())
+                .into(),
+        )
+    }
 }

 /// Sign a BFT message with the given key.
pub fn sign_message( - message: RhdMessage, - key: &ed25519::Pair, - parent_hash: B::Hash + message: RhdMessage, + key: &ed25519::Pair, + parent_hash: B::Hash, ) -> LocalizedMessage { - let signer = key.public(); - - let sign_action = |action: Action| { - let to_sign = localized_encode(parent_hash.clone(), action); - - LocalizedSignature { - signer: signer.clone(), - signature: key.sign(&to_sign), - } - }; - - match message { - RhdMessage::Propose(r, proposal) => { - let header_hash = proposal.hash(); - let action_header = Action::ProposeHeader(r as u32, header_hash.clone()); - let action_propose = Action::Propose(r as u32, proposal.clone()); - - rhododendron::LocalizedMessage::Propose(rhododendron::LocalizedProposal { - round_number: r, - proposal, - digest: header_hash, - sender: signer.clone().into(), - digest_signature: sign_action(action_header), - full_signature: sign_action(action_propose), - }) - } - RhdMessage::Vote(vote) => rhododendron::LocalizedMessage::Vote({ - let action = match vote { - RhdMessageVote::Prepare(r, h) => Action::Prepare(r as u32, h), - RhdMessageVote::Commit(r, h) => Action::Commit(r as u32, h), - RhdMessageVote::AdvanceRound(r) => Action::AdvanceRound(r as u32), - }; - - rhododendron::LocalizedVote { - vote: vote, - sender: signer.clone().into(), - signature: sign_action(action), - } - }) - } + let signer = key.public(); + + let sign_action = |action: Action| { + let to_sign = localized_encode(parent_hash.clone(), action); + + LocalizedSignature { + signer: signer.clone(), + signature: key.sign(&to_sign), + } + }; + + match message { + RhdMessage::Propose(r, proposal) => { + let header_hash = proposal.hash(); + let action_header = Action::ProposeHeader(r as u32, header_hash.clone()); + let action_propose = Action::Propose(r as u32, proposal.clone()); + + rhododendron::LocalizedMessage::Propose(rhododendron::LocalizedProposal { + round_number: r, + proposal, + digest: header_hash, + sender: signer.clone().into(), + digest_signature: sign_action(action_header), + full_signature: sign_action(action_propose), + }) + } + RhdMessage::Vote(vote) => rhododendron::LocalizedMessage::Vote({ + let action = match vote { + RhdMessageVote::Prepare(r, h) => Action::Prepare(r as u32, h), + RhdMessageVote::Commit(r, h) => Action::Commit(r as u32, h), + RhdMessageVote::AdvanceRound(r) => Action::AdvanceRound(r as u32), + }; + + rhododendron::LocalizedVote { + vote: vote, + sender: signer.clone().into(), + signature: sign_action(action), + } + }), + } } - -impl<'a, B, E, Block> BlockBuilder for client::block_builder::BlockBuilder<'a, B, E, Block, Blake2Hasher> where - B: client::backend::Backend + Send + Sync + 'static, - E: CallExecutor + Send + Sync + Clone + 'static, - Block: BlockT +impl<'a, B, E, Block> BlockBuilder + for client::block_builder::BlockBuilder<'a, B, E, Block, Blake2Hasher> +where + B: client::backend::Backend + Send + Sync + 'static, + E: CallExecutor + Send + Sync + Clone + 'static, + Block: BlockT, { - fn push_extrinsic(&mut self, extrinsic: ::Extrinsic) -> Result<(), Error> { - client::block_builder::BlockBuilder::push(self, extrinsic).map_err(Into::into) - } + fn push_extrinsic(&mut self, extrinsic: ::Extrinsic) -> Result<(), Error> { + client::block_builder::BlockBuilder::push(self, extrinsic).map_err(Into::into) + } } -impl<'a, B, E, Block> AuthoringApi for SubstrateClient where - B: client::backend::Backend + Send + Sync + 'static, - E: CallExecutor + Send + Sync + Clone + 'static, - Block: BlockT, +impl<'a, B, E, Block> AuthoringApi for 
SubstrateClient +where + B: client::backend::Backend + Send + Sync + 'static, + E: CallExecutor + Send + Sync + Clone + 'static, + Block: BlockT, { - type Block = Block; - type Error = client::error::Error; - - fn build_block) -> ()>( - &self, - at: &BlockId, - inherent_data: InherentData, - mut build_ctx: F, - ) -> Result { - let runtime_version = self.runtime_version_at(at)?; - - let mut block_builder = self.new_block_at(at)?; - if runtime_version.has_api(*b"blkbuild", 1) { - for inherent in self.inherent_extrinsics(at, &inherent_data)? { - block_builder.push(inherent)?; - } - } - - build_ctx(&mut block_builder); - - block_builder.bake().map_err(Into::into) - } + type Block = Block; + type Error = client::error::Error; + + fn build_block) -> ()>( + &self, + at: &BlockId, + inherent_data: InherentData, + mut build_ctx: F, + ) -> Result { + let runtime_version = self.runtime_version_at(at)?; + + let mut block_builder = self.new_block_at(at)?; + if runtime_version.has_api(*b"blkbuild", 1) { + for inherent in self.inherent_extrinsics(at, &inherent_data)? { + block_builder.push(inherent)?; + } + } + + build_ctx(&mut block_builder); + + block_builder.bake().map_err(Into::into) + } } - /// Proposer factory. -pub struct ProposerFactory where - C: AuthoringApi, - A: txpool::ChainApi, +pub struct ProposerFactory +where + C: AuthoringApi, + A: txpool::ChainApi, { - /// The client instance. - pub client: Arc, - /// The transaction pool. - pub transaction_pool: Arc>, - /// The backing network handle. - pub network: N, - /// handle to remote task executor - pub handle: TaskExecutor, - /// Offline-tracker. - pub offline: SharedOfflineTracker, - /// Force delay in evaluation this long. - pub force_delay: u64, + /// The client instance. + pub client: Arc, + /// The transaction pool. + pub transaction_pool: Arc>, + /// The backing network handle. + pub network: N, + /// handle to remote task executor + pub handle: TaskExecutor, + /// Offline-tracker. + pub offline: SharedOfflineTracker, + /// Force delay in evaluation this long. 
+ pub force_delay: u64, } -impl consensus::Environment<::Block> for ProposerFactory where - N: Network::Block>, - C: AuthoringApi + BlockNumberToHash, - A: txpool::ChainApi::Block>, - // <::Block as BlockT>::Hash: - // Into<::Hash> + PartialEq + Into, - Error: From<::Error> +impl consensus::Environment<::Block> for ProposerFactory +where + N: Network::Block>, + C: AuthoringApi + BlockNumberToHash, + A: txpool::ChainApi::Block>, + // <::Block as BlockT>::Hash: + // Into<::Hash> + PartialEq + Into, + Error: From<::Error>, { - type Proposer = Proposer; - type Error = Error; - - fn init( - &self, - parent_header: &<::Block as BlockT>::Header, - authorities: &[AuthorityId], - sign_with: Arc, - ) -> Result { - use runtime_primitives::traits::Hash as HashT; - let parent_hash = parent_header.hash(); - - let id = BlockId::hash(parent_hash); - let random_seed = self.client.random_seed(&id)?; - let random_seed = <<::Block as BlockT>::Header as HeaderT>::Hashing::hash(random_seed.as_ref()); - - let validators = self.client.validators(&id)?; - self.offline.write().note_new_block(&validators[..]); - - info!("Starting consensus session on top of parent {:?}", parent_hash); - - let local_id = sign_with.public().0.into(); - let (input, output) = self.network.communication_for( - authorities, - local_id, - parent_hash.clone(), - self.handle.clone(), - ); - let now = Instant::now(); - let proposer = Proposer { - client: self.client.clone(), - start: now, - local_key: sign_with, - parent_hash, - parent_id: id, - parent_number: *parent_header.number(), - random_seed, - transaction_pool: self.transaction_pool.clone(), - offline: self.offline.clone(), - validators, - minimum_timestamp: current_timestamp() + self.force_delay, - network: self.network.clone() - }; - - Ok(proposer) - } + type Proposer = Proposer; + type Error = Error; + + fn init( + &self, + parent_header: &<::Block as BlockT>::Header, + authorities: &[AuthorityId], + sign_with: Arc, + ) -> Result { + use runtime_primitives::traits::Hash as HashT; + let parent_hash = parent_header.hash(); + + let id = BlockId::hash(parent_hash); + let random_seed = self.client.random_seed(&id)?; + let random_seed = + <<::Block as BlockT>::Header as HeaderT>::Hashing::hash( + random_seed.as_ref(), + ); + + let validators = self.client.validators(&id)?; + self.offline.write().note_new_block(&validators[..]); + + info!( + "Starting consensus session on top of parent {:?}", + parent_hash + ); + + let local_id = sign_with.public().0.into(); + let (input, output) = self.network.communication_for( + authorities, + local_id, + parent_hash.clone(), + self.handle.clone(), + ); + let now = Instant::now(); + let proposer = Proposer { + client: self.client.clone(), + start: now, + local_key: sign_with, + parent_hash, + parent_id: id, + parent_number: *parent_header.number(), + random_seed, + transaction_pool: self.transaction_pool.clone(), + offline: self.offline.clone(), + validators, + minimum_timestamp: current_timestamp() + self.force_delay, + network: self.network.clone(), + }; + + Ok(proposer) + } } /// The proposer logic. 
pub struct Proposer { - client: Arc, - start: Instant, - local_key: Arc, - parent_hash: <::Block as BlockT>::Hash, - parent_id: BlockId<::Block>, - parent_number: <<::Block as BlockT>::Header as HeaderT>::Number, - random_seed: <::Block as BlockT>::Hash, - transaction_pool: Arc>, - offline: SharedOfflineTracker, - validators: Vec, - minimum_timestamp: u64, - network: N, + client: Arc, + start: Instant, + local_key: Arc, + parent_hash: <::Block as BlockT>::Hash, + parent_id: BlockId<::Block>, + parent_number: <<::Block as BlockT>::Header as HeaderT>::Number, + random_seed: <::Block as BlockT>::Hash, + transaction_pool: Arc>, + offline: SharedOfflineTracker, + validators: Vec, + minimum_timestamp: u64, + network: N, } impl Proposer { - fn primary_index(&self, round_number: u32, len: usize) -> usize { - use primitives::uint::U256; - - let big_len = U256::from(len); - let offset = U256::from_big_endian(self.random_seed.as_ref()) % big_len; - let offset = offset.low_u64() as usize + round_number as usize; - offset % len - } + fn primary_index(&self, round_number: u32, len: usize) -> usize { + use primitives::uint::U256; + + let big_len = U256::from(len); + let offset = U256::from_big_endian(self.random_seed.as_ref()) % big_len; + let offset = offset.low_u64() as usize + round_number as usize; + offset % len + } } -impl BaseProposer<::Block> for Proposer where - C: AuthoringApi + BlockNumberToHash, - A: txpool::ChainApi::Block>, - <::Block as BlockT>::Hash: - Into<::Hash> + PartialEq + Into, - error::Error: From<::Error> +impl BaseProposer<::Block> for Proposer +where + C: AuthoringApi + BlockNumberToHash, + A: txpool::ChainApi::Block>, + <::Block as BlockT>::Hash: + Into<::Hash> + PartialEq + Into, + error::Error: From<::Error>, { - type Create = Result<::Block, Error>; - type Error = Error; - type Evaluate = Box>; - - fn propose(&self) -> Self::Create { - use runtime_primitives::traits::BlakeTwo256; - - const MAX_VOTE_OFFLINE_SECONDS: Duration = Duration::from_secs(60); - - let timestamp = ::std::cmp::max(self.minimum_timestamp, current_timestamp()); - - let elapsed_since_start = self.start.elapsed(); - let offline_indices = if elapsed_since_start > MAX_VOTE_OFFLINE_SECONDS { - Vec::new() - } else { - self.offline.read().reports(&self.validators[..]) - }; - - if !offline_indices.is_empty() { - info!( - "Submitting offline validators {:?} for slash-vote", - offline_indices.iter().map(|&i| self.validators[i as usize]).collect::>(), - ) - } - - let inherent_data = InherentData { - timestamp, - offline_indices, - }; - - let block = self.client.build_block( - &self.parent_id, - inherent_data, - |block_builder| { - let mut unqueue_invalid = Vec::new(); - self.transaction_pool.ready(|pending_iterator| { + type Create = Result<::Block, Error>; + type Error = Error; + type Evaluate = Box>; + + fn propose(&self) -> Self::Create { + use runtime_primitives::traits::BlakeTwo256; + + const MAX_VOTE_OFFLINE_SECONDS: Duration = Duration::from_secs(60); + + let timestamp = ::std::cmp::max(self.minimum_timestamp, current_timestamp()); + + let elapsed_since_start = self.start.elapsed(); + let offline_indices = if elapsed_since_start > MAX_VOTE_OFFLINE_SECONDS { + Vec::new() + } else { + self.offline.read().reports(&self.validators[..]) + }; + + if !offline_indices.is_empty() { + info!( + "Submitting offline validators {:?} for slash-vote", + offline_indices + .iter() + .map(|&i| self.validators[i as usize]) + .collect::>(), + ) + } + + let inherent_data = InherentData { + timestamp, + offline_indices, + }; + + 
let block = + self.client + .build_block(&self.parent_id, inherent_data, |block_builder| { + let mut unqueue_invalid = Vec::new(); + self.transaction_pool.ready(|pending_iterator| { let mut pending_size = 0; for pending in pending_iterator { let encoded_size = pending.data.encode().len(); @@ -1096,580 +1153,650 @@ impl BaseProposer<::Block> for Proposer where } }); - self.transaction_pool.remove_invalid(&unqueue_invalid); - })?; - - info!("Proposing block [number: {}; hash: {}; parent_hash: {}; extrinsics: [{}]]", - block.header().number(), - <::Block as BlockT>::Hash::from(block.header().hash()), - block.header().parent_hash(), - block.extrinsics().iter() - .map(|xt| format!("{}", BlakeTwo256::hash_of(xt))) - .collect::>() - .join(", ") - ); - - let substrate_block = Decode::decode(&mut block.encode().as_slice()) - .expect("blocks are defined to serialize to substrate blocks correctly; qed"); - - assert!(evaluation::evaluate_initial( - &substrate_block, - &self.parent_hash, - self.parent_number, - ).is_ok()); - - Ok(substrate_block) - } - - fn evaluate(&self, unchecked_proposal: &::Block) -> Self::Evaluate { - debug!(target: "rhd", "evaluating block on top of parent ({}, {:?})", self.parent_number, self.parent_hash); - - // do initial serialization and structural integrity checks. - if let Err(e) = evaluation::evaluate_initial( - unchecked_proposal, - &self.parent_hash, - self.parent_number, - ) { - debug!(target: "rhd", "Invalid proposal: {:?}", e); - return Box::new(future::ok(false)); - }; - - let current_timestamp = current_timestamp(); - let inherent = InherentData::new( - current_timestamp, - self.offline.read().reports(&self.validators) - ); - let proposed_timestamp = match self.client.check_inherents( - &self.parent_id, - &unchecked_proposal, - &inherent, - ) { - Ok(Ok(())) => None, - Ok(Err(BlockBuilderError::ValidAtTimestamp(timestamp))) => Some(timestamp), - Ok(Err(e)) => { - debug!(target: "rhd", "Invalid proposal (check_inherents): {:?}", e); - return Box::new(future::ok(false)); - }, - Err(e) => { - debug!(target: "rhd", "Could not call into runtime: {:?}", e); - return Box::new(future::ok(false)); - } - }; - - let vote_delays = { - - // the duration until the given timestamp is current - let proposed_timestamp = ::std::cmp::max(self.minimum_timestamp, proposed_timestamp.unwrap_or(0)); - let timestamp_delay = if proposed_timestamp > current_timestamp { - let delay_s = proposed_timestamp - current_timestamp; - debug!(target: "rhd", "Delaying evaluation of proposal for {} seconds", delay_s); - Some(Instant::now() + Duration::from_secs(delay_s)) - } else { - None - }; - - match timestamp_delay { - Some(duration) => future::Either::A( - Delay::new(duration).map_err(|e| ErrorKind::Timer(e).into()) - ), - None => future::Either::B(future::ok(())), - } - }; - - // evaluate whether the block is actually valid. - // it may be better to delay this until the delays are finished - let evaluated = match self.client.execute_block(&self.parent_id, &unchecked_proposal.clone()) - .map_err(Error::from) { - Ok(()) => Ok(true), - Err(err) => match err.kind() { - error::ErrorKind::Client(client::error::ErrorKind::Execution(_)) => Ok(false), - _ => Err(err) - } - }; - - let future = future::result(evaluated).and_then(move |good| { - let end_result = future::ok(good); - if good { - // delay a "good" vote. - future::Either::A(vote_delays.and_then(|_| end_result)) - } else { - // don't delay a "bad" evaluation. 
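The `ready()` loop above fills the block greedily: it walks the ready transactions in order and stops once the next encoded extrinsic would exceed the byte budget (the budget constant itself is elided by this hunk). A self-contained sketch of that selection policy, with `Vec<u8>` standing in for encoded extrinsics and a hypothetical `MAX_TRANSACTIONS_SIZE`; the real loop also pushes each transaction into the block builder and collects invalid ones for removal:

/// Hypothetical byte budget; the patch's actual constant is elided above.
const MAX_TRANSACTIONS_SIZE: usize = 4 * 1024 * 1024;

/// Greedy fill: include transactions in order until the budget is hit.
fn select_transactions(pending: &[Vec<u8>]) -> Vec<Vec<u8>> {
    let mut pending_size = 0;
    let mut included = Vec::new();
    for tx in pending {
        let encoded_size = tx.len();
        if pending_size + encoded_size >= MAX_TRANSACTIONS_SIZE {
            break; // budget exhausted; leave the rest for the next block
        }
        pending_size += encoded_size;
        included.push(tx.clone());
    }
    included
}

fn main() {
    let pool = vec![vec![0u8; 1024], vec![0u8; 2048]];
    assert_eq!(select_transactions(&pool).len(), 2);
}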
- future::Either::B(end_result) - } - }); - - Box::new(future) as Box<_> - } + self.transaction_pool.remove_invalid(&unqueue_invalid); + })?; + + info!( + "Proposing block [number: {}; hash: {}; parent_hash: {}; extrinsics: [{}]]", + block.header().number(), + <::Block as BlockT>::Hash::from(block.header().hash()), + block.header().parent_hash(), + block + .extrinsics() + .iter() + .map(|xt| format!("{}", BlakeTwo256::hash_of(xt))) + .collect::>() + .join(", ") + ); + + let substrate_block = Decode::decode(&mut block.encode().as_slice()) + .expect("blocks are defined to serialize to substrate blocks correctly; qed"); + + assert!(evaluation::evaluate_initial( + &substrate_block, + &self.parent_hash, + self.parent_number, + ) + .is_ok()); + + Ok(substrate_block) + } + + fn evaluate(&self, unchecked_proposal: &::Block) -> Self::Evaluate { + debug!(target: "rhd", "evaluating block on top of parent ({}, {:?})", self.parent_number, self.parent_hash); + + // do initial serialization and structural integrity checks. + if let Err(e) = + evaluation::evaluate_initial(unchecked_proposal, &self.parent_hash, self.parent_number) + { + debug!(target: "rhd", "Invalid proposal: {:?}", e); + return Box::new(future::ok(false)); + }; + + let current_timestamp = current_timestamp(); + let inherent = InherentData::new( + current_timestamp, + self.offline.read().reports(&self.validators), + ); + let proposed_timestamp = + match self + .client + .check_inherents(&self.parent_id, &unchecked_proposal, &inherent) + { + Ok(Ok(())) => None, + Ok(Err(BlockBuilderError::ValidAtTimestamp(timestamp))) => Some(timestamp), + Ok(Err(e)) => { + debug!(target: "rhd", "Invalid proposal (check_inherents): {:?}", e); + return Box::new(future::ok(false)); + } + Err(e) => { + debug!(target: "rhd", "Could not call into runtime: {:?}", e); + return Box::new(future::ok(false)); + } + }; + + let vote_delays = { + // the duration until the given timestamp is current + let proposed_timestamp = + ::std::cmp::max(self.minimum_timestamp, proposed_timestamp.unwrap_or(0)); + let timestamp_delay = if proposed_timestamp > current_timestamp { + let delay_s = proposed_timestamp - current_timestamp; + debug!(target: "rhd", "Delaying evaluation of proposal for {} seconds", delay_s); + Some(Instant::now() + Duration::from_secs(delay_s)) + } else { + None + }; + + match timestamp_delay { + Some(duration) => { + future::Either::A(Delay::new(duration).map_err(|e| ErrorKind::Timer(e).into())) + } + None => future::Either::B(future::ok(())), + } + }; + + // evaluate whether the block is actually valid. + // it may be better to delay this until the delays are finished + let evaluated = match self + .client + .execute_block(&self.parent_id, &unchecked_proposal.clone()) + .map_err(Error::from) + { + Ok(()) => Ok(true), + Err(err) => match err.kind() { + error::ErrorKind::Client(client::error::ErrorKind::Execution(_)) => Ok(false), + _ => Err(err), + }, + }; + + let future = future::result(evaluated).and_then(move |good| { + let end_result = future::ok(good); + if good { + // delay a "good" vote. + future::Either::A(vote_delays.and_then(|_| end_result)) + } else { + // don't delay a "bad" evaluation. 
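The `vote_delays` block above reduces to a single rule: if the proposal's timestamp (or the node's own minimum) is still in the future, the vote waits out the difference; otherwise it goes out immediately. The same arithmetic with the futures plumbing stripped, as a synchronous sketch:

use std::time::{Duration, Instant};

/// Sketch of the delay computed in `evaluate` above: a proposal whose
/// timestamp is ahead of local time earns a wait of the difference,
/// clamped below by the node's own minimum timestamp.
fn vote_delay(minimum_timestamp: u64, proposed: Option<u64>, now_secs: u64) -> Option<Instant> {
    let proposed = minimum_timestamp.max(proposed.unwrap_or(0));
    if proposed > now_secs {
        Some(Instant::now() + Duration::from_secs(proposed - now_secs))
    } else {
        None // already current: vote immediately
    }
}

fn main() {
    // A proposal 5 seconds in the future is delayed; one in the past is not.
    assert!(vote_delay(0, Some(105), 100).is_some());
    assert!(vote_delay(0, Some(95), 100).is_none());
}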
+ future::Either::B(end_result) + } + }); + + Box::new(future) as Box<_> + } } -impl LocalProposer<::Block> for Proposer where - C: AuthoringApi + BlockNumberToHash, - A: txpool::ChainApi::Block>, - Self: BaseProposer<::Block, Error=Error>, - <::Block as BlockT>::Hash: - Into<::Hash> + PartialEq + Into, - error::Error: From<::Error> +impl LocalProposer<::Block> for Proposer +where + C: AuthoringApi + BlockNumberToHash, + A: txpool::ChainApi::Block>, + Self: BaseProposer<::Block, Error = Error>, + <::Block as BlockT>::Hash: + Into<::Hash> + PartialEq + Into, + error::Error: From<::Error>, { - - fn round_proposer(&self, round_number: u32, authorities: &[AuthorityId]) -> AuthorityId { - let offset = self.primary_index(round_number, authorities.len()); - let proposer = authorities[offset as usize].clone(); - trace!(target: "rhd", "proposer for round {} is {}", round_number, proposer); - - proposer - } - - fn import_misbehavior(&self, _misbehavior: Vec<(AuthorityId, Misbehavior<<::Block as BlockT>::Hash>)>) { - use rhododendron::Misbehavior as GenericMisbehavior; - use runtime_primitives::bft::{MisbehaviorKind, MisbehaviorReport}; - use node_runtime::{Call, UncheckedExtrinsic, ConsensusCall}; - - let mut next_index = { - let local_id = self.local_key.public().0; - let cur_index = self.transaction_pool.cull_and_get_pending(&BlockId::hash(self.parent_hash), |pending| pending - .filter(|tx| tx.verified.sender == local_id) - .last() - .map(|tx| Ok(tx.verified.index())) - .unwrap_or_else(|| self.client.account_nonce(&self.parent_id, local_id)) - .map_err(Error::from) - ); - - match cur_index { - Ok(cur_index) => cur_index + 1, - Err(e) => { - warn!(target: "consensus", "Error computing next transaction index: {:?}", e); - return; - } - } - }; - - for (target, misbehavior) in misbehavior { - let report = MisbehaviorReport { - parent_hash: self.parent_hash.into(), - parent_number: self.parent_number.as_(), - target, - misbehavior: match misbehavior { - GenericMisbehavior::ProposeOutOfTurn(_, _, _) => continue, - GenericMisbehavior::DoublePropose(_, _, _) => continue, - GenericMisbehavior::DoublePrepare(round, (h1, s1), (h2, s2)) - => MisbehaviorKind::BftDoublePrepare(round as u32, (h1.into(), s1.signature), (h2.into(), s2.signature)), - GenericMisbehavior::DoubleCommit(round, (h1, s1), (h2, s2)) - => MisbehaviorKind::BftDoubleCommit(round as u32, (h1.into(), s1.signature), (h2.into(), s2.signature)), - } - }; - let payload = ( - next_index, - Call::Consensus(ConsensusCall::report_misbehavior(report)), - Era::immortal(), - self.client.genesis_hash() - ); - let signature = self.local_key.sign(&payload.encode()).into(); - next_index += 1; - - let local_id = self.local_key.public().0.into(); - let extrinsic = UncheckedExtrinsic { - signature: Some((node_runtime::RawAddress::Id(local_id), signature, payload.0, Era::immortal())), - function: payload.1, - }; - let uxt: <::Block as BlockT>::Extrinsic = Decode::decode( - &mut extrinsic.encode().as_slice()).expect("Encoded extrinsic is valid"); - let hash = BlockId::<::Block>::hash(self.parent_hash); - if let Err(e) = self.transaction_pool.submit_one(&hash, uxt) { - warn!("Error importing misbehavior report: {:?}", e); - } - } - } - - fn on_round_end(&self, round_number: u32, was_proposed: bool) { - let primary_validator = self.validators[ - self.primary_index(round_number, self.validators.len()) - ]; - - // alter the message based on whether we think the empty proposer was forced to skip the round. 
- // this is determined by checking if our local validator would have been forced to skip the round. - if !was_proposed { - let public = ed25519::Public::from_raw(primary_validator.0); - info!( - "Potential Offline Validator: {} failed to propose during assigned slot: {}", - public, - round_number, - ); - } - - self.offline.write().note_round_end(primary_validator, was_proposed); - } + fn round_proposer(&self, round_number: u32, authorities: &[AuthorityId]) -> AuthorityId { + let offset = self.primary_index(round_number, authorities.len()); + let proposer = authorities[offset as usize].clone(); + trace!(target: "rhd", "proposer for round {} is {}", round_number, proposer); + + proposer + } + + fn import_misbehavior( + &self, + misbehavior: Vec<( + AuthorityId, + Misbehavior<<::Block as BlockT>::Hash>, + )>, + ) { + use node_runtime::{Call, ConsensusCall, UncheckedExtrinsic}; + use rhododendron::Misbehavior as GenericMisbehavior; + use runtime_primitives::bft::{MisbehaviorKind, MisbehaviorReport}; + + let mut next_index = { + let local_id = self.local_key.public().0; + let cur_index = self.transaction_pool.cull_and_get_pending( + &BlockId::hash(self.parent_hash), + |pending| { + pending + .filter(|tx| tx.verified.sender == local_id) + .last() + .map(|tx| Ok(tx.verified.index())) + .unwrap_or_else(|| self.client.account_nonce(&self.parent_id, local_id)) + .map_err(Error::from) + }, + ); + + match cur_index { + Ok(cur_index) => cur_index + 1, + Err(e) => { + warn!(target: "consensus", "Error computing next transaction index: {:?}", e); + return; + } + } + }; + + for (target, misbehavior) in misbehavior { + let report = MisbehaviorReport { + parent_hash: self.parent_hash.into(), + parent_number: self.parent_number.as_(), + target, + misbehavior: match misbehavior { + GenericMisbehavior::ProposeOutOfTurn(_, _, _) => continue, + GenericMisbehavior::DoublePropose(_, _, _) => continue, + GenericMisbehavior::DoublePrepare(round, (h1, s1), (h2, s2)) => { + MisbehaviorKind::BftDoublePrepare( + round as u32, + (h1.into(), s1.signature), + (h2.into(), s2.signature), + ) + } + GenericMisbehavior::DoubleCommit(round, (h1, s1), (h2, s2)) => { + MisbehaviorKind::BftDoubleCommit( + round as u32, + (h1.into(), s1.signature), + (h2.into(), s2.signature), + ) + } + }, + }; + let payload = ( + next_index, + Call::Consensus(ConsensusCall::report_misbehavior(report)), + Era::immortal(), + self.client.genesis_hash(), + ); + let signature = self.local_key.sign(&payload.encode()).into(); + next_index += 1; + + let local_id = self.local_key.public().0.into(); + let extrinsic = UncheckedExtrinsic { + signature: Some(( + node_runtime::RawAddress::Id(local_id), + signature, + payload.0, + Era::immortal(), + )), + function: payload.1, + }; + let uxt: <::Block as BlockT>::Extrinsic = + Decode::decode(&mut extrinsic.encode().as_slice()) + .expect("Encoded extrinsic is valid"); + let hash = BlockId::<::Block>::hash(self.parent_hash); + if let Err(e) = self.transaction_pool.submit_one(&hash, uxt) { + warn!("Error importing misbehavior report: {:?}", e); + } + } + } + + fn on_round_end(&self, round_number: u32, was_proposed: bool) { + let primary_validator = + self.validators[self.primary_index(round_number, self.validators.len())]; + + // alter the message based on whether we think the empty proposer was forced to skip the round. + // this is determined by checking if our local validator would have been forced to skip the round.
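The `next_index` computation in `import_misbehavior` above picks the index for a freshly signed report: one past the sender's last pending transaction in the pool, falling back to the on-chain account nonce when nothing is pending. A sketch of that rule, with a slice of (sender, index) pairs as an illustrative stand-in for the pool:

/// Next usable transaction index: one past the sender's last pending
/// transaction, or one past the chain nonce if nothing is pending.
/// The tuple-slice pool model is an assumption for illustration.
fn next_index(pending: &[(u64, u64)], sender: u64, chain_nonce: u64) -> u64 {
    let last_pending = pending
        .iter()
        .filter(|(s, _)| *s == sender)
        .last()
        .map(|(_, index)| *index);
    last_pending.unwrap_or(chain_nonce) + 1
}

fn main() {
    let pool = [(1, 7), (2, 3), (1, 8)];
    assert_eq!(next_index(&pool, 1, 5), 9); // last pending wins
    assert_eq!(next_index(&pool, 3, 5), 6); // falls back to the chain nonce
}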
+ if !was_proposed { + let public = ed25519::Public::from_raw(primary_validator.0); + info!( + "Potential Offline Validator: {} failed to propose during assigned slot: {}", + public, round_number, + ); + } + + self.offline + .write() + .note_round_end(primary_validator, was_proposed); + } } fn current_timestamp() -> u64 { - time::SystemTime::now().duration_since(time::UNIX_EPOCH) - .expect("now always later than unix epoch; qed") - .as_secs() + time::SystemTime::now() + .duration_since(time::UNIX_EPOCH) + .expect("now always later than unix epoch; qed") + .as_secs() } - #[cfg(test)] mod tests { - use super::*; - use std::collections::HashSet; - use std::marker::PhantomData; - - use runtime_primitives::testing::{Block as GenericTestBlock, Header as TestHeader}; - use primitives::H256; - use keyring::AuthorityKeyring; - - type TestBlock = GenericTestBlock<()>; - - struct FakeClient { - authorities: Vec, - imported_heights: Mutex> - } - - impl BlockImport for FakeClient { - type Error = Error; - - fn import_block(&self, - block: ImportBlock, - _new_authorities: Option> - ) -> Result { - assert!(self.imported_heights.lock().insert(block.header.number)); - Ok(ImportResult::Queued) - } - } - - impl Authorities for FakeClient { - type Error = Error; - - fn authorities(&self, _at: &BlockId) -> Result, Self::Error> { - Ok(self.authorities.clone()) - } - } - - // "black hole" output sink. - struct Comms(::std::marker::PhantomData); - - impl Sink for Comms { - type SinkItem = Communication; - type SinkError = E; - - fn start_send(&mut self, _item: Communication) -> ::futures::StartSend, E> { - Ok(::futures::AsyncSink::Ready) - } - - fn poll_complete(&mut self) -> ::futures::Poll<(), E> { - Ok(Async::Ready(())) - } - } - - impl Stream for Comms { - type Item = Communication; - type Error = E; - - fn poll(&mut self) -> ::futures::Poll, Self::Error> { - Ok(::futures::Async::NotReady) - } - } - - struct DummyFactory; - struct DummyProposer(u64); - - impl Environment for DummyFactory { - type Proposer = DummyProposer; - type Error = Error; - - fn init(&self, parent_header: &TestHeader, _authorities: &[AuthorityId], _sign_with: Arc) - -> Result - { - Ok(DummyProposer(parent_header.number + 1)) - } - } - - impl BaseProposer for DummyProposer { - type Error = Error; - type Create = Result; - type Evaluate = Result; - - fn propose(&self) -> Result { - - Ok(TestBlock { - header: from_block_number(self.0), - extrinsics: Default::default() - }) - } - - fn evaluate(&self, proposal: &TestBlock) -> Result { - Ok(proposal.header.number == self.0) - } - } - - impl LocalProposer for DummyProposer { - fn import_misbehavior(&self, _misbehavior: Vec<(AuthorityId, Misbehavior)>) {} - - fn round_proposer(&self, round_number: u32, authorities: &[AuthorityId]) -> AuthorityId { - authorities[(round_number as usize) % authorities.len()].clone() - } - } - - fn make_service(client: FakeClient) - -> BftService - { - BftService { - client: Arc::new(client), - live_agreement: Mutex::new(None), - round_cache: Arc::new(Mutex::new(RoundCache { - hash: None, - start_round: 0, - })), - round_timeout_multiplier: 10, - key: Arc::new(AuthorityKeyring::One.into()), - factory: DummyFactory - } - } - - fn sign_vote(vote: rhododendron::Vote, key: &ed25519::Pair, parent_hash: H256) -> LocalizedSignature { - match sign_message::(vote.into(), key, parent_hash) { - rhododendron::LocalizedMessage::Vote(vote) => vote.signature, - _ => panic!("signing vote leads to signed vote"), - } - } - - fn from_block_number(num: u64) -> TestHeader { - 
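`on_round_end` above feeds the shared offline tracker, which in turn drives the slash-vote submission in `propose`. The real `consensus::offline_tracker` keeps richer statistics; this is a minimal sketch of the same idea, assuming a plain consecutive-miss counter, a `u64` validator id, and an explicit reporting threshold:

use std::collections::HashMap;

/// Minimal stand-in for the `SharedOfflineTracker` used above: count
/// consecutive missed rounds per validator, reset on any success.
#[derive(Default)]
struct OfflineTracker {
    misses: HashMap<u64, u32>,
}

impl OfflineTracker {
    fn note_round_end(&mut self, validator: u64, was_proposed: bool) {
        if was_proposed {
            self.misses.remove(&validator); // seen alive: reset the counter
        } else {
            *self.misses.entry(validator).or_insert(0) += 1;
        }
    }

    /// Validators whose miss count reached the threshold, in stable order.
    fn reports(&self, threshold: u32) -> Vec<u64> {
        let mut out: Vec<u64> = self
            .misses
            .iter()
            .filter(|(_, &m)| m >= threshold)
            .map(|(&v, _)| v)
            .collect();
        out.sort_unstable();
        out
    }
}

fn main() {
    let mut tracker = OfflineTracker::default();
    tracker.note_round_end(7, false);
    tracker.note_round_end(7, false);
    assert_eq!(tracker.reports(2), vec![7]);
    tracker.note_round_end(7, true);
    assert!(tracker.reports(2).is_empty());
}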
TestHeader::new( - num, - Default::default(), - Default::default(), - Default::default(), - Default::default(), - ) - } - - #[test] - fn future_gets_preempted() { - let client = FakeClient { - authorities: vec![ - AuthorityKeyring::One.into(), - AuthorityKeyring::Two.into(), - AuthorityKeyring::Alice.into(), - AuthorityKeyring::Eve.into(), - ], - imported_heights: Mutex::new(HashSet::new()), - }; - - let service = make_service(client); - - let first = from_block_number(2); - let first_hash = first.hash(); - - let mut second = from_block_number(3); - second.parent_hash = first_hash; - let _second_hash = second.hash(); - - let mut first_bft = service.build_upon(&first, Comms(PhantomData), Comms(PhantomData)).unwrap().unwrap(); - assert!(service.live_agreement.lock().as_ref().unwrap().0 == first); - - let _second_bft = service.build_upon(&second, Comms(PhantomData), Comms(PhantomData)).unwrap(); - assert!(service.live_agreement.lock().as_ref().unwrap().0 != first); - assert!(service.live_agreement.lock().as_ref().unwrap().0 == second); - - // first_bft has been cancelled. need to swap out so we can check it. - let (_tx, mut rx) = oneshot::channel(); - ::std::mem::swap(&mut rx, &mut first_bft.cancel); - - assert!(rx.wait().is_ok()); - } - - #[test] - fn max_faulty() { - assert_eq!(max_faulty_of(3), 0); - assert_eq!(max_faulty_of(4), 1); - assert_eq!(max_faulty_of(100), 33); - assert_eq!(max_faulty_of(0), 0); - assert_eq!(max_faulty_of(11), 3); - assert_eq!(max_faulty_of(99), 32); - } - - #[test] - fn justification_check_works() { - let parent_hash = Default::default(); - let hash = [0xff; 32].into(); - - let authorities = vec![ - AuthorityKeyring::One.into(), - AuthorityKeyring::Two.into(), - AuthorityKeyring::Alice.into(), - AuthorityKeyring::Eve.into(), - ]; - - let authorities_keys = vec![ - AuthorityKeyring::One.into(), - AuthorityKeyring::Two.into(), - AuthorityKeyring::Alice.into(), - AuthorityKeyring::Eve.into(), - ]; - - let unchecked = UncheckedJustification(rhododendron::UncheckedJustification { - digest: hash, - round_number: 1, - signatures: authorities_keys.iter().take(3).map(|key| { - sign_vote(rhododendron::Vote::Commit(1, hash).into(), key, parent_hash) - }).collect(), - }); - - assert!(check_justification::(&authorities, parent_hash, unchecked).is_ok()); - - let unchecked = UncheckedJustification(rhododendron::UncheckedJustification { - digest: hash, - round_number: 0, // wrong round number (vs. the signatures) - signatures: authorities_keys.iter().take(3).map(|key| { - sign_vote(rhododendron::Vote::Commit(1, hash).into(), key, parent_hash) - }).collect(), - }); - - assert!(check_justification::(&authorities, parent_hash, unchecked).is_err()); - - // not enough signatures. - let unchecked = UncheckedJustification(rhododendron::UncheckedJustification { - digest: hash, - round_number: 1, - signatures: authorities_keys.iter().take(2).map(|key| { - sign_vote(rhododendron::Vote::Commit(1, hash).into(), key, parent_hash) - }).collect(), - }); - - assert!(check_justification::(&authorities, parent_hash, unchecked).is_err()); - - // wrong hash. 
- let unchecked = UncheckedJustification(rhododendron::UncheckedJustification { - digest: [0xfe; 32].into(), - round_number: 1, - signatures: authorities_keys.iter().take(3).map(|key| { - sign_vote(rhododendron::Vote::Commit(1, hash).into(), key, parent_hash) - }).collect(), - }); - - assert!(check_justification::(&authorities, parent_hash, unchecked).is_err()); - } - - #[test] - fn propose_check_works() { - let parent_hash = Default::default(); - - let authorities = vec![ - AuthorityKeyring::Alice.into(), - AuthorityKeyring::Eve.into(), - ]; - - let block = TestBlock { - header: from_block_number(1), - extrinsics: Default::default() - }; - - let proposal = sign_message(rhododendron::Message::Propose(1, block.clone()), &AuthorityKeyring::Alice.pair(), parent_hash);; - if let rhododendron::LocalizedMessage::Propose(proposal) = proposal { - assert!(check_proposal(&authorities, &parent_hash, &proposal).is_ok()); - let mut invalid_round = proposal.clone(); - invalid_round.round_number = 0; - assert!(check_proposal(&authorities, &parent_hash, &invalid_round).is_err()); - let mut invalid_digest = proposal.clone(); - invalid_digest.digest = [0xfe; 32].into(); - assert!(check_proposal(&authorities, &parent_hash, &invalid_digest).is_err()); - } else { - assert!(false); - } - - // Not an authority - let proposal = sign_message::(rhododendron::Message::Propose(1, block), &AuthorityKeyring::Bob.pair(), parent_hash);; - if let rhododendron::LocalizedMessage::Propose(proposal) = proposal { - assert!(check_proposal(&authorities, &parent_hash, &proposal).is_err()); - } else { - assert!(false); - } - } - - #[test] - fn vote_check_works() { - let parent_hash: H256 = Default::default(); - let hash: H256 = [0xff; 32].into(); - - let authorities = vec![ - AuthorityKeyring::Alice.into(), - AuthorityKeyring::Eve.into(), - ]; - - let vote = sign_message::(rhododendron::Message::Vote(rhododendron::Vote::Prepare(1, hash)), &Keyring::Alice.pair(), parent_hash);; - if let rhododendron::LocalizedMessage::Vote(vote) = vote { - assert!(check_vote::(&authorities, &parent_hash, &vote).is_ok()); - let mut invalid_sender = vote.clone(); - invalid_sender.signature.signer = Keyring::Eve.into(); - assert!(check_vote::(&authorities, &parent_hash, &invalid_sender).is_err()); - } else { - assert!(false); - } - - // Not an authority - let vote = sign_message::(rhododendron::Message::Vote(rhododendron::Vote::Prepare(1, hash)), &Keyring::Bob.pair(), parent_hash);; - if let rhododendron::LocalizedMessage::Vote(vote) = vote { - assert!(check_vote::(&authorities, &parent_hash, &vote).is_err()); - } else { - assert!(false); - } - } - - #[test] - fn drop_bft_future_does_not_deadlock() { - let client = FakeClient { - authorities: vec![ - AuthorityKeyring::One.into(), - AuthorityKeyring::Two.into(), - AuthorityKeyring::Alice.into(), - AuthorityKeyring::Eve.into(), - ], - imported_heights: Mutex::new(HashSet::new()), - }; - - let service = make_service(client); - - let first = from_block_number(2); - let first_hash = first.hash(); - - let mut second = from_block_number(3); - second.parent_hash = first_hash; - - let _ = service.build_upon(&first, Comms(PhantomData), Comms(PhantomData)).unwrap(); - assert!(service.live_agreement.lock().as_ref().unwrap().0 == first); - service.live_agreement.lock().take(); - } - - #[test] - fn bft_can_build_though_skipped() { - let client = FakeClient { - authorities: vec![ - AuthorityKeyring::One.into(), - AuthorityKeyring::Two.into(), - AuthorityKeyring::Alice.into(), - AuthorityKeyring::Eve.into(), - ], - 
imported_heights: Mutex::new(HashSet::new()), - }; - - let service = make_service(client); - - let first = from_block_number(2); - let first_hash = first.hash(); - - let mut second = from_block_number(3); - second.parent_hash = first_hash; - - let mut third = from_block_number(4); - third.parent_hash = second.hash(); - - let _ = service.build_upon(&first, Comms(PhantomData), Comms(PhantomData)).unwrap(); - assert!(service.live_agreement.lock().as_ref().unwrap().0 == first); - // BFT has not seen second, but will move forward on third - service.build_upon(&third, Comms(PhantomData), Comms(PhantomData)).unwrap(); - assert!(service.live_agreement.lock().as_ref().unwrap().0 == third); - - // but we are not walking backwards - service.build_upon(&second, Comms(PhantomData), Comms(PhantomData)).unwrap(); - assert!(service.live_agreement.lock().as_ref().unwrap().0 == third); - } + use super::*; + use std::collections::HashSet; + use std::marker::PhantomData; + + use keyring::AuthorityKeyring; + use primitives::H256; + use runtime_primitives::testing::{Block as GenericTestBlock, Header as TestHeader}; + + type TestBlock = GenericTestBlock<()>; + + struct FakeClient { + authorities: Vec, + imported_heights: Mutex>, + } + + impl BlockImport for FakeClient { + type Error = Error; + + fn import_block( + &self, + block: ImportBlock, + _new_authorities: Option>, + ) -> Result { + assert!(self.imported_heights.lock().insert(block.header.number)); + Ok(ImportResult::Queued) + } + } + + impl Authorities for FakeClient { + type Error = Error; + + fn authorities(&self, _at: &BlockId) -> Result, Self::Error> { + Ok(self.authorities.clone()) + } + } + + // "black hole" output sink. + struct Comms(::std::marker::PhantomData); + + impl Sink for Comms { + type SinkItem = Communication; + type SinkError = E; + + fn start_send( + &mut self, + _item: Communication, + ) -> ::futures::StartSend, E> { + Ok(::futures::AsyncSink::Ready) + } + + fn poll_complete(&mut self) -> ::futures::Poll<(), E> { + Ok(Async::Ready(())) + } + } + + impl Stream for Comms { + type Item = Communication; + type Error = E; + + fn poll(&mut self) -> ::futures::Poll, Self::Error> { + Ok(::futures::Async::NotReady) + } + } + + struct DummyFactory; + struct DummyProposer(u64); + + impl Environment for DummyFactory { + type Proposer = DummyProposer; + type Error = Error; + + fn init( + &self, + parent_header: &TestHeader, + _authorities: &[AuthorityId], + _sign_with: Arc, + ) -> Result { + Ok(DummyProposer(parent_header.number + 1)) + } + } + + impl BaseProposer for DummyProposer { + type Error = Error; + type Create = Result; + type Evaluate = Result; + + fn propose(&self) -> Result { + Ok(TestBlock { + header: from_block_number(self.0), + extrinsics: Default::default(), + }) + } + + fn evaluate(&self, proposal: &TestBlock) -> Result { + Ok(proposal.header.number == self.0) + } + } + + impl LocalProposer for DummyProposer { + fn import_misbehavior(&self, _misbehavior: Vec<(AuthorityId, Misbehavior)>) {} + + fn round_proposer(&self, round_number: u32, authorities: &[AuthorityId]) -> AuthorityId { + authorities[(round_number as usize) % authorities.len()].clone() + } + } + + fn make_service(client: FakeClient) -> BftService { + BftService { + client: Arc::new(client), + live_agreement: Mutex::new(None), + round_cache: Arc::new(Mutex::new(RoundCache { + hash: None, + start_round: 0, + })), + round_timeout_multiplier: 10, + key: Arc::new(AuthorityKeyring::One.into()), + factory: DummyFactory, + } + } + + fn sign_vote( + vote: rhododendron::Vote, + 
key: &ed25519::Pair, + parent_hash: H256, + ) -> LocalizedSignature { + match sign_message::(vote.into(), key, parent_hash) { + rhododendron::LocalizedMessage::Vote(vote) => vote.signature, + _ => panic!("signing vote leads to signed vote"), + } + } + + fn from_block_number(num: u64) -> TestHeader { + TestHeader::new( + num, + Default::default(), + Default::default(), + Default::default(), + Default::default(), + ) + } + + #[test] + fn future_gets_preempted() { + let client = FakeClient { + authorities: vec![ + AuthorityKeyring::One.into(), + AuthorityKeyring::Two.into(), + AuthorityKeyring::Alice.into(), + AuthorityKeyring::Eve.into(), + ], + imported_heights: Mutex::new(HashSet::new()), + }; + + let service = make_service(client); + + let first = from_block_number(2); + let first_hash = first.hash(); + + let mut second = from_block_number(3); + second.parent_hash = first_hash; + let _second_hash = second.hash(); + + let mut first_bft = service + .build_upon(&first, Comms(PhantomData), Comms(PhantomData)) + .unwrap() + .unwrap(); + assert!(service.live_agreement.lock().as_ref().unwrap().0 == first); + + let _second_bft = service + .build_upon(&second, Comms(PhantomData), Comms(PhantomData)) + .unwrap(); + assert!(service.live_agreement.lock().as_ref().unwrap().0 != first); + assert!(service.live_agreement.lock().as_ref().unwrap().0 == second); + + // first_bft has been cancelled. need to swap out so we can check it. + let (_tx, mut rx) = oneshot::channel(); + ::std::mem::swap(&mut rx, &mut first_bft.cancel); + + assert!(rx.wait().is_ok()); + } + + #[test] + fn max_faulty() { + assert_eq!(max_faulty_of(3), 0); + assert_eq!(max_faulty_of(4), 1); + assert_eq!(max_faulty_of(100), 33); + assert_eq!(max_faulty_of(0), 0); + assert_eq!(max_faulty_of(11), 3); + assert_eq!(max_faulty_of(99), 32); + } + + #[test] + fn justification_check_works() { + let parent_hash = Default::default(); + let hash = [0xff; 32].into(); + + let authorities = vec![ + AuthorityKeyring::One.into(), + AuthorityKeyring::Two.into(), + AuthorityKeyring::Alice.into(), + AuthorityKeyring::Eve.into(), + ]; + + let authorities_keys = vec![ + AuthorityKeyring::One.into(), + AuthorityKeyring::Two.into(), + AuthorityKeyring::Alice.into(), + AuthorityKeyring::Eve.into(), + ]; + + let unchecked = UncheckedJustification(rhododendron::UncheckedJustification { + digest: hash, + round_number: 1, + signatures: authorities_keys + .iter() + .take(3) + .map(|key| sign_vote(rhododendron::Vote::Commit(1, hash).into(), key, parent_hash)) + .collect(), + }); + + assert!(check_justification::(&authorities, parent_hash, unchecked).is_ok()); + + let unchecked = UncheckedJustification(rhododendron::UncheckedJustification { + digest: hash, + round_number: 0, // wrong round number (vs. the signatures) + signatures: authorities_keys + .iter() + .take(3) + .map(|key| sign_vote(rhododendron::Vote::Commit(1, hash).into(), key, parent_hash)) + .collect(), + }); + + assert!(check_justification::(&authorities, parent_hash, unchecked).is_err()); + + // not enough signatures. + let unchecked = UncheckedJustification(rhododendron::UncheckedJustification { + digest: hash, + round_number: 1, + signatures: authorities_keys + .iter() + .take(2) + .map(|key| sign_vote(rhododendron::Vote::Commit(1, hash).into(), key, parent_hash)) + .collect(), + }); + + assert!(check_justification::(&authorities, parent_hash, unchecked).is_err()); + + // wrong hash. 
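The `max_faulty` vectors above pin the implementation to the classical BFT bound f = floor((n - 1) / 3), and `justification_check_works` exercises its flip side: a valid justification needs n - f commit signatures, so 3 of 4 pass while 2 of 4 fail. A sketch matching those vectors; the saturating subtraction covers the n = 0 case the test includes:

/// floor((n - 1) / 3), saturating at zero: the classical BFT fault bound,
/// matching the `max_faulty_of` vectors tested above.
fn max_faulty_of(n: usize) -> usize {
    n.saturating_sub(1) / 3
}

/// Signatures required for a valid justification: all but the faulty.
fn required_signatures(n: usize) -> usize {
    n - max_faulty_of(n)
}

fn main() {
    assert_eq!(max_faulty_of(100), 33);
    assert_eq!(max_faulty_of(0), 0);
    // With 4 authorities, 3 commits justify a block and 2 do not,
    // exactly what `justification_check_works` checks.
    assert_eq!(required_signatures(4), 3);
}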
+ let unchecked = UncheckedJustification(rhododendron::UncheckedJustification { + digest: [0xfe; 32].into(), + round_number: 1, + signatures: authorities_keys + .iter() + .take(3) + .map(|key| sign_vote(rhododendron::Vote::Commit(1, hash).into(), key, parent_hash)) + .collect(), + }); + + assert!(check_justification::(&authorities, parent_hash, unchecked).is_err()); + } + + #[test] + fn propose_check_works() { + let parent_hash = Default::default(); + + let authorities = vec![AuthorityKeyring::Alice.into(), AuthorityKeyring::Eve.into()]; + + let block = TestBlock { + header: from_block_number(1), + extrinsics: Default::default(), + }; + + let proposal = sign_message( + rhododendron::Message::Propose(1, block.clone()), + &AuthorityKeyring::Alice.pair(), + parent_hash, + ); + if let rhododendron::LocalizedMessage::Propose(proposal) = proposal { + assert!(check_proposal(&authorities, &parent_hash, &proposal).is_ok()); + let mut invalid_round = proposal.clone(); + invalid_round.round_number = 0; + assert!(check_proposal(&authorities, &parent_hash, &invalid_round).is_err()); + let mut invalid_digest = proposal.clone(); + invalid_digest.digest = [0xfe; 32].into(); + assert!(check_proposal(&authorities, &parent_hash, &invalid_digest).is_err()); + } else { + assert!(false); + } + + // Not an authority + let proposal = sign_message::( + rhododendron::Message::Propose(1, block), + &AuthorityKeyring::Bob.pair(), + parent_hash, + ); + if let rhododendron::LocalizedMessage::Propose(proposal) = proposal { + assert!(check_proposal(&authorities, &parent_hash, &proposal).is_err()); + } else { + assert!(false); + } + } + + #[test] + fn vote_check_works() { + let parent_hash: H256 = Default::default(); + let hash: H256 = [0xff; 32].into(); + + let authorities = vec![AuthorityKeyring::Alice.into(), AuthorityKeyring::Eve.into()]; + + let vote = sign_message::( + rhododendron::Message::Vote(rhododendron::Vote::Prepare(1, hash)), + &AuthorityKeyring::Alice.pair(), + parent_hash, + ); + if let rhododendron::LocalizedMessage::Vote(vote) = vote { + assert!(check_vote::(&authorities, &parent_hash, &vote).is_ok()); + let mut invalid_sender = vote.clone(); + invalid_sender.signature.signer = AuthorityKeyring::Eve.into(); + assert!(check_vote::(&authorities, &parent_hash, &invalid_sender).is_err()); + } else { + assert!(false); + } + + // Not an authority + let vote = sign_message::( + rhododendron::Message::Vote(rhododendron::Vote::Prepare(1, hash)), + &AuthorityKeyring::Bob.pair(), + parent_hash, + ); + if let rhododendron::LocalizedMessage::Vote(vote) = vote { + assert!(check_vote::(&authorities, &parent_hash, &vote).is_err()); + } else { + assert!(false); + } + } + + #[test] + fn drop_bft_future_does_not_deadlock() { + let client = FakeClient { + authorities: vec![ + AuthorityKeyring::One.into(), + AuthorityKeyring::Two.into(), + AuthorityKeyring::Alice.into(), + AuthorityKeyring::Eve.into(), + ], + imported_heights: Mutex::new(HashSet::new()), + }; + + let service = make_service(client); + + let first = from_block_number(2); + let first_hash = first.hash(); + + let mut second = from_block_number(3); + second.parent_hash = first_hash; + + let _ = service + .build_upon(&first, Comms(PhantomData), Comms(PhantomData)) + .unwrap(); + assert!(service.live_agreement.lock().as_ref().unwrap().0 == first); + service.live_agreement.lock().take(); + } + + #[test] + fn bft_can_build_though_skipped() { + let client = FakeClient { + authorities: vec![ + AuthorityKeyring::One.into(), + AuthorityKeyring::Two.into(), + AuthorityKeyring::Alice.into(),
AuthorityKeyring::Eve.into(), + ], + imported_heights: Mutex::new(HashSet::new()), + }; + + let service = make_service(client); + + let first = from_block_number(2); + let first_hash = first.hash(); + + let mut second = from_block_number(3); + second.parent_hash = first_hash; + + let mut third = from_block_number(4); + third.parent_hash = second.hash(); + + let _ = service + .build_upon(&first, Comms(PhantomData), Comms(PhantomData)) + .unwrap(); + assert!(service.live_agreement.lock().as_ref().unwrap().0 == first); + // BFT has not seen second, but will move forward on third + service + .build_upon(&third, Comms(PhantomData), Comms(PhantomData)) + .unwrap(); + assert!(service.live_agreement.lock().as_ref().unwrap().0 == third); + + // but we are not walking backwards + service + .build_upon(&second, Comms(PhantomData), Comms(PhantomData)) + .unwrap(); + assert!(service.live_agreement.lock().as_ref().unwrap().0 == third); + } } diff --git a/core/consensus/rhd/src/service.rs b/core/consensus/rhd/src/service.rs index e2858f767a..97dea09d6a 100644 --- a/core/consensus/rhd/src/service.rs +++ b/core/consensus/rhd/src/service.rs @@ -16,27 +16,27 @@ //! Consensus service. +use std::sync::Arc; /// Consensus service. A long running service that manages BFT agreement /// the network. use std::thread; use std::time::{Duration, Instant}; -use std::sync::Arc; -use client::{BlockchainEvents, ChainHead, BlockBody}; +use client::{BlockBody, BlockchainEvents, ChainHead}; use futures::prelude::*; -use transaction_pool::txpool::{Pool as TransactionPool, ChainApi as PoolChainApi}; -use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, BlockNumberToHash}; +use runtime_primitives::traits::{Block as BlockT, BlockNumberToHash, Header as HeaderT}; +use transaction_pool::txpool::{ChainApi as PoolChainApi, Pool as TransactionPool}; use tokio::executor::current_thread::TaskExecutor as LocalThreadHandle; -use tokio::runtime::TaskExecutor as ThreadPoolHandle; use tokio::runtime::current_thread::Runtime as LocalRuntime; +use tokio::runtime::TaskExecutor as ThreadPoolHandle; use tokio::timer::Interval; -use parking_lot::RwLock; use consensus::offline_tracker::OfflineTracker; +use parking_lot::RwLock; -use super::{Network, ProposerFactory, AuthoringApi}; -use {consensus, primitives, ed25519, error, BftService, LocalProposer}; +use super::{AuthoringApi, Network, ProposerFactory}; +use {consensus, ed25519, error, primitives, BftService, LocalProposer}; const TIMER_DELAY_MS: u64 = 5000; const TIMER_INTERVAL_MS: u64 = 500; @@ -44,138 +44,149 @@ const TIMER_INTERVAL_MS: u64 = 500; // spin up an instance of BFT agreement on the current thread's executor. // panics if there is no current thread executor. 
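The import hunk above is representative of the whole patch: the formatter sorts `use` items alphabetically within each group and also orders the names inside a brace list, which is why `std::sync::Arc` floats up and `Pool`/`ChainApi` swap places. Assuming `rustfmt` with its default import reordering was the tool used, the normalization looks like this on a neutral, std-only example:

// Before: hand-ordered imports, as in the old service.rs.
//   use std::time::{Instant, Duration};
//   use std::sync::Arc;
// After `cargo fmt`: alphabetical order per group, and the items
// inside each brace list sorted too.
use std::sync::Arc;
use std::time::{Duration, Instant};

fn main() {
    // Nothing to compute; this file only demonstrates import ordering.
    let _ = (Arc::new(()), Duration::from_secs(0), Instant::now());
}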
fn start_bft( - header: ::Header, - bft_service: Arc>, + header: ::Header, + bft_service: Arc>, ) where - F: consensus::Environment + 'static, - C: consensus::BlockImport + consensus::Authorities + 'static, - F::Error: ::std::fmt::Debug, - >::Error: ::std::fmt::Display + Into, - >::Proposer : LocalProposer, - >::Error: ::std::fmt::Display, - Block: BlockT, + F: consensus::Environment + 'static, + C: consensus::BlockImport + consensus::Authorities + 'static, + F::Error: ::std::fmt::Debug, + >::Error: ::std::fmt::Display + Into, + >::Proposer: LocalProposer, + >::Error: ::std::fmt::Display, + Block: BlockT, { - let mut handle = LocalThreadHandle::current(); - match bft_service.build_upon(&header) { - Ok(Some(bft_work)) => if let Err(e) = handle.spawn_local(Box::new(bft_work)) { - warn!(target: "bft", "Couldn't initialize BFT agreement: {:?}", e); - } - Ok(None) => trace!(target: "bft", "Could not start agreement on top of {}", header.hash()), - Err(e) => warn!(target: "bft", "BFT agreement error: {}", e), - } + let mut handle = LocalThreadHandle::current(); + match bft_service.build_upon(&header) { + Ok(Some(bft_work)) => { + if let Err(e) = handle.spawn_local(Box::new(bft_work)) { + warn!(target: "bft", "Couldn't initialize BFT agreement: {:?}", e); + } + } + Ok(None) => trace!(target: "bft", "Could not start agreement on top of {}", header.hash()), + Err(e) => warn!(target: "bft", "BFT agreement error: {}", e), + } } /// Consensus service. Starts working when created. pub struct Service { - thread: Option>, - exit_signal: Option<::exit_future::Signal>, + thread: Option>, + exit_signal: Option<::exit_future::Signal>, } impl Service { - /// Create and start a new instance. - pub fn new( - client: Arc, - api: Arc, - network: N, - transaction_pool: Arc>, - thread_pool: ThreadPoolHandle, - key: ed25519::Pair, - block_delay: u64, - ) -> Service - where - error::Error: From<::Error>, - A: AuthoringApi + BlockNumberToHash + 'static, - P: PoolChainApi::Block> + 'static, - C: BlockchainEvents<::Block> - + ChainHead<::Block> - + BlockBody<::Block>, - C: consensus::BlockImport<::Block> - + consensus::Authorities<::Block> + Send + Sync + 'static, - primitives::H256: From<<::Block as BlockT>::Hash>, - <::Block as BlockT>::Hash: PartialEq + PartialEq, - N: Network::Block> + Send + 'static, - { - - let (signal, exit) = ::exit_future::signal(); - let thread = thread::spawn(move || { - let mut runtime = LocalRuntime::new().expect("Could not create local runtime"); - let key = Arc::new(key); - - let factory = ProposerFactory { - client: api.clone(), - transaction_pool: transaction_pool.clone(), - network, - handle: thread_pool.clone(), - offline: Arc::new(RwLock::new(OfflineTracker::new())), - force_delay: block_delay, - }; - let bft_service = Arc::new(BftService::new(client.clone(), key, factory)); - - let notifications = { - let client = client.clone(); - let bft_service = bft_service.clone(); - - client.import_notification_stream().for_each(move |notification| { - if notification.is_new_best { - start_bft(notification.header, bft_service.clone()); - } - Ok(()) - }) - }; - - let interval = Interval::new( - Instant::now() + Duration::from_millis(TIMER_DELAY_MS), - Duration::from_millis(TIMER_INTERVAL_MS), - ); - - let mut prev_best = match client.best_block_header() { - Ok(header) => header.hash(), - Err(e) => { - warn!("Cant's start consensus service. 
Error reading best block header: {:?}", e); - return; - } - }; - - let timed = { - let c = client.clone(); - let s = bft_service.clone(); - - interval.map_err(|e| debug!(target: "bft", "Timer error: {:?}", e)).for_each(move |_| { - if let Ok(best_block) = c.best_block_header() { - let hash = best_block.hash(); - - if hash == prev_best { - debug!(target: "bft", "Starting consensus round after a timeout"); - start_bft(best_block, s.clone()); - } - prev_best = hash; - } - Ok(()) - }) - }; - - runtime.spawn(notifications); - runtime.spawn(timed); - - if let Err(e) = runtime.block_on(exit) { - debug!("BFT event loop error {:?}", e); - } - }); - Service { - thread: Some(thread), - exit_signal: Some(signal), - } - } + /// Create and start a new instance. + pub fn new( + client: Arc, + api: Arc, + network: N, + transaction_pool: Arc>, + thread_pool: ThreadPoolHandle, + key: ed25519::Pair, + block_delay: u64, + ) -> Service + where + error::Error: From<::Error>, + A: AuthoringApi + BlockNumberToHash + 'static, + P: PoolChainApi::Block> + 'static, + C: BlockchainEvents<::Block> + + ChainHead<::Block> + + BlockBody<::Block>, + C: consensus::BlockImport<::Block> + + consensus::Authorities<::Block> + + Send + + Sync + + 'static, + primitives::H256: From<<::Block as BlockT>::Hash>, + <::Block as BlockT>::Hash: PartialEq + PartialEq, + N: Network::Block> + Send + 'static, + { + let (signal, exit) = ::exit_future::signal(); + let thread = thread::spawn(move || { + let mut runtime = LocalRuntime::new().expect("Could not create local runtime"); + let key = Arc::new(key); + + let factory = ProposerFactory { + client: api.clone(), + transaction_pool: transaction_pool.clone(), + network, + handle: thread_pool.clone(), + offline: Arc::new(RwLock::new(OfflineTracker::new())), + force_delay: block_delay, + }; + let bft_service = Arc::new(BftService::new(client.clone(), key, factory)); + + let notifications = { + let client = client.clone(); + let bft_service = bft_service.clone(); + + client + .import_notification_stream() + .for_each(move |notification| { + if notification.is_new_best { + start_bft(notification.header, bft_service.clone()); + } + Ok(()) + }) + }; + + let interval = Interval::new( + Instant::now() + Duration::from_millis(TIMER_DELAY_MS), + Duration::from_millis(TIMER_INTERVAL_MS), + ); + + let mut prev_best = match client.best_block_header() { + Ok(header) => header.hash(), + Err(e) => { + warn!( + "Can't start consensus service.
Error reading best block header: {:?}", + e + ); + return; + } + }; + + let timed = { + let c = client.clone(); + let s = bft_service.clone(); + + interval + .map_err(|e| debug!(target: "bft", "Timer error: {:?}", e)) + .for_each(move |_| { + if let Ok(best_block) = c.best_block_header() { + let hash = best_block.hash(); + + if hash == prev_best { + debug!(target: "bft", "Starting consensus round after a timeout"); + start_bft(best_block, s.clone()); + } + prev_best = hash; + } + Ok(()) + }) + }; + + runtime.spawn(notifications); + runtime.spawn(timed); + + if let Err(e) = runtime.block_on(exit) { + debug!("BFT event loop error {:?}", e); + } + }); + Service { + thread: Some(thread), + exit_signal: Some(signal), + } + } } impl Drop for Service { - fn drop(&mut self) { - if let Some(signal) = self.exit_signal.take() { - signal.fire(); - } - - if let Some(thread) = self.thread.take() { - thread.join().expect("The service thread has panicked"); - } - } + fn drop(&mut self) { + if let Some(signal) = self.exit_signal.take() { + signal.fire(); + } + + if let Some(thread) = self.thread.take() { + thread.join().expect("The service thread has panicked"); + } + } } diff --git a/core/executor/src/allocator.rs b/core/executor/src/allocator.rs index 4b3f7d3219..b78228ad54 100644 --- a/core/executor/src/allocator.rs +++ b/core/executor/src/allocator.rs @@ -19,9 +19,9 @@ use crate::wasm_utils::UserError; use log::trace; +use wasmi::memory_units::Bytes; use wasmi::Error; use wasmi::MemoryRef; -use wasmi::memory_units::Bytes; // The pointers need to be aligned to 8 bytes. const ALIGNMENT: u32 = 8; @@ -38,469 +38,476 @@ pub const OUT_OF_SPACE: &str = "Requested allocation size does not fit into rema pub const REQUESTED_SIZE_TOO_LARGE: &str = "Requested size to allocate is too large"; pub struct FreeingBumpHeapAllocator { - bumper: u32, - heads: [u32; N], - heap: MemoryRef, - max_heap_size: u32, - ptr_offset: u32, - total_size: u32, + bumper: u32, + heads: [u32; N], + heap: MemoryRef, + max_heap_size: u32, + ptr_offset: u32, + total_size: u32, } impl FreeingBumpHeapAllocator { - - /// Creates a new allocation heap which follows a freeing-bump strategy. - /// The maximum size which can be allocated at once is 16 MiB. - /// - /// # Arguments - /// - /// * `ptr_offset` - The pointers returned by `allocate()` start from this - /// offset on. The pointer offset needs to be aligned to a multiple of 8, - /// hence a padding might be added to align `ptr_offset` properly. - /// - /// * `heap_size` - The size available to this heap instance (in bytes) for - /// allocating memory. - /// - /// * `heap` - A `MemoryRef` to the available `MemoryInstance` which is - /// used as the heap. - /// - pub fn new(mem: MemoryRef) -> Self { - let current_size: Bytes = mem.current_size().into(); - let current_size = current_size.0 as u32; - let used_size = mem.used_size().0 as u32; - let heap_size = current_size - used_size; - - let mut ptr_offset = used_size; - let padding = ptr_offset % ALIGNMENT; - if padding != 0 { - ptr_offset += ALIGNMENT - padding; - } - - FreeingBumpHeapAllocator { - bumper: 0, - heads: [0; N], - heap: mem, - max_heap_size: heap_size, - ptr_offset: ptr_offset, - total_size: 0, - } - } - - /// Gets requested number of bytes to allocate and returns a pointer. - /// The maximum size which can be allocated at once is 16 MiB. 
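`FreeingBumpHeapAllocator::new` above rounds the initial `ptr_offset` up to the next multiple of `ALIGNMENT` by adding explicit padding. For a power-of-two alignment the same rounding is often written branch-free; a sketch of both forms, checked against each other:

const ALIGNMENT: u32 = 8;

/// The padding form used by `FreeingBumpHeapAllocator::new` above.
fn align_up_padded(mut offset: u32) -> u32 {
    let padding = offset % ALIGNMENT;
    if padding != 0 {
        offset += ALIGNMENT - padding;
    }
    offset
}

/// Branch-free equivalent, valid for power-of-two alignments only.
fn align_up_mask(offset: u32) -> u32 {
    (offset + ALIGNMENT - 1) & !(ALIGNMENT - 1)
}

fn main() {
    for offset in 0..64 {
        assert_eq!(align_up_padded(offset), align_up_mask(offset));
    }
    // 13 rounds up to 16, matching the `set_offset(mem, 13)` tests below.
    assert_eq!(align_up_padded(13), 16);
}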
- pub fn allocate(&mut self, size: u32) -> Result { - if size > MAX_POSSIBLE_ALLOCATION { - return Err(UserError(REQUESTED_SIZE_TOO_LARGE)); - } - - let size = size.max(8); - let item_size = size.next_power_of_two(); - if item_size + 8 + self.total_size > self.max_heap_size { - return Err(UserError(OUT_OF_SPACE)); - } - - let list_index = (item_size.trailing_zeros() - 3) as usize; - let ptr: u32 = if self.heads[list_index] != 0 { - // Something from the free list - let item = self.heads[list_index]; - let four_bytes = self.get_heap_4bytes(item) - .map_err(|_| UserError("Unable to get bytes at pointer taken from list of free items"))?; - self.heads[list_index] = FreeingBumpHeapAllocator::le_bytes_to_u32(four_bytes); - item + 8 - } else { - // Nothing to be freed. Bump. - self.bump(item_size + 8) + 8 - }; - - for i in 1..8 { - self.set_heap(ptr - i, 255) - .map_err(|_| UserError("Unable to successively write bytes into heap at pointer prefix"))?; - } - - self.set_heap(ptr - 8, list_index as u8) - .map_err(|_| UserError("Unable to write byte into heap at pointer prefix"))?; - - self.total_size = self.total_size + item_size + 8; - trace!(target: "wasm-heap", "Heap size is {} bytes after allocation", self.total_size); - - Ok(self.ptr_offset + ptr) - } - - /// Deallocates the space which was allocated for a pointer. - pub fn deallocate(&mut self, ptr: u32) -> Result<(), UserError> { - let ptr = ptr - self.ptr_offset; - if ptr < 8 { - return Err(UserError("Invalid pointer for deallocation")); - } - - let list_index = self.get_heap_byte(ptr - 8) - .map_err(|_| UserError("Unable to access pointer prefix"))? as usize; - for i in 1..8 { - let heap_byte = self.get_heap_byte(ptr - i) - .map_err(|_| UserError("Unable to write single bytes into heap at pointer"))?; - debug_assert!(heap_byte == 255) - } - let tail = self.heads[list_index]; - self.heads[list_index] = ptr - 8; - - let mut slice = self.get_heap_4bytes(ptr - 8) - .map_err(|_| UserError("Unable to get 4 bytes from heap at pointer prefix"))?; - FreeingBumpHeapAllocator::write_u32_into_le_bytes(tail, &mut slice); - self.set_heap_4bytes(ptr - 8, slice) - .map_err(|_| UserError("Unable to write 4 bytes into heap at pointer prefix"))?; - - let item_size = FreeingBumpHeapAllocator::get_item_size_from_index(list_index); - self.total_size = self.total_size.checked_sub(item_size as u32 + 8) - .ok_or_else(|| UserError("Unable to subtract from total heap size without overflow"))?; - trace!(target: "wasm-heap", "Heap size is {} bytes after deallocation", self.total_size); - - Ok(()) - } - - fn bump(&mut self, n: u32) -> u32 { - let res = self.bumper; - self.bumper += n; - res - } - - fn le_bytes_to_u32(arr: [u8; 4]) -> u32 { - let bytes = [arr[0], arr[1], arr[2], arr[3]]; - unsafe { std::mem::transmute::<[u8; 4], u32>(bytes) }.to_le() - } - - fn write_u32_into_le_bytes(bytes: u32, slice: &mut [u8]) { - let bytes: [u8; 4] = unsafe { std::mem::transmute::(bytes.to_le()) }; - for i in 0..4 { slice[i] = bytes[i]; } - } - - fn get_item_size_from_index(index: usize) -> usize { - // we shift 1 by three places, since the first possible item size is 8 - 1 << 3 << index - } - - fn get_heap_4bytes(&mut self, ptr: u32) -> Result<[u8; 4], Error> { - let mut arr = [0u8; 4]; - self.heap.get_into(self.ptr_offset + ptr, &mut arr)?; - Ok(arr) - } - - fn get_heap_byte(&mut self, ptr: u32) -> Result { - let mut arr = [0u8; 1]; - self.heap.get_into(self.ptr_offset + ptr, &mut arr)?; - Ok(arr[0]) - } - - fn set_heap(&mut self, ptr: u32, value: u8) -> Result<(), Error> { - 
self.heap.set(self.ptr_offset + ptr, &[value]) - } - - fn set_heap_4bytes(&mut self, ptr: u32, value: [u8; 4]) -> Result<(), Error> { - self.heap.set(self.ptr_offset + ptr, &value) - } - + /// Creates a new allocation heap which follows a freeing-bump strategy. + /// The maximum size which can be allocated at once is 16 MiB. + /// + /// # Arguments + /// + /// * `ptr_offset` - The pointers returned by `allocate()` start from this + /// offset on. The pointer offset needs to be aligned to a multiple of 8, + /// hence a padding might be added to align `ptr_offset` properly. + /// + /// * `heap_size` - The size available to this heap instance (in bytes) for + /// allocating memory. + /// + /// * `heap` - A `MemoryRef` to the available `MemoryInstance` which is + /// used as the heap. + /// + pub fn new(mem: MemoryRef) -> Self { + let current_size: Bytes = mem.current_size().into(); + let current_size = current_size.0 as u32; + let used_size = mem.used_size().0 as u32; + let heap_size = current_size - used_size; + + let mut ptr_offset = used_size; + let padding = ptr_offset % ALIGNMENT; + if padding != 0 { + ptr_offset += ALIGNMENT - padding; + } + + FreeingBumpHeapAllocator { + bumper: 0, + heads: [0; N], + heap: mem, + max_heap_size: heap_size, + ptr_offset: ptr_offset, + total_size: 0, + } + } + + /// Gets requested number of bytes to allocate and returns a pointer. + /// The maximum size which can be allocated at once is 16 MiB. + pub fn allocate(&mut self, size: u32) -> Result { + if size > MAX_POSSIBLE_ALLOCATION { + return Err(UserError(REQUESTED_SIZE_TOO_LARGE)); + } + + let size = size.max(8); + let item_size = size.next_power_of_two(); + if item_size + 8 + self.total_size > self.max_heap_size { + return Err(UserError(OUT_OF_SPACE)); + } + + let list_index = (item_size.trailing_zeros() - 3) as usize; + let ptr: u32 = if self.heads[list_index] != 0 { + // Something from the free list + let item = self.heads[list_index]; + let four_bytes = self.get_heap_4bytes(item).map_err(|_| { + UserError("Unable to get bytes at pointer taken from list of free items") + })?; + self.heads[list_index] = FreeingBumpHeapAllocator::le_bytes_to_u32(four_bytes); + item + 8 + } else { + // Nothing to be freed. Bump. + self.bump(item_size + 8) + 8 + }; + + for i in 1..8 { + self.set_heap(ptr - i, 255).map_err(|_| { + UserError("Unable to successively write bytes into heap at pointer prefix") + })?; + } + + self.set_heap(ptr - 8, list_index as u8) + .map_err(|_| UserError("Unable to write byte into heap at pointer prefix"))?; + + self.total_size = self.total_size + item_size + 8; + trace!(target: "wasm-heap", "Heap size is {} bytes after allocation", self.total_size); + + Ok(self.ptr_offset + ptr) + } + + /// Deallocates the space which was allocated for a pointer. + pub fn deallocate(&mut self, ptr: u32) -> Result<(), UserError> { + let ptr = ptr - self.ptr_offset; + if ptr < 8 { + return Err(UserError("Invalid pointer for deallocation")); + } + + let list_index = + self.get_heap_byte(ptr - 8) + .map_err(|_| UserError("Unable to access pointer prefix"))? 
as usize; + for i in 1..8 { + let heap_byte = self + .get_heap_byte(ptr - i) + .map_err(|_| UserError("Unable to write single bytes into heap at pointer"))?; + debug_assert!(heap_byte == 255) + } + let tail = self.heads[list_index]; + self.heads[list_index] = ptr - 8; + + let mut slice = self + .get_heap_4bytes(ptr - 8) + .map_err(|_| UserError("Unable to get 4 bytes from heap at pointer prefix"))?; + FreeingBumpHeapAllocator::write_u32_into_le_bytes(tail, &mut slice); + self.set_heap_4bytes(ptr - 8, slice) + .map_err(|_| UserError("Unable to write 4 bytes into heap at pointer prefix"))?; + + let item_size = FreeingBumpHeapAllocator::get_item_size_from_index(list_index); + self.total_size = self + .total_size + .checked_sub(item_size as u32 + 8) + .ok_or_else(|| UserError("Unable to subtract from total heap size without overflow"))?; + trace!(target: "wasm-heap", "Heap size is {} bytes after deallocation", self.total_size); + + Ok(()) + } + + fn bump(&mut self, n: u32) -> u32 { + let res = self.bumper; + self.bumper += n; + res + } + + fn le_bytes_to_u32(arr: [u8; 4]) -> u32 { + let bytes = [arr[0], arr[1], arr[2], arr[3]]; + unsafe { std::mem::transmute::<[u8; 4], u32>(bytes) }.to_le() + } + + fn write_u32_into_le_bytes(bytes: u32, slice: &mut [u8]) { + let bytes: [u8; 4] = unsafe { std::mem::transmute::(bytes.to_le()) }; + for i in 0..4 { + slice[i] = bytes[i]; + } + } + + fn get_item_size_from_index(index: usize) -> usize { + // we shift 1 by three places, since the first possible item size is 8 + 1 << 3 << index + } + + fn get_heap_4bytes(&mut self, ptr: u32) -> Result<[u8; 4], Error> { + let mut arr = [0u8; 4]; + self.heap.get_into(self.ptr_offset + ptr, &mut arr)?; + Ok(arr) + } + + fn get_heap_byte(&mut self, ptr: u32) -> Result { + let mut arr = [0u8; 1]; + self.heap.get_into(self.ptr_offset + ptr, &mut arr)?; + Ok(arr[0]) + } + + fn set_heap(&mut self, ptr: u32, value: u8) -> Result<(), Error> { + self.heap.set(self.ptr_offset + ptr, &[value]) + } + + fn set_heap_4bytes(&mut self, ptr: u32, value: [u8; 4]) -> Result<(), Error> { + self.heap.set(self.ptr_offset + ptr, &value) + } } #[cfg(test)] mod tests { - use super::*; - use wasmi::MemoryInstance; - use wasmi::memory_units::*; - - const PAGE_SIZE: u32 = 65536; - - fn set_offset(mem: MemoryRef, offset: usize) { - let offset: Vec = vec![255; offset]; - mem.set(0, &offset).unwrap(); - } - - #[test] - fn should_allocate_properly() { - // given - let mem = MemoryInstance::alloc(Pages(1), None).unwrap(); - let mut heap = FreeingBumpHeapAllocator::new(mem); - - // when - let ptr = heap.allocate(1).unwrap(); - - // then - assert_eq!(ptr, 8); - } - - #[test] - fn should_always_align_pointers_to_multiples_of_8() { - // given - let mem = MemoryInstance::alloc(Pages(1), None).unwrap(); - set_offset(mem.clone(), 13); - let mut heap = FreeingBumpHeapAllocator::new(mem); - - // when - let ptr = heap.allocate(1).unwrap(); - - // then - // the pointer must start at the next multiple of 8 from 13 - // + the prefix of 8 bytes. 
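The allocator above buckets every request into a power-of-two size class: `allocate` computes `list_index` as `trailing_zeros(item_size) - 3`, so 8 maps to class 0, 16 to class 1, and so on, and `get_item_size_from_index` inverts that with `1 << 3 << index`. A sketch of the pair with a round-trip check over the 22 classes implied by the 16 MiB `MAX_POSSIBLE_ALLOCATION`:

/// Size class for a request, as in `allocate` above: round up to a
/// power of two (minimum 8), then index by trailing zeros minus 3.
fn list_index(size: u32) -> usize {
    let item_size = size.max(8).next_power_of_two();
    (item_size.trailing_zeros() - 3) as usize
}

/// Inverse mapping, as in `get_item_size_from_index` above.
fn item_size_from_index(index: usize) -> usize {
    1 << 3 << index
}

fn main() {
    assert_eq!(list_index(1), 0); // 1 rounds up to 8 -> class 0
    assert_eq!(list_index(9), 1); // 9 rounds up to 16 -> class 1
    for index in 0..22 {
        // Round trip: every class size maps back to its own index.
        assert_eq!(list_index(item_size_from_index(index) as u32), index);
    }
}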
- assert_eq!(ptr, 24); - } - - #[test] - fn should_increment_pointers_properly() { - // given - let mem = MemoryInstance::alloc(Pages(1), None).unwrap(); - let mut heap = FreeingBumpHeapAllocator::new(mem); - - // when - let ptr1 = heap.allocate(1).unwrap(); - let ptr2 = heap.allocate(9).unwrap(); - let ptr3 = heap.allocate(1).unwrap(); - - // then - // a prefix of 8 bytes is prepended to each pointer - assert_eq!(ptr1, 8); - - // the prefix of 8 bytes + the content of ptr1 padded to the lowest possible - // item size of 8 bytes + the prefix of ptr1 - assert_eq!(ptr2, 24); - - // ptr2 + its content of 16 bytes + the prefix of 8 bytes - assert_eq!(ptr3, 24 + 16 + 8); - } - - #[test] - fn should_free_properly() { - // given - let mem = MemoryInstance::alloc(Pages(1), None).unwrap(); - let mut heap = FreeingBumpHeapAllocator::new(mem); - let ptr1 = heap.allocate(1).unwrap(); - // the prefix of 8 bytes is prepended to the pointer - assert_eq!(ptr1, 8); - - let ptr2 = heap.allocate(1).unwrap(); - // the prefix of 8 bytes + the content of ptr 1 is prepended to the pointer - assert_eq!(ptr2, 24); - - // when - heap.deallocate(ptr2).unwrap(); - - // then - // then the heads table should contain a pointer to the - // prefix of ptr2 in the leftmost entry - assert_eq!(heap.heads[0], ptr2 - 8); - } - - #[test] - fn should_deallocate_and_reallocate_properly() { - // given - let mem = MemoryInstance::alloc(Pages(1), None).unwrap(); - set_offset(mem.clone(), 13); - let padded_offset = 16; - let mut heap = FreeingBumpHeapAllocator::new(mem); - - let ptr1 = heap.allocate(1).unwrap(); - // the prefix of 8 bytes is prepended to the pointer - assert_eq!(ptr1, padded_offset + 8); - - let ptr2 = heap.allocate(9).unwrap(); - // the padded_offset + the previously allocated ptr (8 bytes prefix + - // 8 bytes content) + the prefix of 8 bytes which is prepended to the - // current pointer - assert_eq!(ptr2, padded_offset + 16 + 8); - - // when - heap.deallocate(ptr2).unwrap(); - let ptr3 = heap.allocate(9).unwrap(); - - // then - // should have re-allocated - assert_eq!(ptr3, padded_offset + 16 + 8); - assert_eq!(heap.heads, [0; N]); - } - - #[test] - fn should_build_linked_list_of_free_areas_properly() { - // given - let mem = MemoryInstance::alloc(Pages(1), None).unwrap(); - let mut heap = FreeingBumpHeapAllocator::new(mem); - - let ptr1 = heap.allocate(8).unwrap(); - let ptr2 = heap.allocate(8).unwrap(); - let ptr3 = heap.allocate(8).unwrap(); - - // when - heap.deallocate(ptr1).unwrap(); - heap.deallocate(ptr2).unwrap(); - heap.deallocate(ptr3).unwrap(); - - // then - let mut expected = [0; N]; - expected[0] = ptr3 - 8; - assert_eq!(heap.heads, expected); - - let ptr4 = heap.allocate(8).unwrap(); - assert_eq!(ptr4, ptr3); - - expected[0] = ptr2 - 8; - assert_eq!(heap.heads, expected); - } - - #[test] - fn should_not_allocate_if_too_large() { - // given - let mem = MemoryInstance::alloc(Pages(1), Some(Pages(1))).unwrap(); - set_offset(mem.clone(), 13); - let mut heap = FreeingBumpHeapAllocator::new(mem); - - // when - let ptr = heap.allocate(PAGE_SIZE - 13); - - // then - assert_eq!(ptr.is_err(), true); - if let Err(err) = ptr { - assert_eq!(err, UserError(OUT_OF_SPACE)); - } - } - - #[test] - fn should_not_allocate_if_full() { - // given - let mem = MemoryInstance::alloc(Pages(1), Some(Pages(1))).unwrap(); - let mut heap = FreeingBumpHeapAllocator::new(mem); - let ptr1 = heap.allocate((PAGE_SIZE / 2) - 8).unwrap(); - assert_eq!(ptr1, 8); - - // when - let ptr2 = heap.allocate(PAGE_SIZE / 2); - - // then - // 
-		assert_eq!(ptr2.is_err(), true);
-		if let Err(err) = ptr2 {
-			assert_eq!(err, UserError(OUT_OF_SPACE));
-		}
-	}
-
-	#[test]
-	fn should_allocate_max_possible_allocation_size() {
-		// given
-		let pages_needed = (MAX_POSSIBLE_ALLOCATION as usize / PAGE_SIZE as usize) + 1;
-		let mem = MemoryInstance::alloc(Pages(pages_needed), Some(Pages(pages_needed))).unwrap();
-		let mut heap = FreeingBumpHeapAllocator::new(mem);
-
-		// when
-		let ptr = heap.allocate(MAX_POSSIBLE_ALLOCATION).unwrap();
-
-		// then
-		assert_eq!(ptr, 8);
-	}
-
-	#[test]
-	fn should_not_allocate_if_requested_size_too_large() {
-		// given
-		let mem = MemoryInstance::alloc(Pages(1), None).unwrap();
-		let mut heap = FreeingBumpHeapAllocator::new(mem);
-
-		// when
-		let ptr = heap.allocate(MAX_POSSIBLE_ALLOCATION + 1);
-
-		// then
-		assert_eq!(ptr.is_err(), true);
-		if let Err(err) = ptr {
-			assert_eq!(err, UserError(REQUESTED_SIZE_TOO_LARGE));
-		}
-	}
-
-	#[test]
-	fn should_include_prefixes_in_total_heap_size() {
-		// given
-		let mem = MemoryInstance::alloc(Pages(1), None).unwrap();
-		set_offset(mem.clone(), 1);
-		let mut heap = FreeingBumpHeapAllocator::new(mem);
-
-		// when
-		// an item size of 16 must be used then
-		heap.allocate(9).unwrap();
-
-		// then
-		assert_eq!(heap.total_size, 8 + 16);
-	}
-
-	#[test]
-	fn should_calculate_total_heap_size_to_zero() {
-		// given
-		let mem = MemoryInstance::alloc(Pages(1), None).unwrap();
-		set_offset(mem.clone(), 13);
-		let mut heap = FreeingBumpHeapAllocator::new(mem);
-
-		// when
-		let ptr = heap.allocate(42).unwrap();
-		assert_eq!(ptr, 16 + 8);
-		heap.deallocate(ptr).unwrap();
-
-		// then
-		assert_eq!(heap.total_size, 0);
-	}
-
-	#[test]
-	fn should_calculate_total_size_of_zero() {
-		// given
-		let mem = MemoryInstance::alloc(Pages(1), None).unwrap();
-		set_offset(mem.clone(), 19);
-		let mut heap = FreeingBumpHeapAllocator::new(mem);
-
-		// when
-		for _ in 1..10 {
-			let ptr = heap.allocate(42).unwrap();
-			heap.deallocate(ptr).unwrap();
-		}
-
-		// then
-		assert_eq!(heap.total_size, 0);
-	}
-
-	#[test]
-	fn should_write_u32_correctly_into_le() {
-		// given
-		let mut heap = vec![0; 5];
-
-		// when
-		FreeingBumpHeapAllocator::write_u32_into_le_bytes(1, &mut heap[0..4]);
-
-		// then
-		assert_eq!(heap, [1, 0, 0, 0, 0]);
-	}
-
-	#[test]
-	fn should_write_u32_max_correctly_into_le() {
-		// given
-		let mut heap = vec![0; 5];
-
-		// when
-		FreeingBumpHeapAllocator::write_u32_into_le_bytes(u32::max_value(), &mut heap[0..4]);
-
-		// then
-		assert_eq!(heap, [255, 255, 255, 255, 0]);
-	}
-
-	#[test]
-	fn should_get_item_size_from_index() {
-		// given
-		let index = 0;
-
-		// when
-		let item_size = FreeingBumpHeapAllocator::get_item_size_from_index(index);
-
-		// then
-		assert_eq!(item_size, 8);
-	}
-
-	#[test]
-	fn should_get_max_item_size_from_index() {
-		// given
-		let index = 21;
-
-		// when
-		let item_size = FreeingBumpHeapAllocator::get_item_size_from_index(index);
-
-		// then
-		assert_eq!(item_size as u32, MAX_POSSIBLE_ALLOCATION);
-	}
+    use super::*;
+    use wasmi::memory_units::*;
+    use wasmi::MemoryInstance;
+
+    const PAGE_SIZE: u32 = 65536;
+
+    fn set_offset(mem: MemoryRef, offset: usize) {
+        let offset: Vec<u8> = vec![255; offset];
+        mem.set(0, &offset).unwrap();
+    }
+
+    #[test]
+    fn should_allocate_properly() {
+        // given
+        let mem = MemoryInstance::alloc(Pages(1), None).unwrap();
+        let mut heap = FreeingBumpHeapAllocator::new(mem);
+
+        // when
+        let ptr = heap.allocate(1).unwrap();
+
+        // then
+        assert_eq!(ptr, 8);
+    }
+
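The allocator tests above and below all follow from two pieces of arithmetic: `get_item_size_from_index` maps a free-list index to a size class of `8 << index` bytes, and every allocation is charged an extra 8-byte prefix that holds the free-list link. A standalone sketch of that accounting (illustrative only, not part of the patch; `item_size` and `total_charged` are hypothetical helpers):

    // Size class for a free-list index: 8, 16, 32, ... doubling per index.
    fn item_size(index: usize) -> usize {
        1 << 3 << index
    }

    // Bytes charged against `total_size` for a request: the request rounded up
    // to the next size class, plus the 8-byte prefix.
    fn total_charged(requested: u32) -> u32 {
        let mut index = 0;
        while (item_size(index) as u32) < requested.max(8) {
            index += 1;
        }
        item_size(index) as u32 + 8
    }

    fn main() {
        assert_eq!(item_size(0), 8); // matches `should_get_item_size_from_index`
        assert_eq!(total_charged(1), 8 + 8); // `should_allocate_properly`: first pointer lands at 8
        assert_eq!(total_charged(9), 16 + 8); // `should_include_prefixes_in_total_heap_size`
    }
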
+    #[test]
+    fn should_always_align_pointers_to_multiples_of_8() {
+        // given
+        let mem = MemoryInstance::alloc(Pages(1), None).unwrap();
+        set_offset(mem.clone(), 13);
+        let mut heap = FreeingBumpHeapAllocator::new(mem);
+
+        // when
+        let ptr = heap.allocate(1).unwrap();
+
+        // then
+        // the pointer must start at the next multiple of 8 from 13
+        // + the prefix of 8 bytes.
+        assert_eq!(ptr, 24);
+    }
+
+    #[test]
+    fn should_increment_pointers_properly() {
+        // given
+        let mem = MemoryInstance::alloc(Pages(1), None).unwrap();
+        let mut heap = FreeingBumpHeapAllocator::new(mem);
+
+        // when
+        let ptr1 = heap.allocate(1).unwrap();
+        let ptr2 = heap.allocate(9).unwrap();
+        let ptr3 = heap.allocate(1).unwrap();
+
+        // then
+        // a prefix of 8 bytes is prepended to each pointer
+        assert_eq!(ptr1, 8);
+
+        // the prefix of 8 bytes + the content of ptr1 padded to the lowest possible
+        // item size of 8 bytes + the prefix of ptr1
+        assert_eq!(ptr2, 24);
+
+        // ptr2 + its content of 16 bytes + the prefix of 8 bytes
+        assert_eq!(ptr3, 24 + 16 + 8);
+    }
+
+    #[test]
+    fn should_free_properly() {
+        // given
+        let mem = MemoryInstance::alloc(Pages(1), None).unwrap();
+        let mut heap = FreeingBumpHeapAllocator::new(mem);
+        let ptr1 = heap.allocate(1).unwrap();
+        // the prefix of 8 bytes is prepended to the pointer
+        assert_eq!(ptr1, 8);
+
+        let ptr2 = heap.allocate(1).unwrap();
+        // the prefix of 8 bytes + the content of ptr 1 is prepended to the pointer
+        assert_eq!(ptr2, 24);
+
+        // when
+        heap.deallocate(ptr2).unwrap();
+
+        // then
+        // then the heads table should contain a pointer to the
+        // prefix of ptr2 in the leftmost entry
+        assert_eq!(heap.heads[0], ptr2 - 8);
+    }
+
+    #[test]
+    fn should_deallocate_and_reallocate_properly() {
+        // given
+        let mem = MemoryInstance::alloc(Pages(1), None).unwrap();
+        set_offset(mem.clone(), 13);
+        let padded_offset = 16;
+        let mut heap = FreeingBumpHeapAllocator::new(mem);
+
+        let ptr1 = heap.allocate(1).unwrap();
+        // the prefix of 8 bytes is prepended to the pointer
+        assert_eq!(ptr1, padded_offset + 8);
+
+        let ptr2 = heap.allocate(9).unwrap();
+        // the padded_offset + the previously allocated ptr (8 bytes prefix +
+        // 8 bytes content) + the prefix of 8 bytes which is prepended to the
+        // current pointer
+        assert_eq!(ptr2, padded_offset + 16 + 8);
+
+        // when
+        heap.deallocate(ptr2).unwrap();
+        let ptr3 = heap.allocate(9).unwrap();
+
+        // then
+        // should have re-allocated
+        assert_eq!(ptr3, padded_offset + 16 + 8);
+        assert_eq!(heap.heads, [0; N]);
+    }
+
+    #[test]
+    fn should_build_linked_list_of_free_areas_properly() {
+        // given
+        let mem = MemoryInstance::alloc(Pages(1), None).unwrap();
+        let mut heap = FreeingBumpHeapAllocator::new(mem);
+
+        let ptr1 = heap.allocate(8).unwrap();
+        let ptr2 = heap.allocate(8).unwrap();
+        let ptr3 = heap.allocate(8).unwrap();
+
+        // when
+        heap.deallocate(ptr1).unwrap();
+        heap.deallocate(ptr2).unwrap();
+        heap.deallocate(ptr3).unwrap();
+
+        // then
+        let mut expected = [0; N];
+        expected[0] = ptr3 - 8;
+        assert_eq!(heap.heads, expected);
+
+        let ptr4 = heap.allocate(8).unwrap();
+        assert_eq!(ptr4, ptr3);
+
+        expected[0] = ptr2 - 8;
+        assert_eq!(heap.heads, expected);
+    }
+
+    #[test]
+    fn should_not_allocate_if_too_large() {
+        // given
+        let mem = MemoryInstance::alloc(Pages(1), Some(Pages(1))).unwrap();
+        set_offset(mem.clone(), 13);
+        let mut heap = FreeingBumpHeapAllocator::new(mem);
+
+        // when
+        let ptr = heap.allocate(PAGE_SIZE - 13);
+
+        // then
+        assert_eq!(ptr.is_err(), true);
+        if let Err(err) = ptr {
+            assert_eq!(err, UserError(OUT_OF_SPACE));
+        }
+    }
+
+    #[test]
+    fn should_not_allocate_if_full() {
+        // given
+        let mem = MemoryInstance::alloc(Pages(1), Some(Pages(1))).unwrap();
+        let mut heap = FreeingBumpHeapAllocator::new(mem);
+        let ptr1 = heap.allocate((PAGE_SIZE / 2) - 8).unwrap();
+        assert_eq!(ptr1, 8);
+
+        // when
+        let ptr2 = heap.allocate(PAGE_SIZE / 2);
+
+        // then
+        // there is no room for another half page incl. its 8 byte prefix
+        assert_eq!(ptr2.is_err(), true);
+        if let Err(err) = ptr2 {
+            assert_eq!(err, UserError(OUT_OF_SPACE));
+        }
+    }
+
+    #[test]
+    fn should_allocate_max_possible_allocation_size() {
+        // given
+        let pages_needed = (MAX_POSSIBLE_ALLOCATION as usize / PAGE_SIZE as usize) + 1;
+        let mem = MemoryInstance::alloc(Pages(pages_needed), Some(Pages(pages_needed))).unwrap();
+        let mut heap = FreeingBumpHeapAllocator::new(mem);
+
+        // when
+        let ptr = heap.allocate(MAX_POSSIBLE_ALLOCATION).unwrap();
+
+        // then
+        assert_eq!(ptr, 8);
+    }
+
+    #[test]
+    fn should_not_allocate_if_requested_size_too_large() {
+        // given
+        let mem = MemoryInstance::alloc(Pages(1), None).unwrap();
+        let mut heap = FreeingBumpHeapAllocator::new(mem);
+
+        // when
+        let ptr = heap.allocate(MAX_POSSIBLE_ALLOCATION + 1);
+
+        // then
+        assert_eq!(ptr.is_err(), true);
+        if let Err(err) = ptr {
+            assert_eq!(err, UserError(REQUESTED_SIZE_TOO_LARGE));
+        }
+    }
+
+    #[test]
+    fn should_include_prefixes_in_total_heap_size() {
+        // given
+        let mem = MemoryInstance::alloc(Pages(1), None).unwrap();
+        set_offset(mem.clone(), 1);
+        let mut heap = FreeingBumpHeapAllocator::new(mem);
+
+        // when
+        // an item size of 16 must be used then
+        heap.allocate(9).unwrap();
+
+        // then
+        assert_eq!(heap.total_size, 8 + 16);
+    }
+
+    #[test]
+    fn should_calculate_total_heap_size_to_zero() {
+        // given
+        let mem = MemoryInstance::alloc(Pages(1), None).unwrap();
+        set_offset(mem.clone(), 13);
+        let mut heap = FreeingBumpHeapAllocator::new(mem);
+
+        // when
+        let ptr = heap.allocate(42).unwrap();
+        assert_eq!(ptr, 16 + 8);
+        heap.deallocate(ptr).unwrap();
+
+        // then
+        assert_eq!(heap.total_size, 0);
+    }
+
+    #[test]
+    fn should_calculate_total_size_of_zero() {
+        // given
+        let mem = MemoryInstance::alloc(Pages(1), None).unwrap();
+        set_offset(mem.clone(), 19);
+        let mut heap = FreeingBumpHeapAllocator::new(mem);
+
+        // when
+        for _ in 1..10 {
+            let ptr = heap.allocate(42).unwrap();
+            heap.deallocate(ptr).unwrap();
+        }
+
+        // then
+        assert_eq!(heap.total_size, 0);
+    }
+
+    #[test]
+    fn should_write_u32_correctly_into_le() {
+        // given
+        let mut heap = vec![0; 5];
+
+        // when
+        FreeingBumpHeapAllocator::write_u32_into_le_bytes(1, &mut heap[0..4]);
+
+        // then
+        assert_eq!(heap, [1, 0, 0, 0, 0]);
+    }
+
+    #[test]
+    fn should_write_u32_max_correctly_into_le() {
+        // given
+        let mut heap = vec![0; 5];
+
+        // when
+        FreeingBumpHeapAllocator::write_u32_into_le_bytes(u32::max_value(), &mut heap[0..4]);
+
+        // then
+        assert_eq!(heap, [255, 255, 255, 255, 0]);
+    }
+
+    #[test]
+    fn should_get_item_size_from_index() {
+        // given
+        let index = 0;
+
+        // when
+        let item_size = FreeingBumpHeapAllocator::get_item_size_from_index(index);
+
+        // then
+        assert_eq!(item_size, 8);
+    }
+
+    #[test]
+    fn should_get_max_item_size_from_index() {
+        // given
+        let index = 21;
+
+        // when
+        let item_size = FreeingBumpHeapAllocator::get_item_size_from_index(index);
+
+        // then
+        assert_eq!(item_size as u32, MAX_POSSIBLE_ALLOCATION);
+    }
 }
diff --git a/core/executor/src/error.rs b/core/executor/src/error.rs
index b27ccf01bf..4428b5ed49 100644
--- a/core/executor/src/error.rs
+++ b/core/executor/src/error.rs
@@ -20,70 +20,70 @@ // https://github.com/paritytech/substrate/issues/1547
 #![allow(deprecated)]
 
-use state_machine;
-use serializer;
-use wasmi;
 use error_chain::{
-	error_chain, error_chain_processing, impl_error_chain_processed,
-	impl_extract_backtrace, impl_error_chain_kind
+    error_chain, error_chain_processing, impl_error_chain_kind, impl_error_chain_processed,
+    impl_extract_backtrace,
 };
+use serializer;
+use state_machine;
+use wasmi;
 
 error_chain! {
-	foreign_links {
-		InvalidData(serializer::Error) #[doc = "Unserializable Data"];
-		Trap(wasmi::Trap) #[doc = "Trap occurred during execution"];
-		Wasmi(wasmi::Error) #[doc = "Wasmi loading/instantiating error"];
-	}
-
-	errors {
-		/// Method is not found
-		MethodNotFound(t: String) {
-			description("method not found"),
-			display("Method not found: '{}'", t),
-		}
-
-		/// Code is invalid (expected single byte)
-		InvalidCode(c: Vec<u8>) {
-			description("invalid code"),
-			display("Invalid Code: {:?}", c),
-		}
-
-		/// Could not get runtime version.
-		VersionInvalid {
-			description("Runtime version error"),
-			display("On-chain runtime does not specify version"),
-		}
-
-		/// Externalities have failed.
-		Externalities {
-			description("externalities failure"),
-			display("Externalities error"),
-		}
-
-		/// Invalid index.
-		InvalidIndex {
-			description("index given was not in range"),
-			display("Invalid index provided"),
-		}
-
-		/// Invalid return type.
-		InvalidReturn {
-			description("u64 was not returned"),
-			display("Invalid type returned (should be u64)"),
-		}
-
-		/// Runtime failed.
-		Runtime {
-			description("runtime failure"),
-			display("Runtime error"),
-		}
-
-		/// Runtime failed.
-		InvalidMemoryReference {
-			description("invalid memory reference"),
-			display("Invalid memory reference"),
-		}
-	}
+    foreign_links {
+        InvalidData(serializer::Error) #[doc = "Unserializable Data"];
+        Trap(wasmi::Trap) #[doc = "Trap occurred during execution"];
+        Wasmi(wasmi::Error) #[doc = "Wasmi loading/instantiating error"];
+    }
+
+    errors {
+        /// Method is not found
+        MethodNotFound(t: String) {
+            description("method not found"),
+            display("Method not found: '{}'", t),
+        }
+
+        /// Code is invalid (expected single byte)
+        InvalidCode(c: Vec<u8>) {
+            description("invalid code"),
+            display("Invalid Code: {:?}", c),
+        }
+
+        /// Could not get runtime version.
+        VersionInvalid {
+            description("Runtime version error"),
+            display("On-chain runtime does not specify version"),
+        }
+
+        /// Externalities have failed.
+        Externalities {
+            description("externalities failure"),
+            display("Externalities error"),
+        }
+
+        /// Invalid index.
+        InvalidIndex {
+            description("index given was not in range"),
+            display("Invalid index provided"),
+        }
+
+        /// Invalid return type.
+        InvalidReturn {
+            description("u64 was not returned"),
+            display("Invalid type returned (should be u64)"),
+        }
+
+        /// Runtime failed.
+        Runtime {
+            description("runtime failure"),
+            display("Runtime error"),
+        }
+
+        /// Runtime failed.
+        InvalidMemoryReference {
+            description("invalid memory reference"),
+            display("Invalid memory reference"),
+        }
+    }
 }
 
 impl state_machine::Error for Error {}
diff --git a/core/executor/src/lib.rs b/core/executor/src/lib.rs
index fa7cc71eea..9c26f80e2c 100644
--- a/core/executor/src/lib.rs
+++ b/core/executor/src/lib.rs
@@ -26,34 +26,34 @@
 //! It is left as is for now as it might be removed before this is ever done.
 #![warn(missing_docs)]
-#![recursion_limit="128"]
+#![recursion_limit = "128"]
 
 #[macro_use]
 mod wasm_utils;
 mod wasm_executor;
 #[macro_use]
 mod native_executor;
-mod sandbox;
 mod allocator;
+mod sandbox;
 pub mod error;
 
-pub use wasmi;
-pub use wasm_executor::WasmExecutor;
-pub use native_executor::{with_native_environment, NativeExecutor, NativeExecutionDispatch};
-pub use state_machine::Externalities;
-pub use runtime_version::{RuntimeVersion, NativeVersion};
+pub use native_executor::{with_native_environment, NativeExecutionDispatch, NativeExecutor};
 pub use parity_codec::Codec;
 #[doc(hidden)]
 pub use primitives::Blake2Hasher;
+pub use runtime_version::{NativeVersion, RuntimeVersion};
+pub use state_machine::Externalities;
+pub use wasm_executor::WasmExecutor;
+pub use wasmi;
 
 /// Provides runtime information.
 pub trait RuntimeInfo {
-	/// Native runtime information.
-	fn native_version(&self) -> &NativeVersion;
-
-	/// Extract RuntimeVersion of given :code block
-	fn runtime_version<E: Externalities<Blake2Hasher>> (
-		&self,
-		ext: &mut E,
-	) -> Option<RuntimeVersion>;
+    /// Native runtime information.
+    fn native_version(&self) -> &NativeVersion;
+
+    /// Extract RuntimeVersion of given :code block
+    fn runtime_version<E: Externalities<Blake2Hasher>>(
+        &self,
+        ext: &mut E,
+    ) -> Option<RuntimeVersion>;
 }
diff --git a/core/executor/src/native_executor.rs b/core/executor/src/native_executor.rs
index 0944cbdbbd..2d9e4bb929 100644
--- a/core/executor/src/native_executor.rs
+++ b/core/executor/src/native_executor.rs
@@ -14,18 +14,22 @@
 // You should have received a copy of the GNU General Public License
 // along with Substrate. If not, see <http://www.gnu.org/licenses/>.
 
-use std::{borrow::BorrowMut, result, cell::{RefMut, RefCell}};
 use crate::error::{Error, ErrorKind, Result};
-use state_machine::{CodeExecutor, Externalities};
 use crate::wasm_executor::WasmExecutor;
-use wasmi::{Module as WasmModule, ModuleRef as WasmModuleInstanceRef};
-use runtime_version::{NativeVersion, RuntimeVersion};
-use std::{collections::HashMap, panic::UnwindSafe};
-use parity_codec::{Decode, Encode};
 use crate::RuntimeInfo;
-use primitives::{Blake2Hasher, NativeOrEncoded};
-use primitives::storage::well_known_keys;
 use log::trace;
+use parity_codec::{Decode, Encode};
+use primitives::storage::well_known_keys;
+use primitives::{Blake2Hasher, NativeOrEncoded};
+use runtime_version::{NativeVersion, RuntimeVersion};
+use state_machine::{CodeExecutor, Externalities};
+use std::{
+    borrow::BorrowMut,
+    cell::{RefCell, RefMut},
+    result,
+};
+use std::{collections::HashMap, panic::UnwindSafe};
+use wasmi::{Module as WasmModule, ModuleRef as WasmModuleInstanceRef};
 
 /// Default num of pages for the heap
 const DEFAULT_HEAP_PAGES: u64 = 1024;
@@ -34,171 +38,186 @@ const DEFAULT_HEAP_PAGES: u64 = 1024;
 // Is it compatible enough to run this natively or do we need to fall back on the WasmModule
 
 enum RuntimePreproc {
-	InvalidCode,
-	ValidCode(WasmModuleInstanceRef, Option<RuntimeVersion>),
+    InvalidCode,
+    ValidCode(WasmModuleInstanceRef, Option<RuntimeVersion>),
 }
 
 type CacheType = HashMap<[u8; 32], RuntimePreproc>;
 thread_local! {
-	static RUNTIMES_CACHE: RefCell<CacheType> = RefCell::new(HashMap::new());
+    static RUNTIMES_CACHE: RefCell<CacheType> = RefCell::new(HashMap::new());
 }
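The `RUNTIMES_CACHE` above is a thread-local `RefCell<HashMap>` keyed by the hash of the on-chain `:code` blob, so each runtime is prepared at most once per thread. A reduced sketch of that pattern (simplified, assumed names; the real entries hold a wasmi module reference plus its decoded `RuntimeVersion`):

    use std::cell::RefCell;
    use std::collections::HashMap;

    // Stand-in for the prepared runtime entry.
    #[derive(Clone)]
    struct Prepared(u32);

    thread_local! {
        static CACHE: RefCell<HashMap<[u8; 32], Prepared>> = RefCell::new(HashMap::new());
    }

    fn fetch_or_prepare(code_hash: [u8; 32]) -> Prepared {
        CACHE.with(|c| {
            c.borrow_mut()
                .entry(code_hash)
                // the expensive preparation runs only on a cache miss
                .or_insert_with(|| Prepared(0))
                .clone()
        })
    }
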
 
 /// fetch a runtime version from the cache or if there is no cached version yet, create
 /// the runtime version entry for `code`, determines whether `Compatibility::IsCompatible`
 /// can be used by comparing returned RuntimeVersion to `ref_version`
 fn fetch_cached_runtime_version<'a, E: Externalities<Blake2Hasher>>(
-	wasm_executor: &WasmExecutor,
-	cache: &'a mut RefMut<CacheType>,
-	ext: &mut E,
-	default_heap_pages: Option<u64>,
+    wasm_executor: &WasmExecutor,
+    cache: &'a mut RefMut<CacheType>,
+    ext: &mut E,
+    default_heap_pages: Option<u64>,
 ) -> Result<(&'a WasmModuleInstanceRef, &'a Option<RuntimeVersion>)> {
-
-	let code_hash = match ext.storage_hash(well_known_keys::CODE) {
-		Some(code_hash) => code_hash,
-		None => return Err(ErrorKind::InvalidCode(vec![]).into()),
-	};
-	let maybe_runtime_preproc = cache.borrow_mut().entry(code_hash.into())
-		.or_insert_with(|| {
-			let code = match ext.storage(well_known_keys::CODE) {
-				Some(code) => code,
-				None => return RuntimePreproc::InvalidCode,
-			};
-			let heap_pages = ext.storage(well_known_keys::HEAP_PAGES)
-				.and_then(|pages| u64::decode(&mut &pages[..]))
-				.or(default_heap_pages)
-				.unwrap_or(DEFAULT_HEAP_PAGES);
-			match WasmModule::from_buffer(code)
-				.map_err(|_| ErrorKind::InvalidCode(vec![]).into())
-				.and_then(|module| wasm_executor.prepare_module(ext, heap_pages as usize, &module))
-			{
-				Ok(module) => {
-					let version = wasm_executor.call_in_wasm_module(ext, &module, "Core_version", &[])
-						.ok()
-						.and_then(|v| RuntimeVersion::decode(&mut v.as_slice()));
-					RuntimePreproc::ValidCode(module, version)
-				}
-				Err(e) => {
-					trace!(target: "executor", "Invalid code presented to executor ({:?})", e);
-					RuntimePreproc::InvalidCode
-				}
-			}
-		});
-	match maybe_runtime_preproc {
-		RuntimePreproc::InvalidCode => {
-			let code = ext.storage(well_known_keys::CODE).unwrap_or(vec![]);
-			Err(ErrorKind::InvalidCode(code).into())
-		},
-		RuntimePreproc::ValidCode(m, v) => {
-			Ok((m, v))
-		}
-	}
+    let code_hash = match ext.storage_hash(well_known_keys::CODE) {
+        Some(code_hash) => code_hash,
+        None => return Err(ErrorKind::InvalidCode(vec![]).into()),
+    };
+    let maybe_runtime_preproc = cache
+        .borrow_mut()
+        .entry(code_hash.into())
+        .or_insert_with(|| {
+            let code = match ext.storage(well_known_keys::CODE) {
+                Some(code) => code,
+                None => return RuntimePreproc::InvalidCode,
+            };
+            let heap_pages = ext
+                .storage(well_known_keys::HEAP_PAGES)
+                .and_then(|pages| u64::decode(&mut &pages[..]))
+                .or(default_heap_pages)
+                .unwrap_or(DEFAULT_HEAP_PAGES);
+            match WasmModule::from_buffer(code)
+                .map_err(|_| ErrorKind::InvalidCode(vec![]).into())
+                .and_then(|module| wasm_executor.prepare_module(ext, heap_pages as usize, &module))
+            {
+                Ok(module) => {
+                    let version = wasm_executor
+                        .call_in_wasm_module(ext, &module, "Core_version", &[])
+                        .ok()
+                        .and_then(|v| RuntimeVersion::decode(&mut v.as_slice()));
+                    RuntimePreproc::ValidCode(module, version)
+                }
+                Err(e) => {
+                    trace!(target: "executor", "Invalid code presented to executor ({:?})", e);
+                    RuntimePreproc::InvalidCode
+                }
+            }
+        });
+    match maybe_runtime_preproc {
+        RuntimePreproc::InvalidCode => {
+            let code = ext.storage(well_known_keys::CODE).unwrap_or(vec![]);
+            Err(ErrorKind::InvalidCode(code).into())
+        }
+        RuntimePreproc::ValidCode(m, v) => Ok((m, v)),
+    }
 }
 
 fn safe_call<F, U>(f: F) -> Result<U>
-	where F: UnwindSafe + FnOnce() -> U
+where
+    F: UnwindSafe + FnOnce() -> U,
 {
-	// Substrate uses custom panic hook that terminates process on panic. Disable termination for the native call.
-	let _guard = panic_handler::AbortGuard::new(false);
-	::std::panic::catch_unwind(f).map_err(|_| ErrorKind::Runtime.into())
+    // Substrate uses custom panic hook that terminates process on panic. Disable termination for the native call.
+    let _guard = panic_handler::AbortGuard::new(false);
+    ::std::panic::catch_unwind(f).map_err(|_| ErrorKind::Runtime.into())
 }
 
 /// Set up the externalities and safe calling environment to execute calls to a native runtime.
 ///
 /// If the inner closure panics, it will be caught and return an error.
 pub fn with_native_environment<F, U>(ext: &mut Externalities<Blake2Hasher>, f: F) -> Result<U>
-	where F: UnwindSafe + FnOnce() -> U
+where
+    F: UnwindSafe + FnOnce() -> U,
 {
-	::runtime_io::with_externalities(ext, move || safe_call(f))
+    ::runtime_io::with_externalities(ext, move || safe_call(f))
 }
 
 /// Delegate for dispatching a CodeExecutor call to native code.
 pub trait NativeExecutionDispatch: Send + Sync {
-	/// Get the wasm code that the native dispatch will be equivalent to.
-	fn native_equivalent() -> &'static [u8];
+    /// Get the wasm code that the native dispatch will be equivalent to.
+    fn native_equivalent() -> &'static [u8];
 
-	/// Dispatch a method and input data to be executed natively. Returns `Some` result or `None`
-	/// if the `method` is unknown. Panics if there's an unrecoverable error.
-	// fn dispatch(ext: &mut Externalities<Blake2Hasher>, method: &str, data: &[u8]) -> Result<Vec<u8>>;
-	fn dispatch(ext: &mut Externalities<Blake2Hasher>, method: &str, data: &[u8]) -> Result<Vec<u8>>;
+    /// Dispatch a method and input data to be executed natively. Returns `Some` result or `None`
+    /// if the `method` is unknown. Panics if there's an unrecoverable error.
+    // fn dispatch(ext: &mut Externalities<Blake2Hasher>, method: &str, data: &[u8]) -> Result<Vec<u8>>;
+    fn dispatch(
+        ext: &mut Externalities<Blake2Hasher>,
+        method: &str,
+        data: &[u8],
+    ) -> Result<Vec<u8>>;
 
-	/// Provide native runtime version.
-	fn native_version() -> NativeVersion;
+    /// Provide native runtime version.
+    fn native_version() -> NativeVersion;
 
-	/// Construct corresponding `NativeExecutor`
-	fn new(default_heap_pages: Option<u64>) -> NativeExecutor<Self> where Self: Sized;
+    /// Construct corresponding `NativeExecutor`
+    fn new(default_heap_pages: Option<u64>) -> NativeExecutor<Self>
+    where
+        Self: Sized;
 }
 
 /// A generic `CodeExecutor` implementation that uses a delegate to determine wasm code equivalence
 /// and dispatch to native code when possible, falling back on `WasmExecutor` when not.
 #[derive(Debug)]
 pub struct NativeExecutor<D: NativeExecutionDispatch> {
-	/// Dummy field to avoid the compiler complaining about us not using `D`.
-	_dummy: ::std::marker::PhantomData<D>,
-	/// The fallback executor in case native isn't available.
-	fallback: WasmExecutor,
-	/// Native runtime version info.
-	native_version: NativeVersion,
-	/// The default number of 64KB pages to allocate for Wasm execution.
-	default_heap_pages: Option<u64>,
+    /// Dummy field to avoid the compiler complaining about us not using `D`.
+    _dummy: ::std::marker::PhantomData<D>,
+    /// The fallback executor in case native isn't available.
+    fallback: WasmExecutor,
+    /// Native runtime version info.
+    native_version: NativeVersion,
+    /// The default number of 64KB pages to allocate for Wasm execution.
+    default_heap_pages: Option<u64>,
 }
 
 impl<D: NativeExecutionDispatch> NativeExecutor<D> {
-	/// Create new instance.
-	pub fn new(default_heap_pages: Option<u64>) -> Self {
-		NativeExecutor {
-			_dummy: Default::default(),
-			fallback: WasmExecutor::new(),
-			native_version: D::native_version(),
-			default_heap_pages,
-		}
-	}
+    /// Create new instance.
+    pub fn new(default_heap_pages: Option<u64>) -> Self {
+        NativeExecutor {
+            _dummy: Default::default(),
+            fallback: WasmExecutor::new(),
+            native_version: D::native_version(),
+            default_heap_pages,
+        }
+    }
 }
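`NativeExecutor<D>` never stores a value of type `D`; the `_dummy: PhantomData<D>` field only ties the dispatch type into the struct so that associated functions such as `D::native_version()` can be called. The same pattern in isolation (illustrative trait and names, not the patch's API):

    use std::marker::PhantomData;

    trait Dispatch {
        fn version() -> u32;
    }

    struct Executor<D> {
        // Zero-sized marker: keeps `D` part of the type without storing data,
        // exactly the role `_dummy` plays in `NativeExecutor<D>` above.
        _dummy: PhantomData<D>,
    }

    impl<D: Dispatch> Executor<D> {
        fn new() -> Self {
            Executor { _dummy: PhantomData }
        }

        fn version(&self) -> u32 {
            D::version()
        }
    }
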
 
 impl<D: NativeExecutionDispatch> Clone for NativeExecutor<D> {
-	fn clone(&self) -> Self {
-		NativeExecutor {
-			_dummy: Default::default(),
-			fallback: self.fallback.clone(),
-			native_version: D::native_version(),
-			default_heap_pages: self.default_heap_pages,
-		}
-	}
+    fn clone(&self) -> Self {
+        NativeExecutor {
+            _dummy: Default::default(),
+            fallback: self.fallback.clone(),
+            native_version: D::native_version(),
+            default_heap_pages: self.default_heap_pages,
+        }
+    }
 }
 
 impl<D: NativeExecutionDispatch> RuntimeInfo for NativeExecutor<D> {
-	fn native_version(&self) -> &NativeVersion {
-		&self.native_version
-	}
+    fn native_version(&self) -> &NativeVersion {
+        &self.native_version
+    }
 
-	fn runtime_version<E: Externalities<Blake2Hasher>>(
-		&self,
-		ext: &mut E,
-	) -> Option<RuntimeVersion> {
-		RUNTIMES_CACHE.with(|c|
-			fetch_cached_runtime_version(&self.fallback, &mut c.borrow_mut(), ext, self.default_heap_pages)
-				.ok()?.1.clone()
-		)
-	}
+    fn runtime_version<E: Externalities<Blake2Hasher>>(
+        &self,
+        ext: &mut E,
+    ) -> Option<RuntimeVersion> {
+        RUNTIMES_CACHE.with(|c| {
+            fetch_cached_runtime_version(
+                &self.fallback,
+                &mut c.borrow_mut(),
+                ext,
+                self.default_heap_pages,
+            )
+            .ok()?
+            .1
+            .clone()
+        })
+    }
 }
 
 impl<D: NativeExecutionDispatch> CodeExecutor<Blake2Hasher> for NativeExecutor<D> {
-	type Error = Error;
+    type Error = Error;
 
-	fn call
-	<
-		E: Externalities<Blake2Hasher>,
-		R:Decode + Encode + PartialEq,
-		NC: FnOnce() -> result::Result<R, &'static str> + UnwindSafe
-	>(
-		&self,
-		ext: &mut E,
-		method: &str,
-		data: &[u8],
-		use_native: bool,
-		native_call: Option<NC>,
-	) -> (Result<NativeOrEncoded<R>>, bool) {
-		RUNTIMES_CACHE.with(|c| {
+    fn call<
+        E: Externalities<Blake2Hasher>,
+        R: Decode + Encode + PartialEq,
+        NC: FnOnce() -> result::Result<R, &'static str> + UnwindSafe,
+    >(
+        &self,
+        ext: &mut E,
+        method: &str,
+        data: &[u8],
+        use_native: bool,
+        native_call: Option<NC>,
+    ) -> (Result<NativeOrEncoded<R>>, bool) {
+        RUNTIMES_CACHE.with(|c| {
 		let mut c = c.borrow_mut();
 		let (module, onchain_version) = match fetch_cached_runtime_version(
 			&self.fallback, &mut c, ext, self.default_heap_pages) {
@@ -262,7 +281,7 @@ impl<D: NativeExecutionDispatch> CodeExecutor<Blake2Hasher> for NativeExecutor<D>
diff --git a/core/executor/src/sandbox.rs b/core/executor/src/sandbox.rs
 struct GuestToSupervisorFunctionMapping {
-	funcs: Vec<SupervisorFuncIndex>,
+    funcs: Vec<SupervisorFuncIndex>,
 }
 
 impl GuestToSupervisorFunctionMapping {
-	fn new() -> GuestToSupervisorFunctionMapping {
-		GuestToSupervisorFunctionMapping { funcs: Vec::new() }
-	}
-
-	fn define(&mut self, supervisor_func: SupervisorFuncIndex) -> GuestFuncIndex {
-		let idx = self.funcs.len();
-		self.funcs.push(supervisor_func);
-		GuestFuncIndex(idx)
-	}
-
-	fn func_by_guest_index(&self, guest_func_idx: GuestFuncIndex) -> Option<SupervisorFuncIndex> {
-		self.funcs.get(guest_func_idx.0).cloned()
-	}
+    fn new() -> GuestToSupervisorFunctionMapping {
+        GuestToSupervisorFunctionMapping { funcs: Vec::new() }
+    }
+
+    fn define(&mut self, supervisor_func: SupervisorFuncIndex) -> GuestFuncIndex {
+        let idx = self.funcs.len();
+        self.funcs.push(supervisor_func);
+        GuestFuncIndex(idx)
+    }
+
+    fn func_by_guest_index(&self, guest_func_idx: GuestFuncIndex) -> Option<SupervisorFuncIndex> {
+        self.funcs.get(guest_func_idx.0).cloned()
+    }
 }
 
 struct Imports {
-	func_map: HashMap<(Vec<u8>, Vec<u8>), GuestFuncIndex>,
-	memories_map: HashMap<(Vec<u8>, Vec<u8>), MemoryRef>,
+    func_map: HashMap<(Vec<u8>, Vec<u8>), GuestFuncIndex>,
+    memories_map: HashMap<(Vec<u8>, Vec<u8>), MemoryRef>,
 }
 
 impl ImportResolver for Imports {
-	fn resolve_func(
-		&self,
-		module_name: &str,
-		field_name: &str,
-		signature: &::wasmi::Signature,
-	) -> Result<::wasmi::FuncRef, ::wasmi::Error> {
-		let key = (
-			module_name.as_bytes().to_owned(),
-			field_name.as_bytes().to_owned(),
-		);
-		let idx = *self.func_map.get(&key).ok_or_else(|| {
-			::wasmi::Error::Instantiation(format!(
"Export {}:{} not found", - module_name, field_name - )) - })?; - Ok(::wasmi::FuncInstance::alloc_host(signature.clone(), idx.0)) - } - - fn resolve_memory( - &self, - module_name: &str, - field_name: &str, - _memory_type: &::wasmi::MemoryDescriptor, - ) -> Result { - let key = ( - module_name.as_bytes().to_vec(), - field_name.as_bytes().to_vec(), - ); - let mem = self.memories_map - .get(&key) - .ok_or_else(|| { - ::wasmi::Error::Instantiation(format!( - "Export {}:{} not found", - module_name, field_name - )) - })? - .clone(); - Ok(mem) - } - - fn resolve_global( - &self, - module_name: &str, - field_name: &str, - _global_type: &::wasmi::GlobalDescriptor, - ) -> Result<::wasmi::GlobalRef, ::wasmi::Error> { - Err(::wasmi::Error::Instantiation(format!( - "Export {}:{} not found", - module_name, field_name - ))) - } - - fn resolve_table( - &self, - module_name: &str, - field_name: &str, - _table_type: &::wasmi::TableDescriptor, - ) -> Result<::wasmi::TableRef, ::wasmi::Error> { - Err(::wasmi::Error::Instantiation(format!( - "Export {}:{} not found", - module_name, field_name - ))) - } + fn resolve_func( + &self, + module_name: &str, + field_name: &str, + signature: &::wasmi::Signature, + ) -> Result { + let key = ( + module_name.as_bytes().to_owned(), + field_name.as_bytes().to_owned(), + ); + let idx = *self.func_map.get(&key).ok_or_else(|| { + ::wasmi::Error::Instantiation(format!( + "Export {}:{} not found", + module_name, field_name + )) + })?; + Ok(::wasmi::FuncInstance::alloc_host(signature.clone(), idx.0)) + } + + fn resolve_memory( + &self, + module_name: &str, + field_name: &str, + _memory_type: &::wasmi::MemoryDescriptor, + ) -> Result { + let key = ( + module_name.as_bytes().to_vec(), + field_name.as_bytes().to_vec(), + ); + let mem = self + .memories_map + .get(&key) + .ok_or_else(|| { + ::wasmi::Error::Instantiation(format!( + "Export {}:{} not found", + module_name, field_name + )) + })? + .clone(); + Ok(mem) + } + + fn resolve_global( + &self, + module_name: &str, + field_name: &str, + _global_type: &::wasmi::GlobalDescriptor, + ) -> Result<::wasmi::GlobalRef, ::wasmi::Error> { + Err(::wasmi::Error::Instantiation(format!( + "Export {}:{} not found", + module_name, field_name + ))) + } + + fn resolve_table( + &self, + module_name: &str, + field_name: &str, + _table_type: &::wasmi::TableDescriptor, + ) -> Result<::wasmi::TableRef, ::wasmi::Error> { + Err(::wasmi::Error::Instantiation(format!( + "Export {}:{} not found", + module_name, field_name + ))) + } } /// This trait encapsulates sandboxing capabilities. /// /// Note that this functions are only called in the `supervisor` context. pub trait SandboxCapabilities { - /// Returns a reference to an associated sandbox `Store`. - fn store(&self) -> &Store; - - /// Returns a mutable reference to an associated sandbox `Store`. - fn store_mut(&mut self) -> &mut Store; - - /// Allocate space of the specified length in the supervisor memory. - /// - /// # Errors - /// - /// Returns `Err` if allocation not possible or errors during heap management. - /// - /// Returns pointer to the allocated block. - fn allocate(&mut self, len: u32) -> Result; - - /// Deallocate space specified by the pointer that was previously returned by [`allocate`]. - /// - /// # Errors - /// - /// Returns `Err` if deallocation not possible or because of errors in heap management. - /// - /// [`allocate`]: #tymethod.allocate - fn deallocate(&mut self, ptr: u32) -> Result<(), UserError>; - - /// Write `data` into the supervisor memory at offset specified by `ptr`. 
 
 /// This trait encapsulates sandboxing capabilities.
 ///
 /// Note that these functions are only called in the `supervisor` context.
 pub trait SandboxCapabilities {
-	/// Returns a reference to an associated sandbox `Store`.
-	fn store(&self) -> &Store;
-
-	/// Returns a mutable reference to an associated sandbox `Store`.
-	fn store_mut(&mut self) -> &mut Store;
-
-	/// Allocate space of the specified length in the supervisor memory.
-	///
-	/// # Errors
-	///
-	/// Returns `Err` if allocation not possible or errors during heap management.
-	///
-	/// Returns pointer to the allocated block.
-	fn allocate(&mut self, len: u32) -> Result<u32, UserError>;
-
-	/// Deallocate space specified by the pointer that was previously returned by [`allocate`].
-	///
-	/// # Errors
-	///
-	/// Returns `Err` if deallocation not possible or because of errors in heap management.
-	///
-	/// [`allocate`]: #tymethod.allocate
-	fn deallocate(&mut self, ptr: u32) -> Result<(), UserError>;
-
-	/// Write `data` into the supervisor memory at offset specified by `ptr`.
-	///
-	/// # Errors
-	///
-	/// Returns `Err` if `ptr + data.len()` is out of bounds.
-	fn write_memory(&mut self, ptr: u32, data: &[u8]) -> Result<(), UserError>;
-
-	/// Read `len` bytes from the supervisor memory.
-	///
-	/// # Errors
-	///
-	/// Returns `Err` if `ptr + len` is out of bounds.
-	fn read_memory(&self, ptr: u32, len: u32) -> Result<Vec<u8>, UserError>;
+    /// Returns a reference to an associated sandbox `Store`.
+    fn store(&self) -> &Store;
+
+    /// Returns a mutable reference to an associated sandbox `Store`.
+    fn store_mut(&mut self) -> &mut Store;
+
+    /// Allocate space of the specified length in the supervisor memory.
+    ///
+    /// # Errors
+    ///
+    /// Returns `Err` if allocation not possible or errors during heap management.
+    ///
+    /// Returns pointer to the allocated block.
+    fn allocate(&mut self, len: u32) -> Result<u32, UserError>;
+
+    /// Deallocate space specified by the pointer that was previously returned by [`allocate`].
+    ///
+    /// # Errors
+    ///
+    /// Returns `Err` if deallocation not possible or because of errors in heap management.
+    ///
+    /// [`allocate`]: #tymethod.allocate
+    fn deallocate(&mut self, ptr: u32) -> Result<(), UserError>;
+
+    /// Write `data` into the supervisor memory at offset specified by `ptr`.
+    ///
+    /// # Errors
+    ///
+    /// Returns `Err` if `ptr + data.len()` is out of bounds.
+    fn write_memory(&mut self, ptr: u32, data: &[u8]) -> Result<(), UserError>;
+
+    /// Read `len` bytes from the supervisor memory.
+    ///
+    /// # Errors
+    ///
+    /// Returns `Err` if `ptr + len` is out of bounds.
+    fn read_memory(&self, ptr: u32, len: u32) -> Result<Vec<u8>, UserError>;
 }
 
 /// Implementation of [`Externals`] that allows execution of guest module with
@@ -184,40 +185,40 @@ pub trait SandboxCapabilities {
 ///
 /// [`Externals`]: ../../wasmi/trait.Externals.html
 pub struct GuestExternals<'a, FE: SandboxCapabilities + Externals + 'a> {
-	supervisor_externals: &'a mut FE,
-	sandbox_instance: &'a SandboxInstance,
-	state: u32,
+    supervisor_externals: &'a mut FE,
+    sandbox_instance: &'a SandboxInstance,
+    state: u32,
 }
 
 fn trap(msg: &'static str) -> Trap {
-	TrapKind::Host(Box::new(UserError(msg))).into()
+    TrapKind::Host(Box::new(UserError(msg))).into()
 }
 
 fn deserialize_result(serialized_result: &[u8]) -> Result<Option<RuntimeValue>, Trap> {
-	use self::sandbox_primitives::{HostError, ReturnValue};
-	let result_val = Result::<ReturnValue, HostError>::decode(&mut &serialized_result[..])
-		.ok_or_else(|| trap("Decoding Result failed!"))?;
-
-	match result_val {
-		Ok(return_value) => Ok(match return_value {
-			ReturnValue::Unit => None,
-			ReturnValue::Value(typed_value) => Some(RuntimeValue::from(typed_value)),
-		}),
-		Err(HostError) => Err(trap("Supervisor function returned sandbox::HostError")),
-	}
+    use self::sandbox_primitives::{HostError, ReturnValue};
+    let result_val = Result::<ReturnValue, HostError>::decode(&mut &serialized_result[..])
+        .ok_or_else(|| trap("Decoding Result failed!"))?;
+
+    match result_val {
+        Ok(return_value) => Ok(match return_value {
+            ReturnValue::Unit => None,
+            ReturnValue::Value(typed_value) => Some(RuntimeValue::from(typed_value)),
+        }),
+        Err(HostError) => Err(trap("Supervisor function returned sandbox::HostError")),
+    }
 }
 
 impl<'a, FE: SandboxCapabilities + Externals + 'a> Externals for GuestExternals<'a, FE> {
-	fn invoke_index(
-		&mut self,
-		index: usize,
-		args: RuntimeArgs,
-	) -> Result<Option<RuntimeValue>, Trap> {
-		// Make `index` typesafe again.
-		let index = GuestFuncIndex(index);
-
-		let dispatch_thunk = self.sandbox_instance.dispatch_thunk.clone();
-		let func_idx = self.sandbox_instance
+    fn invoke_index(
+        &mut self,
+        index: usize,
+        args: RuntimeArgs,
+    ) -> Result<Option<RuntimeValue>, Trap> {
+        // Make `index` typesafe again.
+        let index = GuestFuncIndex(index);
+
+        let dispatch_thunk = self.sandbox_instance.dispatch_thunk.clone();
+        let func_idx = self.sandbox_instance
 			.guest_to_supervisor_mapping
 			.func_by_guest_index(index)
 			.expect(
@@ -227,76 +228,79 @@ impl<'a, FE: SandboxCapabilities + Externals + 'a> Externals for GuestExternals<'a, FE> {
 				qed"
 			);
 
-		// Serialize arguments into a byte vector.
-		let invoke_args_data: Vec<u8> = args.as_ref()
-			.iter()
-			.cloned()
-			.map(sandbox_primitives::TypedValue::from)
-			.collect::<Vec<_>>()
-			.encode();
-
-		let state = self.state;
-
-		// Move serialized arguments inside the memory and invoke dispatch thunk and
-		// then free allocated memory.
-		let invoke_args_ptr = self.supervisor_externals
-			.allocate(invoke_args_data.len() as u32)?;
-		self.supervisor_externals
-			.write_memory(invoke_args_ptr, &invoke_args_data)?;
-		let result = ::wasmi::FuncInstance::invoke(
-			&dispatch_thunk,
-			&[
-				RuntimeValue::I32(invoke_args_ptr as i32),
-				RuntimeValue::I32(invoke_args_data.len() as i32),
-				RuntimeValue::I32(state as i32),
-				RuntimeValue::I32(func_idx.0 as i32),
-			],
-			self.supervisor_externals,
-		);
-		self.supervisor_externals.deallocate(invoke_args_ptr)?;
-
-		// dispatch_thunk returns pointer to serialized arguments.
-		let (serialized_result_val_ptr, serialized_result_val_len) = match result {
-			// Unpack pointer and len of the serialized result data.
-			Ok(Some(RuntimeValue::I64(v))) => {
-				// Cast to u64 to use zero-extension.
-				let v = v as u64;
-				let ptr = (v as u64 >> 32) as u32;
-				let len = (v & 0xFFFFFFFF) as u32;
-				(ptr, len)
-			}
-			Ok(_) => return Err(trap("Supervisor function returned unexpected result!")),
-			Err(_) => return Err(trap("Supervisor function trapped!")),
-		};
-
-		let serialized_result_val = self.supervisor_externals
-			.read_memory(serialized_result_val_ptr, serialized_result_val_len)?;
-		self.supervisor_externals
-			.deallocate(serialized_result_val_ptr)?;
-
-		// We do not have to check the signature here, because it's automatically
-		// checked by wasmi.
-
-		deserialize_result(&serialized_result_val)
-	}
+        // Serialize arguments into a byte vector.
+        let invoke_args_data: Vec<u8> = args
+            .as_ref()
+            .iter()
+            .cloned()
+            .map(sandbox_primitives::TypedValue::from)
+            .collect::<Vec<_>>()
+            .encode();
+
+        let state = self.state;
+
+        // Move serialized arguments inside the memory and invoke dispatch thunk and
+        // then free allocated memory.
+        let invoke_args_ptr = self
+            .supervisor_externals
+            .allocate(invoke_args_data.len() as u32)?;
+        self.supervisor_externals
+            .write_memory(invoke_args_ptr, &invoke_args_data)?;
+        let result = ::wasmi::FuncInstance::invoke(
+            &dispatch_thunk,
+            &[
+                RuntimeValue::I32(invoke_args_ptr as i32),
+                RuntimeValue::I32(invoke_args_data.len() as i32),
+                RuntimeValue::I32(state as i32),
+                RuntimeValue::I32(func_idx.0 as i32),
+            ],
+            self.supervisor_externals,
+        );
+        self.supervisor_externals.deallocate(invoke_args_ptr)?;
+
+        // dispatch_thunk returns pointer to serialized arguments.
+        let (serialized_result_val_ptr, serialized_result_val_len) = match result {
+            // Unpack pointer and len of the serialized result data.
+            Ok(Some(RuntimeValue::I64(v))) => {
+                // Cast to u64 to use zero-extension.
+                let v = v as u64;
+                let ptr = (v as u64 >> 32) as u32;
+                let len = (v & 0xFFFFFFFF) as u32;
+                (ptr, len)
+            }
+            Ok(_) => return Err(trap("Supervisor function returned unexpected result!")),
+            Err(_) => return Err(trap("Supervisor function trapped!")),
+        };
+
+        let serialized_result_val = self
+            .supervisor_externals
+            .read_memory(serialized_result_val_ptr, serialized_result_val_len)?;
+        self.supervisor_externals
+            .deallocate(serialized_result_val_ptr)?;
+
+        // We do not have to check the signature here, because it's automatically
+        // checked by wasmi.
+
+        deserialize_result(&serialized_result_val)
+    }
 }
 
 fn with_guest_externals<FE, F, R>(
-	supervisor_externals: &mut FE,
-	sandbox_instance: &SandboxInstance,
-	state: u32,
-	f: F,
+    supervisor_externals: &mut FE,
+    sandbox_instance: &SandboxInstance,
+    state: u32,
+    f: F,
 ) -> R
 where
-	FE: SandboxCapabilities + Externals,
-	F: FnOnce(&mut GuestExternals<FE>) -> R,
+    FE: SandboxCapabilities + Externals,
+    F: FnOnce(&mut GuestExternals<FE>) -> R,
 {
-	let mut guest_externals = GuestExternals {
-		supervisor_externals,
-		sandbox_instance,
-		state,
-	};
-	f(&mut guest_externals)
+    let mut guest_externals = GuestExternals {
+        supervisor_externals,
+        sandbox_instance,
+        state,
+    };
+    f(&mut guest_externals)
 }
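The dispatch thunk invoked in `invoke_index` above returns a single `i64` that packs the result pointer into the high 32 bits and the result length into the low 32 bits; `deserialize_result` is then applied to the bytes read back from that region. The round trip of that packing can be checked in isolation (illustrative helper names):

    // Encode a (ptr, len) pair the way the dispatch thunk result is unpacked above:
    // pointer in the high 32 bits, length in the low 32 bits.
    fn pack(ptr: u32, len: u32) -> u64 {
        ((ptr as u64) << 32) | (len as u64)
    }

    fn unpack(v: u64) -> (u32, u32) {
        ((v >> 32) as u32, (v & 0xFFFF_FFFF) as u32)
    }

    fn main() {
        let v = pack(0xDEAD_BEEF, 42);
        assert_eq!(unpack(v), (0xDEAD_BEEF, 42));
    }
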
 
 /// Sandboxed instance of a wasm module.
@@ -312,92 +316,87 @@ where
 ///
 /// [`invoke`]: #method.invoke
 pub struct SandboxInstance {
-	instance: ModuleRef,
-	dispatch_thunk: FuncRef,
-	guest_to_supervisor_mapping: GuestToSupervisorFunctionMapping,
+    instance: ModuleRef,
+    dispatch_thunk: FuncRef,
+    guest_to_supervisor_mapping: GuestToSupervisorFunctionMapping,
 }
 
 impl SandboxInstance {
-	/// Invoke an exported function by a name.
-	///
-	/// `supervisor_externals` is required to execute the implementations
-	/// of the syscalls that are published to a sandboxed module instance.
-	///
-	/// The `state` parameter can be used to provide custom data for
-	/// these syscall implementations.
-	pub fn invoke<FE: SandboxCapabilities + Externals>(
-		&self,
-		export_name: &str,
-		args: &[RuntimeValue],
-		supervisor_externals: &mut FE,
-		state: u32,
-	) -> Result<Option<RuntimeValue>, wasmi::Error> {
-		with_guest_externals(
-			supervisor_externals,
-			self,
-			state,
-			|guest_externals| {
-				self.instance
-					.invoke_export(export_name, args, guest_externals)
-			},
-		)
-	}
+    /// Invoke an exported function by a name.
+    ///
+    /// `supervisor_externals` is required to execute the implementations
+    /// of the syscalls that are published to a sandboxed module instance.
+    ///
+    /// The `state` parameter can be used to provide custom data for
+    /// these syscall implementations.
+    pub fn invoke<FE: SandboxCapabilities + Externals>(
+        &self,
+        export_name: &str,
+        args: &[RuntimeValue],
+        supervisor_externals: &mut FE,
+        state: u32,
+    ) -> Result<Option<RuntimeValue>, wasmi::Error> {
+        with_guest_externals(supervisor_externals, self, state, |guest_externals| {
+            self.instance
+                .invoke_export(export_name, args, guest_externals)
+        })
+    }
 }
 
 /// Error occurred during instantiation of a sandboxed module.
 pub enum InstantiationError {
-	/// Something wrong with the environment definition. It either can't
-	/// be decoded or has a reference to a non-existent or torn down memory instance.
-	EnvironmentDefintionCorrupted,
-	/// Provided module isn't recognized as a valid webassembly binary.
-	ModuleDecoding,
-	/// Module is a well-formed webassembly binary but could not be instantiated. This could
-	/// happen because, e.g. the module imports entries not provided by the environment.
-	Instantiation,
-	/// Module is well-formed, instantiated and linked, but while executing the start function
-	/// a trap was generated.
-	StartTrapped,
+    /// Something wrong with the environment definition. It either can't
+    /// be decoded or has a reference to a non-existent or torn down memory instance.
+    EnvironmentDefintionCorrupted,
+    /// Provided module isn't recognized as a valid webassembly binary.
+    ModuleDecoding,
+    /// Module is a well-formed webassembly binary but could not be instantiated. This could
+    /// happen because, e.g. the module imports entries not provided by the environment.
+    Instantiation,
+    /// Module is well-formed, instantiated and linked, but while executing the start function
+    /// a trap was generated.
+    StartTrapped,
 }
 
 fn decode_environment_definition(
-	raw_env_def: &[u8],
-	memories: &[Option<MemoryRef>],
+    raw_env_def: &[u8],
+    memories: &[Option<MemoryRef>],
 ) -> Result<(Imports, GuestToSupervisorFunctionMapping), InstantiationError> {
-	let env_def = sandbox_primitives::EnvironmentDefinition::decode(&mut &raw_env_def[..])
-		.ok_or_else(|| InstantiationError::EnvironmentDefintionCorrupted)?;
-
-	let mut func_map = HashMap::new();
-	let mut memories_map = HashMap::new();
-	let mut guest_to_supervisor_mapping = GuestToSupervisorFunctionMapping::new();
-
-	for entry in &env_def.entries {
-		let module = entry.module_name.clone();
-		let field = entry.field_name.clone();
-
-		match entry.entity {
-			sandbox_primitives::ExternEntity::Function(func_idx) => {
-				let externals_idx =
-					guest_to_supervisor_mapping.define(SupervisorFuncIndex(func_idx as usize));
-				func_map.insert((module, field), externals_idx);
-			}
-			sandbox_primitives::ExternEntity::Memory(memory_idx) => {
-				let memory_ref = memories
-					.get(memory_idx as usize)
-					.cloned()
-					.ok_or_else(|| InstantiationError::EnvironmentDefintionCorrupted)?
-					.ok_or_else(|| InstantiationError::EnvironmentDefintionCorrupted)?;
-				memories_map.insert((module, field), memory_ref);
-			}
-		}
-	}
-
-	Ok((
-		Imports {
-			func_map,
-			memories_map,
-		},
-		guest_to_supervisor_mapping,
-	))
+    let env_def = sandbox_primitives::EnvironmentDefinition::decode(&mut &raw_env_def[..])
+        .ok_or_else(|| InstantiationError::EnvironmentDefintionCorrupted)?;
+
+    let mut func_map = HashMap::new();
+    let mut memories_map = HashMap::new();
+    let mut guest_to_supervisor_mapping = GuestToSupervisorFunctionMapping::new();
+
+    for entry in &env_def.entries {
+        let module = entry.module_name.clone();
+        let field = entry.field_name.clone();
+
+        match entry.entity {
+            sandbox_primitives::ExternEntity::Function(func_idx) => {
+                let externals_idx =
+                    guest_to_supervisor_mapping.define(SupervisorFuncIndex(func_idx as usize));
+                func_map.insert((module, field), externals_idx);
+            }
+            sandbox_primitives::ExternEntity::Memory(memory_idx) => {
+                let memory_ref = memories
+                    .get(memory_idx as usize)
+                    .cloned()
+                    .ok_or_else(|| InstantiationError::EnvironmentDefintionCorrupted)?
+                    .ok_or_else(|| InstantiationError::EnvironmentDefintionCorrupted)?;
+                memories_map.insert((module, field), memory_ref);
+            }
+        }
+    }
+
+    Ok((
+        Imports {
+            func_map,
+            memories_map,
+        },
+        guest_to_supervisor_mapping,
+    ))
 }
 
 /// Instantiate a guest module and return its index in the store.
@@ -415,169 +414,171 @@ fn decode_environment_definition(
 ///
 /// [`EnvironmentDefinition`]: ../../sandbox/struct.EnvironmentDefinition.html
 pub fn instantiate<FE: SandboxCapabilities + Externals>(
-	supervisor_externals: &mut FE,
-	dispatch_thunk: FuncRef,
-	wasm: &[u8],
-	raw_env_def: &[u8],
-	state: u32,
+    supervisor_externals: &mut FE,
+    dispatch_thunk: FuncRef,
+    wasm: &[u8],
+    raw_env_def: &[u8],
+    state: u32,
 ) -> Result<u32, InstantiationError> {
-	let (imports, guest_to_supervisor_mapping) =
-		decode_environment_definition(raw_env_def, &supervisor_externals.store().memories)?;
-
-	let module = Module::from_buffer(wasm).map_err(|_| InstantiationError::ModuleDecoding)?;
-	let instance = ModuleInstance::new(&module, &imports).map_err(|_| InstantiationError::Instantiation)?;
-
-	let sandbox_instance = Rc::new(SandboxInstance {
-		// In general, it's not a very good idea to use `.not_started_instance()` for anything
-		// but for extracting memory and tables. But in this particular case, we are extracting
-		// for the purpose of running `start` function which should be ok.
-		instance: instance.not_started_instance().clone(),
-		dispatch_thunk,
-		guest_to_supervisor_mapping,
-	});
-
-	with_guest_externals(
-		supervisor_externals,
-		&sandbox_instance,
-		state,
-		|guest_externals| {
-			instance
-				.run_start(guest_externals)
-				.map_err(|_| InstantiationError::StartTrapped)
-		},
-	)?;
-
-	// At last, register the instance.
-	let instance_idx = supervisor_externals
-		.store_mut()
-		.register_sandbox_instance(sandbox_instance);
-	Ok(instance_idx)
+    let (imports, guest_to_supervisor_mapping) =
+        decode_environment_definition(raw_env_def, &supervisor_externals.store().memories)?;
+
+    let module = Module::from_buffer(wasm).map_err(|_| InstantiationError::ModuleDecoding)?;
+    let instance =
+        ModuleInstance::new(&module, &imports).map_err(|_| InstantiationError::Instantiation)?;
+
+    let sandbox_instance = Rc::new(SandboxInstance {
+        // In general, it's not a very good idea to use `.not_started_instance()` for anything
+        // but for extracting memory and tables. But in this particular case, we are extracting
+        // for the purpose of running `start` function which should be ok.
+        instance: instance.not_started_instance().clone(),
+        dispatch_thunk,
+        guest_to_supervisor_mapping,
+    });
+
+    with_guest_externals(
+        supervisor_externals,
+        &sandbox_instance,
+        state,
+        |guest_externals| {
+            instance
+                .run_start(guest_externals)
+                .map_err(|_| InstantiationError::StartTrapped)
+        },
+    )?;
+
+    // At last, register the instance.
+    let instance_idx = supervisor_externals
+        .store_mut()
+        .register_sandbox_instance(sandbox_instance);
+    Ok(instance_idx)
 }
 
 /// This struct keeps track of all sandboxed components.
 pub struct Store {
-	// Memories and instances are `Some` until torn down.
-	instances: Vec<Option<Rc<SandboxInstance>>>,
-	memories: Vec<Option<MemoryRef>>,
+    // Memories and instances are `Some` until torn down.
+    instances: Vec<Option<Rc<SandboxInstance>>>,
+    memories: Vec<Option<MemoryRef>>,
 }
 
 impl Store {
-	/// Create a new empty sandbox store.
-	pub fn new() -> Store {
-		Store {
-			instances: Vec::new(),
-			memories: Vec::new(),
-		}
-	}
-
-	/// Create a new memory instance and return its index.
-	///
-	/// # Errors
-	///
-	/// Returns `Err` if the memory couldn't be created.
-	/// Typically happens if `initial` is more than `maximum`.
-	pub fn new_memory(&mut self, initial: u32, maximum: u32) -> Result<u32, UserError> {
-		let maximum = match maximum {
-			sandbox_primitives::MEM_UNLIMITED => None,
-			specified_limit => Some(Pages(specified_limit as usize)),
-		};
-
-		let mem =
-			MemoryInstance::alloc(
-				Pages(initial as usize),
-				maximum,
-			)
-			.map_err(|_| UserError("Sandboxed memory allocation error"))?;
-
-		let mem_idx = self.memories.len();
-		self.memories.push(Some(mem));
-		Ok(mem_idx as u32)
-	}
-
-	/// Returns `SandboxInstance` by `instance_idx`.
-	///
-	/// # Errors
-	///
-	/// Returns `Err` if `instance_idx` isn't a valid index of an instance or
-	/// instance is already torn down.
-	pub fn instance(&self, instance_idx: u32) -> Result<Rc<SandboxInstance>, UserError> {
-		self.instances
-			.get(instance_idx as usize)
-			.cloned()
-			.ok_or_else(|| UserError("Trying to access a non-existent instance"))?
-			.ok_or_else(|| UserError("Trying to access a torndown instance"))
-	}
-
-	/// Returns reference to a memory instance by `memory_idx`.
-	///
-	/// # Errors
-	///
-	/// Returns `Err` if `memory_idx` isn't a valid index of a memory or
-	/// if memory has been torn down.
-	pub fn memory(&self, memory_idx: u32) -> Result<MemoryRef, UserError> {
-		self.memories
-			.get(memory_idx as usize)
-			.cloned()
-			.ok_or_else(|| UserError("Trying to access a non-existent sandboxed memory"))?
-			.ok_or_else(|| UserError("Trying to access a torndown sandboxed memory"))
-	}
-
-	/// Tear down the memory at the specified index.
-	///
-	/// # Errors
-	///
-	/// Returns `Err` if `memory_idx` isn't a valid index of a memory or
-	/// if it has been torn down.
-	pub fn memory_teardown(&mut self, memory_idx: u32) -> Result<(), UserError> {
-		match self.memories.get_mut(memory_idx as usize) {
-			None => Err(UserError("Trying to teardown a non-existent sandboxed memory")),
-			Some(None) => Err(UserError("Double teardown of a sandboxed memory")),
-			Some(memory) => {
-				*memory = None;
-				Ok(())
-			}
-		}
-	}
-
-	/// Tear down the instance at the specified index.
-	///
-	/// # Errors
-	///
-	/// Returns `Err` if `instance_idx` isn't a valid index of an instance or
-	/// if it has been torn down.
-	pub fn instance_teardown(&mut self, instance_idx: u32) -> Result<(), UserError> {
-		match self.instances.get_mut(instance_idx as usize) {
-			None => Err(UserError("Trying to teardown a non-existent instance")),
-			Some(None) => Err(UserError("Double teardown of an instance")),
-			Some(instance) => {
-				*instance = None;
-				Ok(())
-			}
-		}
-	}
-
-	fn register_sandbox_instance(&mut self, sandbox_instance: Rc<SandboxInstance>) -> u32 {
-		let instance_idx = self.instances.len();
-		self.instances.push(Some(sandbox_instance));
-		instance_idx as u32
-	}
+    /// Create a new empty sandbox store.
+    pub fn new() -> Store {
+        Store {
+            instances: Vec::new(),
+            memories: Vec::new(),
+        }
+    }
+
+    /// Create a new memory instance and return its index.
+    ///
+    /// # Errors
+    ///
+    /// Returns `Err` if the memory couldn't be created.
+    /// Typically happens if `initial` is more than `maximum`.
+    pub fn new_memory(&mut self, initial: u32, maximum: u32) -> Result<u32, UserError> {
+        let maximum = match maximum {
+            sandbox_primitives::MEM_UNLIMITED => None,
+            specified_limit => Some(Pages(specified_limit as usize)),
+        };
+
+        let mem = MemoryInstance::alloc(Pages(initial as usize), maximum)
+            .map_err(|_| UserError("Sandboxed memory allocation error"))?;
+
+        let mem_idx = self.memories.len();
+        self.memories.push(Some(mem));
+        Ok(mem_idx as u32)
+    }
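`Store` keeps instances and memories in `Vec<Option<_>>` slots: indices are handed out once and never reused, and teardown simply replaces the slot with `None`, which is what makes the "double teardown" and "non-existent" error cases below distinguishable. The slot pattern in isolation (illustrative, not the patch's API):

    struct Slots<T> {
        slots: Vec<Option<T>>,
    }

    impl<T> Slots<T> {
        fn register(&mut self, value: T) -> u32 {
            // Indices stay stable because entries are never removed,
            // mirroring `register_sandbox_instance`.
            self.slots.push(Some(value));
            (self.slots.len() - 1) as u32
        }

        fn teardown(&mut self, idx: u32) -> Result<(), &'static str> {
            match self.slots.get_mut(idx as usize) {
                None => Err("no such slot"),
                Some(None) => Err("double teardown"),
                Some(slot) => {
                    *slot = None;
                    Ok(())
                }
            }
        }
    }
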
+    /// Returns `SandboxInstance` by `instance_idx`.
+    ///
+    /// # Errors
+    ///
+    /// Returns `Err` if `instance_idx` isn't a valid index of an instance or
+    /// instance is already torn down.
+    pub fn instance(&self, instance_idx: u32) -> Result<Rc<SandboxInstance>, UserError> {
+        self.instances
+            .get(instance_idx as usize)
+            .cloned()
+            .ok_or_else(|| UserError("Trying to access a non-existent instance"))?
+            .ok_or_else(|| UserError("Trying to access a torndown instance"))
+    }
+
+    /// Returns reference to a memory instance by `memory_idx`.
+    ///
+    /// # Errors
+    ///
+    /// Returns `Err` if `memory_idx` isn't a valid index of a memory or
+    /// if memory has been torn down.
+    pub fn memory(&self, memory_idx: u32) -> Result<MemoryRef, UserError> {
+        self.memories
+            .get(memory_idx as usize)
+            .cloned()
+            .ok_or_else(|| UserError("Trying to access a non-existent sandboxed memory"))?
+            .ok_or_else(|| UserError("Trying to access a torndown sandboxed memory"))
+    }
+
+    /// Tear down the memory at the specified index.
+    ///
+    /// # Errors
+    ///
+    /// Returns `Err` if `memory_idx` isn't a valid index of a memory or
+    /// if it has been torn down.
+    pub fn memory_teardown(&mut self, memory_idx: u32) -> Result<(), UserError> {
+        match self.memories.get_mut(memory_idx as usize) {
+            None => Err(UserError(
+                "Trying to teardown a non-existent sandboxed memory",
+            )),
+            Some(None) => Err(UserError("Double teardown of a sandboxed memory")),
+            Some(memory) => {
+                *memory = None;
+                Ok(())
+            }
+        }
+    }
+
+    /// Tear down the instance at the specified index.
+    ///
+    /// # Errors
+    ///
+    /// Returns `Err` if `instance_idx` isn't a valid index of an instance or
+    /// if it has been torn down.
+    pub fn instance_teardown(&mut self, instance_idx: u32) -> Result<(), UserError> {
+        match self.instances.get_mut(instance_idx as usize) {
+            None => Err(UserError("Trying to teardown a non-existent instance")),
+            Some(None) => Err(UserError("Double teardown of an instance")),
+            Some(instance) => {
+                *instance = None;
+                Ok(())
+            }
+        }
+    }
+
+    fn register_sandbox_instance(&mut self, sandbox_instance: Rc<SandboxInstance>) -> u32 {
+        let instance_idx = self.instances.len();
+        self.instances.push(Some(sandbox_instance));
+        instance_idx as u32
+    }
 }
 
 #[cfg(test)]
 mod tests {
-	use primitives::{Blake2Hasher};
-	use crate::allocator;
-	use crate::sandbox::trap;
-	use crate::wasm_executor::WasmExecutor;
-	use state_machine::TestExternalities;
-	use wabt;
-
-	#[test]
-	fn sandbox_should_work() {
-		let mut ext = TestExternalities::<Blake2Hasher>::default();
-		let test_code = include_bytes!("../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm");
-
-		let code = wabt::wat2wasm(r#"
+    use crate::allocator;
+    use crate::sandbox::trap;
+    use crate::wasm_executor::WasmExecutor;
+    use primitives::Blake2Hasher;
+    use state_machine::TestExternalities;
+    use wabt;
+
+    #[test]
+    fn sandbox_should_work() {
+        let mut ext = TestExternalities::<Blake2Hasher>::default();
+        let test_code = include_bytes!(
+            "../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm"
+        );
+
+        let code = wabt::wat2wasm(
+            r#"
 		(module
 			(import "env" "assert" (func $assert (param i32)))
 			(import "env" "inc_counter" (func $inc_counter (param i32) (result i32)))
@@ -596,20 +597,27 @@ mod tests {
 				call $assert
 			)
 		)
-		"#).unwrap();
-
-		assert_eq!(
-			WasmExecutor::new().call(&mut ext, 8, &test_code[..], "test_sandbox", &code).unwrap(),
-			vec![1],
-		);
-	}
-
-	#[test]
-	fn sandbox_trap() {
-		let mut ext = TestExternalities::<Blake2Hasher>::default();
-		let test_code = include_bytes!("../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm");
-
-		let code = wabt::wat2wasm(r#"
+        "#,
+        )
+        .unwrap();
+
+        assert_eq!(
+            WasmExecutor::new()
+                .call(&mut ext, 8, &test_code[..], "test_sandbox", &code)
+                .unwrap(),
+            vec![1],
+        );
+    }
+
+    #[test]
+    fn sandbox_trap() {
+        let mut ext = TestExternalities::<Blake2Hasher>::default();
+        let test_code = include_bytes!(
+            "../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm"
+        );
+
+        let code = wabt::wat2wasm(
+            r#"
 		(module
 			(import "env" "assert" (func $assert (param i32)))
 			(func (export "call")
@@ -617,20 +625,27 @@
 				call $assert
 			)
 		)
-		"#).unwrap();
-
-		assert_eq!(
-			WasmExecutor::new().call(&mut ext, 8, &test_code[..], "test_sandbox", &code).unwrap(),
-			vec![0],
-		);
-	}
-
-	#[test]
-	fn sandbox_should_trap_when_heap_exhausted() {
-		let mut ext = TestExternalities::<Blake2Hasher>::default();
-		let test_code = include_bytes!("../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm");
-
-		let code = wabt::wat2wasm(r#"
+        "#,
+        )
+        .unwrap();
+
+        assert_eq!(
+            WasmExecutor::new()
+                .call(&mut ext, 8, &test_code[..], "test_sandbox", &code)
+                .unwrap(),
+            vec![0],
+        );
+    }
+
+    #[test]
+    fn sandbox_should_trap_when_heap_exhausted() {
+        let mut ext = TestExternalities::<Blake2Hasher>::default();
+        let test_code = include_bytes!(
+            "../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm"
+        );
+
+        let code = wabt::wat2wasm(
+            r#"
 		(module
 			(import "env" "assert" (func $assert (param i32)))
 			(func (export "call")
@@ -638,25 +653,30 @@
 				call $assert
 			)
 		)
-		"#).unwrap();
-
-		let res = WasmExecutor::new().call(&mut ext, 8, &test_code[..], "test_exhaust_heap", &code);
-		assert_eq!(res.is_err(), true);
-		if let Err(err) = res {
-			let inner_err = err.iter().next().unwrap();
-			assert_eq!(
-				format!("{}", inner_err),
-				format!("{}", wasmi::Error::Trap(trap(allocator::OUT_OF_SPACE)))
-			);
-		}
-	}
-
-	#[test]
-	fn start_called() {
-		let mut ext = TestExternalities::<Blake2Hasher>::default();
-		let test_code = include_bytes!("../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm");
-
-		let code = wabt::wat2wasm(r#"
+        "#,
+        )
+        .unwrap();
+
+        let res = WasmExecutor::new().call(&mut ext, 8, &test_code[..], "test_exhaust_heap", &code);
+        assert_eq!(res.is_err(), true);
+        if let Err(err) = res {
+            let inner_err = err.iter().next().unwrap();
+            assert_eq!(
+                format!("{}", inner_err),
+                format!("{}", wasmi::Error::Trap(trap(allocator::OUT_OF_SPACE)))
+            );
+        }
+    }
+
+    #[test]
+    fn start_called() {
+        let mut ext = TestExternalities::<Blake2Hasher>::default();
+        let test_code = include_bytes!(
+            "../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm"
+        );
+
+        let code = wabt::wat2wasm(
+            r#"
 		(module
 			(import "env" "assert" (func $assert (param i32)))
 			(import "env" "inc_counter" (func $inc_counter (param i32) (result i32)))
@@ -681,20 +701,27 @@
 				call $assert
 			)
 		)
-		"#).unwrap();
-
-		assert_eq!(
-			WasmExecutor::new().call(&mut ext, 8, &test_code[..], "test_sandbox", &code).unwrap(),
-			vec![1],
-		);
-	}
-
-	#[test]
-	fn invoke_args() {
-		let mut ext = TestExternalities::<Blake2Hasher>::default();
-		let test_code = include_bytes!("../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm");
-
-		let code = wabt::wat2wasm(r#"
+        "#,
+        )
+        .unwrap();
+
+        assert_eq!(
+            WasmExecutor::new()
+                .call(&mut ext, 8, &test_code[..], "test_sandbox", &code)
+                .unwrap(),
+            vec![1],
+        );
+    }
+
+    #[test]
+    fn invoke_args() {
+        let mut ext = TestExternalities::<Blake2Hasher>::default();
+        let test_code = include_bytes!(
+            "../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm"
+        );
+
+        let code = wabt::wat2wasm(
+            r#"
 		(module
 			(import "env" "assert" (func $assert (param i32)))
@@ -715,20 +742,27 @@
 				)
 			)
 		)
-		"#).unwrap();
-
-		assert_eq!(
-			WasmExecutor::new().call(&mut ext, 8, &test_code[..], "test_sandbox_args", &code).unwrap(),
-			vec![1],
-		);
-	}
-
-	#[test]
-	fn return_val() {
-		let mut ext = TestExternalities::<Blake2Hasher>::default();
-		let test_code = include_bytes!("../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm");
-
-		let code = wabt::wat2wasm(r#"
+        "#,
+        )
+        .unwrap();
+
+        assert_eq!(
+            WasmExecutor::new()
+                .call(&mut ext, 8, &test_code[..], "test_sandbox_args", &code)
+                .unwrap(),
+            vec![1],
+        );
+    }
+
+    #[test]
+    fn return_val() {
+        let mut ext = TestExternalities::<Blake2Hasher>::default();
+        let test_code = include_bytes!(
+            "../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm"
+        );
+
+        let code = wabt::wat2wasm(
+            r#"
 		(module
 			(func (export "call") (param $x i32) (result i32)
 				(i32.add
@@ -737,54 +771,90 @@
 				)
 			)
 		)
-		"#).unwrap();
-
-		assert_eq!(
-			WasmExecutor::new().call(&mut ext, 8, &test_code[..], "test_sandbox_return_val", &code).unwrap(),
-			vec![1],
-		);
-	}
-
-	#[test]
-	fn unlinkable_module() {
-		let mut ext = TestExternalities::<Blake2Hasher>::default();
-		let test_code = include_bytes!("../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm");
-
-		let code = wabt::wat2wasm(r#"
+        "#,
+        )
+        .unwrap();
+
+        assert_eq!(
+            WasmExecutor::new()
+                .call(
+                    &mut ext,
+                    8,
+                    &test_code[..],
+                    "test_sandbox_return_val",
+                    &code
+                )
+                .unwrap(),
+            vec![1],
+        );
+    }
+
+    #[test]
+    fn unlinkable_module() {
+        let mut ext = TestExternalities::<Blake2Hasher>::default();
+        let test_code = include_bytes!(
+            "../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm"
+        );
+
+        let code = wabt::wat2wasm(
+            r#"
 		(module
 			(import "env" "non-existent" (func))
 			(func (export "call")
 			)
 		)
-		"#).unwrap();
-
-		assert_eq!(
-			WasmExecutor::new().call(&mut ext, 8, &test_code[..], "test_sandbox_instantiate", &code).unwrap(),
-			vec![1],
-		);
-	}
-
-	#[test]
-	fn corrupted_module() {
-		let mut ext = TestExternalities::<Blake2Hasher>::default();
-		let test_code = include_bytes!("../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm");
-
-		// Corrupted wasm file
-		let code = &[0, 0, 0, 0, 1, 0, 0, 0];
-
-		assert_eq!(
-			WasmExecutor::new().call(&mut ext, 8, &test_code[..], "test_sandbox_instantiate", code).unwrap(),
-			vec![1],
-		);
-	}
-
-	#[test]
-	fn start_fn_ok() {
-		let mut ext = TestExternalities::<Blake2Hasher>::default();
-		let test_code = include_bytes!("../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm");
-
-		let code = wabt::wat2wasm(r#"
+        "#,
+        )
+        .unwrap();
+
+        assert_eq!(
+            WasmExecutor::new()
+                .call(
+                    &mut ext,
+                    8,
+                    &test_code[..],
+                    "test_sandbox_instantiate",
+                    &code
+                )
+                .unwrap(),
+            vec![1],
+        );
+    }
+
+    #[test]
+    fn corrupted_module() {
+        let mut ext = TestExternalities::<Blake2Hasher>::default();
+        let test_code = include_bytes!(
+            "../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm"
+        );
+
+        // Corrupted wasm file
+        let code = &[0, 0, 0, 0, 1, 0, 0, 0];
+
+        assert_eq!(
+            WasmExecutor::new()
+                .call(
+                    &mut ext,
+                    8,
+                    &test_code[..],
+                    "test_sandbox_instantiate",
+                    code
+                )
+                .unwrap(),
+            vec![1],
+        );
+    }
+
+    #[test]
+    fn start_fn_ok() {
+        let mut ext = TestExternalities::<Blake2Hasher>::default();
+        let test_code = include_bytes!(
+            "../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm"
+        );
+
+        let code = wabt::wat2wasm(
+            r#"
 		(module
 			(func (export "call")
 			)
@@ -794,20 +864,33 @@ mod tests {
 			(start $start)
 		)
-		"#).unwrap();
-
-		assert_eq!(
-			WasmExecutor::new().call(&mut ext, 8, &test_code[..], "test_sandbox_instantiate", &code).unwrap(),
-			vec![0],
-		);
-	}
-
-	#[test]
-	fn start_fn_traps() {
-		let mut ext = TestExternalities::<Blake2Hasher>::default();
-		let test_code = include_bytes!("../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm");
-
-		let code = wabt::wat2wasm(r#"
+        "#,
+        )
+        .unwrap();
+
+        assert_eq!(
+            WasmExecutor::new()
+                .call(
+                    &mut ext,
+                    8,
+                    &test_code[..],
+                    "test_sandbox_instantiate",
+                    &code
+                )
+                .unwrap(),
+            vec![0],
+        );
+    }
+
+    #[test]
+    fn start_fn_traps() {
+        let mut ext = TestExternalities::<Blake2Hasher>::default();
+        let test_code = include_bytes!(
+            "../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm"
+        );
+
+        let code = wabt::wat2wasm(
+            r#"
 		(module
 			(func (export "call")
 			)
@@ -818,11 +901,21 @@
 			(start $start)
 		)
-		"#).unwrap();
-
-		assert_eq!(
-			WasmExecutor::new().call(&mut ext, 8, &test_code[..], "test_sandbox_instantiate", &code).unwrap(),
-			vec![2],
-		);
-	}
+        "#,
+        )
+        .unwrap();
+
+        assert_eq!(
+            WasmExecutor::new()
+                .call(
+                    &mut ext,
+                    8,
+                    &test_code[..],
+                    "test_sandbox_instantiate",
+                    &code
+                )
+                .unwrap(),
+            vec![2],
+        );
+    }
 }
diff --git a/core/executor/src/wasm_executor.rs b/core/executor/src/wasm_executor.rs
index 42af29e9ba..d1415b9f41 100644
--- a/core/executor/src/wasm_executor.rs
+++ b/core/executor/src/wasm_executor.rs
@@ -16,101 +16,108 @@
 //! Rust implementation of Substrate contracts.
 
+use secp256k1;
 use std::collections::HashMap;
 use tiny_keccak;
-use secp256k1;
-use wasmi::{
-	Module, ModuleInstance, MemoryInstance, MemoryRef, TableRef, ImportsBuilder, ModuleRef,
-};
-use wasmi::RuntimeValue::{I32, I64, self};
-use wasmi::memory_units::{Pages};
-use state_machine::Externalities;
+use crate::allocator;
 use crate::error::{Error, ErrorKind, Result};
+use crate::sandbox;
 use crate::wasm_utils::UserError;
-use primitives::{blake2_256, twox_128, twox_256, ed25519, sr25519, Pair};
+use log::trace;
 use primitives::hexdisplay::HexDisplay;
 use primitives::sandbox as sandbox_primitives;
-use primitives::{H256, Blake2Hasher};
+use primitives::{blake2_256, ed25519, sr25519, twox_128, twox_256, Pair};
+use primitives::{Blake2Hasher, H256};
+use state_machine::Externalities;
 use trie::ordered_trie_root;
-use crate::sandbox;
-use crate::allocator;
-use log::trace;
+use wasmi::memory_units::Pages;
+use wasmi::RuntimeValue::{self, I32, I64};
+use wasmi::{
+    ImportsBuilder, MemoryInstance, MemoryRef, Module, ModuleInstance, ModuleRef, TableRef,
+};
 
-#[cfg(feature="wasm-extern-trace")]
+#[cfg(feature = "wasm-extern-trace")]
 macro_rules! debug_trace {
 	( $( $x:tt )* ) => ( trace!( $( $x )* ) )
 }
-#[cfg(not(feature="wasm-extern-trace"))]
+#[cfg(not(feature = "wasm-extern-trace"))]
 macro_rules!
debug_trace { - ( $( $x:tt )* ) => () + ( $( $x:tt )* ) => {}; } struct FunctionExecutor<'e, E: Externalities + 'e> { - sandbox_store: sandbox::Store, - heap: allocator::FreeingBumpHeapAllocator, - memory: MemoryRef, - table: Option, - ext: &'e mut E, - hash_lookup: HashMap, Vec>, + sandbox_store: sandbox::Store, + heap: allocator::FreeingBumpHeapAllocator, + memory: MemoryRef, + table: Option, + ext: &'e mut E, + hash_lookup: HashMap, Vec>, } impl<'e, E: Externalities> FunctionExecutor<'e, E> { - fn new(m: MemoryRef, t: Option, e: &'e mut E) -> Result { - Ok(FunctionExecutor { - sandbox_store: sandbox::Store::new(), - heap: allocator::FreeingBumpHeapAllocator::new(m.clone()), - memory: m, - table: t, - ext: e, - hash_lookup: HashMap::new(), - }) - } + fn new(m: MemoryRef, t: Option, e: &'e mut E) -> Result { + Ok(FunctionExecutor { + sandbox_store: sandbox::Store::new(), + heap: allocator::FreeingBumpHeapAllocator::new(m.clone()), + memory: m, + table: t, + ext: e, + hash_lookup: HashMap::new(), + }) + } } impl<'e, E: Externalities> sandbox::SandboxCapabilities for FunctionExecutor<'e, E> { - fn store(&self) -> &sandbox::Store { - &self.sandbox_store - } - fn store_mut(&mut self) -> &mut sandbox::Store { - &mut self.sandbox_store - } - fn allocate(&mut self, len: u32) -> ::std::result::Result { - self.heap.allocate(len) - } - fn deallocate(&mut self, ptr: u32) -> ::std::result::Result<(), UserError> { - self.heap.deallocate(ptr) - } - fn write_memory(&mut self, ptr: u32, data: &[u8]) -> ::std::result::Result<(), UserError> { - self.memory.set(ptr, data).map_err(|_| UserError("Invalid attempt to write_memory")) - } - fn read_memory(&self, ptr: u32, len: u32) -> ::std::result::Result, UserError> { - self.memory.get(ptr, len as usize).map_err(|_| UserError("Invalid attempt to write_memory")) - } + fn store(&self) -> &sandbox::Store { + &self.sandbox_store + } + fn store_mut(&mut self) -> &mut sandbox::Store { + &mut self.sandbox_store + } + fn allocate(&mut self, len: u32) -> ::std::result::Result { + self.heap.allocate(len) + } + fn deallocate(&mut self, ptr: u32) -> ::std::result::Result<(), UserError> { + self.heap.deallocate(ptr) + } + fn write_memory(&mut self, ptr: u32, data: &[u8]) -> ::std::result::Result<(), UserError> { + self.memory + .set(ptr, data) + .map_err(|_| UserError("Invalid attempt to write_memory")) + } + fn read_memory(&self, ptr: u32, len: u32) -> ::std::result::Result, UserError> { + self.memory + .get(ptr, len as usize) + .map_err(|_| UserError("Invalid attempt to write_memory")) + } } trait WritePrimitive { - fn write_primitive(&self, offset: u32, t: T) -> ::std::result::Result<(), UserError>; + fn write_primitive(&self, offset: u32, t: T) -> ::std::result::Result<(), UserError>; } impl WritePrimitive for MemoryInstance { - fn write_primitive(&self, offset: u32, t: u32) -> ::std::result::Result<(), UserError> { - use byteorder::{LittleEndian, ByteOrder}; - let mut r = [0u8; 4]; - LittleEndian::write_u32(&mut r, t); - self.set(offset, &r).map_err(|_| UserError("Invalid attempt to write_primitive")) - } + fn write_primitive(&self, offset: u32, t: u32) -> ::std::result::Result<(), UserError> { + use byteorder::{ByteOrder, LittleEndian}; + let mut r = [0u8; 4]; + LittleEndian::write_u32(&mut r, t); + self.set(offset, &r) + .map_err(|_| UserError("Invalid attempt to write_primitive")) + } } trait ReadPrimitive { - fn read_primitive(&self, offset: u32) -> ::std::result::Result; + fn read_primitive(&self, offset: u32) -> ::std::result::Result; } impl ReadPrimitive for 
MemoryInstance { - fn read_primitive(&self, offset: u32) -> ::std::result::Result { - use byteorder::{LittleEndian, ByteOrder}; - Ok(LittleEndian::read_u32(&self.get(offset, 4).map_err(|_| UserError("Invalid attempt to read_primitive"))?)) - } + fn read_primitive(&self, offset: u32) -> ::std::result::Result { + use byteorder::{ByteOrder, LittleEndian}; + Ok(LittleEndian::read_u32(&self.get(offset, 4).map_err( + |_| UserError("Invalid attempt to read_primitive"), + )?)) + } } impl_function_executor!(this: FunctionExecutor<'e, E>, @@ -651,352 +658,460 @@ impl_function_executor!(this: FunctionExecutor<'e, E>, pub struct WasmExecutor; impl WasmExecutor { - - /// Create a new instance. - pub fn new() -> Self { - WasmExecutor - } - - /// Call a given method in the given code. - /// - /// Signature of this method needs to be `(I32, I32) -> I64`. - /// - /// This should be used for tests only. - pub fn call>( - &self, - ext: &mut E, - heap_pages: usize, - code: &[u8], - method: &str, - data: &[u8], - ) -> Result> { - let module = ::wasmi::Module::from_buffer(code)?; - let module = self.prepare_module(ext, heap_pages, &module)?; - self.call_in_wasm_module(ext, &module, method, data) - } - - /// Call a given method with a custom signature in the given code. - /// - /// This should be used for tests only. - pub fn call_with_custom_signature< - E: Externalities, - F: FnOnce(&mut FnMut(&[u8]) -> Result) -> Result>, - FR: FnOnce(Option, &MemoryRef) -> Result>, - R, - >( - &self, - ext: &mut E, - heap_pages: usize, - code: &[u8], - method: &str, - create_parameters: F, - filter_result: FR, - ) -> Result { - let module = wasmi::Module::from_buffer(code)?; - let module = self.prepare_module(ext, heap_pages, &module)?; - self.call_in_wasm_module_with_custom_signature(ext, &module, method, create_parameters, filter_result) - } - - fn get_mem_instance(module: &ModuleRef) -> Result { - Ok(module - .export_by_name("memory") - .ok_or_else(|| Error::from(ErrorKind::InvalidMemoryReference))? - .as_memory() - .ok_or_else(|| Error::from(ErrorKind::InvalidMemoryReference))? - .clone()) - } - - /// Call a given method in the given wasm-module runtime. - pub fn call_in_wasm_module>( - &self, - ext: &mut E, - module_instance: &ModuleRef, - method: &str, - data: &[u8], - ) -> Result> { - self.call_in_wasm_module_with_custom_signature( - ext, - module_instance, - method, - |alloc| { - let offset = alloc(data)?; - Ok(vec![I32(offset as i32), I32(data.len() as i32)]) - }, - |res, memory| { - if let Some(I64(r)) = res { - let offset = r as u32; - let length = (r as u64 >> 32) as usize; - memory.get(offset, length).map_err(|_| ErrorKind::Runtime.into()).map(Some) - } else { - Ok(None) - } - } - ) - } - - /// Call a given method in the given wasm-module runtime. - fn call_in_wasm_module_with_custom_signature< - E: Externalities, - F: FnOnce(&mut FnMut(&[u8]) -> Result) -> Result>, - FR: FnOnce(Option, &MemoryRef) -> Result>, - R, - >( - &self, - ext: &mut E, - module_instance: &ModuleRef, - method: &str, - create_parameters: F, - filter_result: FR, - ) -> Result { - // extract a reference to a linear memory, optional reference to a table - // and then initialize FunctionExecutor. 
- let memory = Self::get_mem_instance(module_instance)?; - let table: Option<TableRef> = module_instance - .export_by_name("__indirect_function_table") - .and_then(|e| e.as_table().cloned()); - - let low = memory.lowest_used(); - let used_mem = memory.used_size(); - let mut fec = FunctionExecutor::new(memory.clone(), table, ext)?; - let parameters = create_parameters(&mut |data: &[u8]| { - let offset = fec.heap.allocate(data.len() as u32).map_err(|_| ErrorKind::Runtime)?; - memory.set(offset, &data)?; - Ok(offset) - })?; - - let result = module_instance.invoke_export( - method, - &parameters, - &mut fec - ); - let result = match result { - Ok(val) => match filter_result(val, &memory)? { - Some(val) => Ok(val), - None => Err(ErrorKind::InvalidReturn.into()), - }, - Err(e) => { - trace!(target: "wasm-executor", "Failed to execute code with {} pages", memory.current_size().0); - Err(e.into()) - }, - }; - - // cleanup module instance for next use - let new_low = memory.lowest_used(); - if new_low < low { - memory.zero(new_low as usize, (low - new_low) as usize)?; - memory.reset_lowest_used(low); - } - memory.with_direct_access_mut(|buf| buf.resize(used_mem.0, 0)); - result - } - - /// Prepare module instance - pub fn prepare_module<E: Externalities<Blake2Hasher>>( - &self, - ext: &mut E, - heap_pages: usize, - module: &Module, - ) -> Result<ModuleRef> - { - // start module instantiation. Don't run 'start' function yet. - let intermediate_instance = ModuleInstance::new( - module, - &ImportsBuilder::new() - .with_resolver("env", FunctionExecutor::<E>::resolver()) - )?; - - // extract a reference to a linear memory, optional reference to a table - // and then initialize FunctionExecutor. - let memory = Self::get_mem_instance(intermediate_instance.not_started_instance())?; - memory.grow(Pages(heap_pages)).map_err(|_| Error::from(ErrorKind::Runtime))?; - let table: Option<TableRef> = intermediate_instance - .not_started_instance() - .export_by_name("__indirect_function_table") - .and_then(|e| e.as_table().cloned()); - let mut fec = FunctionExecutor::new(memory.clone(), table, ext)?; - - // finish instantiation by running 'start' function (if any). - Ok(intermediate_instance.run_start(&mut fec)?) - } + /// Create a new instance. + pub fn new() -> Self { + WasmExecutor + } + + /// Call a given method in the given code. + /// + /// Signature of this method needs to be `(I32, I32) -> I64`. + /// + /// This should be used for tests only. + pub fn call<E: Externalities<Blake2Hasher>>( + &self, + ext: &mut E, + heap_pages: usize, + code: &[u8], + method: &str, + data: &[u8], + ) -> Result<Vec<u8>> { + let module = ::wasmi::Module::from_buffer(code)?; + let module = self.prepare_module(ext, heap_pages, &module)?; + self.call_in_wasm_module(ext, &module, method, data) + } + + /// Call a given method with a custom signature in the given code. + /// + /// This should be used for tests only. + pub fn call_with_custom_signature< + E: Externalities<Blake2Hasher>, + F: FnOnce(&mut FnMut(&[u8]) -> Result<u32>) -> Result<Vec<RuntimeValue>>, + FR: FnOnce(Option<RuntimeValue>, &MemoryRef) -> Result<Option<R>>, + R, + >( + &self, + ext: &mut E, + heap_pages: usize, + code: &[u8], + method: &str, + create_parameters: F, + filter_result: FR, + ) -> Result<R> { + let module = wasmi::Module::from_buffer(code)?; + let module = self.prepare_module(ext, heap_pages, &module)?; + self.call_in_wasm_module_with_custom_signature( + ext, + &module, + method, + create_parameters, + filter_result, + ) + } + + fn get_mem_instance(module: &ModuleRef) -> Result<MemoryRef> { + Ok(module + .export_by_name("memory") + .ok_or_else(|| Error::from(ErrorKind::InvalidMemoryReference))?
+ .as_memory() + .ok_or_else(|| Error::from(ErrorKind::InvalidMemoryReference))? + .clone()) + } + + /// Call a given method in the given wasm-module runtime. + pub fn call_in_wasm_module<E: Externalities<Blake2Hasher>>( + &self, + ext: &mut E, + module_instance: &ModuleRef, + method: &str, + data: &[u8], + ) -> Result<Vec<u8>> { + self.call_in_wasm_module_with_custom_signature( + ext, + module_instance, + method, + |alloc| { + let offset = alloc(data)?; + Ok(vec![I32(offset as i32), I32(data.len() as i32)]) + }, + |res, memory| { + if let Some(I64(r)) = res { + let offset = r as u32; + let length = (r as u64 >> 32) as usize; + memory + .get(offset, length) + .map_err(|_| ErrorKind::Runtime.into()) + .map(Some) + } else { + Ok(None) + } + }, + ) + } + + /// Call a given method in the given wasm-module runtime. + fn call_in_wasm_module_with_custom_signature< + E: Externalities<Blake2Hasher>, + F: FnOnce(&mut FnMut(&[u8]) -> Result<u32>) -> Result<Vec<RuntimeValue>>, + FR: FnOnce(Option<RuntimeValue>, &MemoryRef) -> Result<Option<R>>, + R, + >( + &self, + ext: &mut E, + module_instance: &ModuleRef, + method: &str, + create_parameters: F, + filter_result: FR, + ) -> Result<R> { + // extract a reference to a linear memory, optional reference to a table + // and then initialize FunctionExecutor. + let memory = Self::get_mem_instance(module_instance)?; + let table: Option<TableRef> = module_instance + .export_by_name("__indirect_function_table") + .and_then(|e| e.as_table().cloned()); + + let low = memory.lowest_used(); + let used_mem = memory.used_size(); + let mut fec = FunctionExecutor::new(memory.clone(), table, ext)?; + let parameters = create_parameters(&mut |data: &[u8]| { + let offset = fec + .heap + .allocate(data.len() as u32) + .map_err(|_| ErrorKind::Runtime)?; + memory.set(offset, &data)?; + Ok(offset) + })?; + + let result = module_instance.invoke_export(method, &parameters, &mut fec); + let result = match result { + Ok(val) => match filter_result(val, &memory)? { + Some(val) => Ok(val), + None => Err(ErrorKind::InvalidReturn.into()), + }, + Err(e) => { + trace!(target: "wasm-executor", "Failed to execute code with {} pages", memory.current_size().0); + Err(e.into()) + } + }; + + // cleanup module instance for next use + let new_low = memory.lowest_used(); + if new_low < low { + memory.zero(new_low as usize, (low - new_low) as usize)?; + memory.reset_lowest_used(low); + } + memory.with_direct_access_mut(|buf| buf.resize(used_mem.0, 0)); + result + } + + /// Prepare module instance + pub fn prepare_module<E: Externalities<Blake2Hasher>>( + &self, + ext: &mut E, + heap_pages: usize, + module: &Module, + ) -> Result<ModuleRef> { + // start module instantiation. Don't run 'start' function yet. + let intermediate_instance = ModuleInstance::new( + module, + &ImportsBuilder::new().with_resolver("env", FunctionExecutor::<E>::resolver()), + )?; + + // extract a reference to a linear memory, optional reference to a table + // and then initialize FunctionExecutor. + let memory = Self::get_mem_instance(intermediate_instance.not_started_instance())?; + memory + .grow(Pages(heap_pages)) + .map_err(|_| Error::from(ErrorKind::Runtime))?; + let table: Option<TableRef> = intermediate_instance + .not_started_instance() + .export_by_name("__indirect_function_table") + .and_then(|e| e.as_table().cloned()); + let mut fec = FunctionExecutor::new(memory.clone(), table, ext)?; + + // finish instantiation by running 'start' function (if any). + Ok(intermediate_instance.run_start(&mut fec)?)
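For reference, the decoding in `call_in_wasm_module` above relies on the runtime convention that a `(I32, I32) -> I64` entry point packs the result buffer's pointer into the low 32 bits of the return value and its length into the high 32 bits. A self-contained sketch of that convention, illustrative only and not code from this patch:

fn pack(ptr: u32, len: u32) -> i64 {
	((u64::from(len) << 32) | u64::from(ptr)) as i64
}

fn unpack(r: i64) -> (u32, usize) {
	// Mirrors the `offset`/`length` extraction in `call_in_wasm_module`.
	(r as u32, (r as u64 >> 32) as usize)
}

// unpack(pack(0x1000, 7)) == (0x1000, 7)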
+ } } - #[cfg(test)] mod tests { - use super::*; - - use parity_codec::Encode; - - use state_machine::TestExternalities; - use hex_literal::{hex, hex_impl}; - use primitives::map; - - #[test] - fn returning_should_work() { - let mut ext = TestExternalities::default(); - let test_code = include_bytes!("../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm"); - - let output = WasmExecutor::new().call(&mut ext, 8, &test_code[..], "test_empty_return", &[]).unwrap(); - assert_eq!(output, vec![0u8; 0]); - } - - #[test] - fn panicking_should_work() { - let mut ext = TestExternalities::default(); - let test_code = include_bytes!("../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm"); - - let output = WasmExecutor::new().call(&mut ext, 8, &test_code[..], "test_panic", &[]); - assert!(output.is_err()); - - let output = WasmExecutor::new().call(&mut ext, 8, &test_code[..], "test_conditional_panic", &[]); - assert_eq!(output.unwrap(), vec![0u8; 0]); - - let output = WasmExecutor::new().call(&mut ext, 8, &test_code[..], "test_conditional_panic", &[2]); - assert!(output.is_err()); - } - - #[test] - fn storage_should_work() { - let mut ext = TestExternalities::default(); - ext.set_storage(b"foo".to_vec(), b"bar".to_vec()); - let test_code = include_bytes!("../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm"); - - let output = WasmExecutor::new().call(&mut ext, 8, &test_code[..], "test_data_in", b"Hello world").unwrap(); - - assert_eq!(output, b"all ok!".to_vec()); - - let expected = TestExternalities::new(map![ - b"input".to_vec() => b"Hello world".to_vec(), - b"foo".to_vec() => b"bar".to_vec(), - b"baz".to_vec() => b"bar".to_vec() - ]); - assert_eq!(ext, expected); - } - - #[test] - fn clear_prefix_should_work() { - let mut ext = TestExternalities::default(); - ext.set_storage(b"aaa".to_vec(), b"1".to_vec()); - ext.set_storage(b"aab".to_vec(), b"2".to_vec()); - ext.set_storage(b"aba".to_vec(), b"3".to_vec()); - ext.set_storage(b"abb".to_vec(), b"4".to_vec()); - ext.set_storage(b"bbb".to_vec(), b"5".to_vec()); - let test_code = include_bytes!("../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm"); - - // This will clear all entries which prefix is "ab". 
- let output = WasmExecutor::new().call(&mut ext, 8, &test_code[..], "test_clear_prefix", b"ab").unwrap(); - - assert_eq!(output, b"all ok!".to_vec()); - - let expected: TestExternalities<_> = map![ - b"aaa".to_vec() => b"1".to_vec(), - b"aab".to_vec() => b"2".to_vec(), - b"bbb".to_vec() => b"5".to_vec() - ]; - assert_eq!(expected, ext); - } - - #[test] - fn blake2_256_should_work() { - let mut ext = TestExternalities::default(); - let test_code = include_bytes!("../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm"); - assert_eq!( - WasmExecutor::new().call(&mut ext, 8, &test_code[..], "test_blake2_256", &[]).unwrap(), - blake2_256(&b""[..]).encode() - ); - assert_eq!( - WasmExecutor::new().call(&mut ext, 8, &test_code[..], "test_blake2_256", b"Hello world!").unwrap(), - blake2_256(&b"Hello world!"[..]).encode() - ); - } - - #[test] - fn twox_256_should_work() { - let mut ext = TestExternalities::default(); - let test_code = include_bytes!("../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm"); - assert_eq!( - WasmExecutor::new().call(&mut ext, 8, &test_code[..], "test_twox_256", &[]).unwrap(), - hex!("99e9d85137db46ef4bbea33613baafd56f963c64b1f3685a4eb4abd67ff6203a") - ); - assert_eq!( - WasmExecutor::new().call(&mut ext, 8, &test_code[..], "test_twox_256", b"Hello world!").unwrap(), - hex!("b27dfd7f223f177f2a13647b533599af0c07f68bda23d96d059da2b451a35a74") - ); - } - - #[test] - fn twox_128_should_work() { - let mut ext = TestExternalities::default(); - let test_code = include_bytes!("../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm"); - assert_eq!( - WasmExecutor::new().call(&mut ext, 8, &test_code[..], "test_twox_128", &[]).unwrap(), - hex!("99e9d85137db46ef4bbea33613baafd5") - ); - assert_eq!( - WasmExecutor::new().call(&mut ext, 8, &test_code[..], "test_twox_128", b"Hello world!").unwrap(), - hex!("b27dfd7f223f177f2a13647b533599af") - ); - } - - #[test] - fn ed25519_verify_should_work() { - let mut ext = TestExternalities::::default(); - let test_code = include_bytes!("../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm"); - let key = ed25519::Pair::from_seed(blake2_256(b"test")); - let sig = key.sign(b"all ok!"); - let mut calldata = vec![]; - calldata.extend_from_slice(key.public().as_ref()); - calldata.extend_from_slice(sig.as_ref()); - - assert_eq!( - WasmExecutor::new().call(&mut ext, 8, &test_code[..], "test_ed25519_verify", &calldata).unwrap(), - vec![1] - ); - - let other_sig = key.sign(b"all is not ok!"); - let mut calldata = vec![]; - calldata.extend_from_slice(key.public().as_ref()); - calldata.extend_from_slice(other_sig.as_ref()); - - assert_eq!( - WasmExecutor::new().call(&mut ext, 8, &test_code[..], "test_ed25519_verify", &calldata).unwrap(), - vec![0] - ); - } - - #[test] - fn sr25519_verify_should_work() { - let mut ext = TestExternalities::::default(); - let test_code = include_bytes!("../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm"); - let key = sr25519::Pair::from_seed(blake2_256(b"test")); - let sig = key.sign(b"all ok!"); - let mut calldata = vec![]; - calldata.extend_from_slice(key.public().as_ref()); - calldata.extend_from_slice(sig.as_ref()); - - assert_eq!( - WasmExecutor::new().call(&mut ext, 8, &test_code[..], "test_sr25519_verify", &calldata).unwrap(), - vec![1] - ); - - let other_sig = key.sign(b"all is not ok!"); - let mut calldata = vec![]; - calldata.extend_from_slice(key.public().as_ref()); - calldata.extend_from_slice(other_sig.as_ref()); - - 
assert_eq!( - WasmExecutor::new().call(&mut ext, 8, &test_code[..], "test_sr25519_verify", &calldata).unwrap(), - vec![0] - ); - } - - #[test] - fn enumerated_trie_root_should_work() { - let mut ext = TestExternalities::<Blake2Hasher>::default(); - let test_code = include_bytes!("../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm"); - assert_eq!( - WasmExecutor::new().call(&mut ext, 8, &test_code[..], "test_enumerated_trie_root", &[]).unwrap(), - ordered_trie_root::(vec![b"zero".to_vec(), b"one".to_vec(), b"two".to_vec()].iter()).as_fixed_bytes().encode() - ); - } + use super::*; + + use parity_codec::Encode; + + use hex_literal::{hex, hex_impl}; + use primitives::map; + use state_machine::TestExternalities; + + #[test] + fn returning_should_work() { + let mut ext = TestExternalities::default(); + let test_code = include_bytes!( + "../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm" + ); + + let output = WasmExecutor::new() + .call(&mut ext, 8, &test_code[..], "test_empty_return", &[]) + .unwrap(); + assert_eq!(output, vec![0u8; 0]); + } + + #[test] + fn panicking_should_work() { + let mut ext = TestExternalities::default(); + let test_code = include_bytes!( + "../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm" + ); + + let output = WasmExecutor::new().call(&mut ext, 8, &test_code[..], "test_panic", &[]); + assert!(output.is_err()); + + let output = + WasmExecutor::new().call(&mut ext, 8, &test_code[..], "test_conditional_panic", &[]); + assert_eq!(output.unwrap(), vec![0u8; 0]); + + let output = + WasmExecutor::new().call(&mut ext, 8, &test_code[..], "test_conditional_panic", &[2]); + assert!(output.is_err()); + } + + #[test] + fn storage_should_work() { + let mut ext = TestExternalities::default(); + ext.set_storage(b"foo".to_vec(), b"bar".to_vec()); + let test_code = include_bytes!( + "../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm" + ); + + let output = WasmExecutor::new() + .call(&mut ext, 8, &test_code[..], "test_data_in", b"Hello world") + .unwrap(); + + assert_eq!(output, b"all ok!".to_vec()); + + let expected = TestExternalities::new(map![ + b"input".to_vec() => b"Hello world".to_vec(), + b"foo".to_vec() => b"bar".to_vec(), + b"baz".to_vec() => b"bar".to_vec() + ]); + assert_eq!(ext, expected); + } + + #[test] + fn clear_prefix_should_work() { + let mut ext = TestExternalities::default(); + ext.set_storage(b"aaa".to_vec(), b"1".to_vec()); + ext.set_storage(b"aab".to_vec(), b"2".to_vec()); + ext.set_storage(b"aba".to_vec(), b"3".to_vec()); + ext.set_storage(b"abb".to_vec(), b"4".to_vec()); + ext.set_storage(b"bbb".to_vec(), b"5".to_vec()); + let test_code = include_bytes!( + "../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm" + ); + + // This will clear all entries whose prefix is "ab".
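+ // Here that means "aba" and "abb" are removed, while "aaa", "aab" and + // "bbb" survive, which is exactly what the expected map below asserts.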
+ let output = WasmExecutor::new() + .call(&mut ext, 8, &test_code[..], "test_clear_prefix", b"ab") + .unwrap(); + + assert_eq!(output, b"all ok!".to_vec()); + + let expected: TestExternalities<_> = map![ + b"aaa".to_vec() => b"1".to_vec(), + b"aab".to_vec() => b"2".to_vec(), + b"bbb".to_vec() => b"5".to_vec() + ]; + assert_eq!(expected, ext); + } + + #[test] + fn blake2_256_should_work() { + let mut ext = TestExternalities::default(); + let test_code = include_bytes!( + "../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm" + ); + assert_eq!( + WasmExecutor::new() + .call(&mut ext, 8, &test_code[..], "test_blake2_256", &[]) + .unwrap(), + blake2_256(&b""[..]).encode() + ); + assert_eq!( + WasmExecutor::new() + .call( + &mut ext, + 8, + &test_code[..], + "test_blake2_256", + b"Hello world!" + ) + .unwrap(), + blake2_256(&b"Hello world!"[..]).encode() + ); + } + + #[test] + fn twox_256_should_work() { + let mut ext = TestExternalities::default(); + let test_code = include_bytes!( + "../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm" + ); + assert_eq!( + WasmExecutor::new() + .call(&mut ext, 8, &test_code[..], "test_twox_256", &[]) + .unwrap(), + hex!("99e9d85137db46ef4bbea33613baafd56f963c64b1f3685a4eb4abd67ff6203a") + ); + assert_eq!( + WasmExecutor::new() + .call( + &mut ext, + 8, + &test_code[..], + "test_twox_256", + b"Hello world!" + ) + .unwrap(), + hex!("b27dfd7f223f177f2a13647b533599af0c07f68bda23d96d059da2b451a35a74") + ); + } + + #[test] + fn twox_128_should_work() { + let mut ext = TestExternalities::default(); + let test_code = include_bytes!( + "../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm" + ); + assert_eq!( + WasmExecutor::new() + .call(&mut ext, 8, &test_code[..], "test_twox_128", &[]) + .unwrap(), + hex!("99e9d85137db46ef4bbea33613baafd5") + ); + assert_eq!( + WasmExecutor::new() + .call( + &mut ext, + 8, + &test_code[..], + "test_twox_128", + b"Hello world!" 
+ ) + .unwrap(), + hex!("b27dfd7f223f177f2a13647b533599af") + ); + } + + #[test] + fn ed25519_verify_should_work() { + let mut ext = TestExternalities::<Blake2Hasher>::default(); + let test_code = include_bytes!( + "../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm" + ); + let key = ed25519::Pair::from_seed(blake2_256(b"test")); + let sig = key.sign(b"all ok!"); + let mut calldata = vec![]; + calldata.extend_from_slice(key.public().as_ref()); + calldata.extend_from_slice(sig.as_ref()); + + assert_eq!( + WasmExecutor::new() + .call( + &mut ext, + 8, + &test_code[..], + "test_ed25519_verify", + &calldata + ) + .unwrap(), + vec![1] + ); + + let other_sig = key.sign(b"all is not ok!"); + let mut calldata = vec![]; + calldata.extend_from_slice(key.public().as_ref()); + calldata.extend_from_slice(other_sig.as_ref()); + + assert_eq!( + WasmExecutor::new() + .call( + &mut ext, + 8, + &test_code[..], + "test_ed25519_verify", + &calldata + ) + .unwrap(), + vec![0] + ); + } + + #[test] + fn sr25519_verify_should_work() { + let mut ext = TestExternalities::<Blake2Hasher>::default(); + let test_code = include_bytes!( + "../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm" + ); + let key = sr25519::Pair::from_seed(blake2_256(b"test")); + let sig = key.sign(b"all ok!"); + let mut calldata = vec![]; + calldata.extend_from_slice(key.public().as_ref()); + calldata.extend_from_slice(sig.as_ref()); + + assert_eq!( + WasmExecutor::new() + .call( + &mut ext, + 8, + &test_code[..], + "test_sr25519_verify", + &calldata + ) + .unwrap(), + vec![1] + ); + + let other_sig = key.sign(b"all is not ok!"); + let mut calldata = vec![]; + calldata.extend_from_slice(key.public().as_ref()); + calldata.extend_from_slice(other_sig.as_ref()); + + assert_eq!( + WasmExecutor::new() + .call( + &mut ext, + 8, + &test_code[..], + "test_sr25519_verify", + &calldata + ) + .unwrap(), + vec![0] + ); + } + + #[test] + fn enumerated_trie_root_should_work() { + let mut ext = TestExternalities::<Blake2Hasher>::default(); + let test_code = include_bytes!( + "../wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm" + ); + assert_eq!( + WasmExecutor::new() + .call( + &mut ext, + 8, + &test_code[..], + "test_enumerated_trie_root", + &[] + ) + .unwrap(), + ordered_trie_root::( + vec![b"zero".to_vec(), b"one".to_vec(), b"two".to_vec()].iter() + ) + .as_fixed_bytes() + .encode() + ); + } } diff --git a/core/executor/src/wasm_utils.rs b/core/executor/src/wasm_utils.rs index 4ad9541dbd..f8b11caed5 100644 --- a/core/executor/src/wasm_utils.rs +++ b/core/executor/src/wasm_utils.rs @@ -16,31 +16,94 @@ //! Rust implementation of Substrate contracts.
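The `ConvertibleToWasm` trait reformatted in the hunk below maps native Rust types onto wasm value types at the host-call boundary. A compressed standalone sketch of the idea, where `Value` is a stand-in for wasmi's `RuntimeValue` rather than a type from this crate:

// Each native type names the wasm value type it crosses the boundary as,
// and knows how to convert itself into such a value.
enum Value {
	I32(i32),
	I64(i64),
}

trait IntoWasm {
	fn into_wasm(self) -> Value;
}

impl IntoWasm for u32 {
	// Unsigned integers are reinterpreted as the signed wasm type of the
	// same width; the bit pattern is preserved.
	fn into_wasm(self) -> Value {
		Value::I32(self as i32)
	}
}

impl IntoWasm for u64 {
	fn into_wasm(self) -> Value {
		Value::I64(self as i64)
	}
}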
-use wasmi::{ValueType, RuntimeValue, HostError}; -use wasmi::nan_preserving_float::{F32, F64}; use std::fmt; +use wasmi::nan_preserving_float::{F32, F64}; +use wasmi::{HostError, RuntimeValue, ValueType}; #[derive(Debug, PartialEq)] pub struct UserError(pub &'static str); impl fmt::Display for UserError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "UserError: {}", self.0) - } -} -impl HostError for UserError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "UserError: {}", self.0) + } } +impl HostError for UserError {} -pub trait ConvertibleToWasm { const VALUE_TYPE: ValueType; type NativeType; fn to_runtime_value(self) -> RuntimeValue; } -impl ConvertibleToWasm for i32 { type NativeType = i32; const VALUE_TYPE: ValueType = ValueType::I32; fn to_runtime_value(self) -> RuntimeValue { RuntimeValue::I32(self) } } -impl ConvertibleToWasm for u32 { type NativeType = u32; const VALUE_TYPE: ValueType = ValueType::I32; fn to_runtime_value(self) -> RuntimeValue { RuntimeValue::I32(self as i32) } } -impl ConvertibleToWasm for i64 { type NativeType = i64; const VALUE_TYPE: ValueType = ValueType::I64; fn to_runtime_value(self) -> RuntimeValue { RuntimeValue::I64(self) } } -impl ConvertibleToWasm for u64 { type NativeType = u64; const VALUE_TYPE: ValueType = ValueType::I64; fn to_runtime_value(self) -> RuntimeValue { RuntimeValue::I64(self as i64) } } -impl ConvertibleToWasm for F32 { type NativeType = F32; const VALUE_TYPE: ValueType = ValueType::F32; fn to_runtime_value(self) -> RuntimeValue { RuntimeValue::F32(self) } } -impl ConvertibleToWasm for F64 { type NativeType = F64; const VALUE_TYPE: ValueType = ValueType::F64; fn to_runtime_value(self) -> RuntimeValue { RuntimeValue::F64(self) } } -impl ConvertibleToWasm for isize { type NativeType = i32; const VALUE_TYPE: ValueType = ValueType::I32; fn to_runtime_value(self) -> RuntimeValue { RuntimeValue::I32(self as i32) } } -impl ConvertibleToWasm for usize { type NativeType = u32; const VALUE_TYPE: ValueType = ValueType::I32; fn to_runtime_value(self) -> RuntimeValue { RuntimeValue::I32(self as u32 as i32) } } -impl ConvertibleToWasm for *const T { type NativeType = u32; const VALUE_TYPE: ValueType = ValueType::I32; fn to_runtime_value(self) -> RuntimeValue { RuntimeValue::I32(self as isize as i32) } } -impl ConvertibleToWasm for *mut T { type NativeType = u32; const VALUE_TYPE: ValueType = ValueType::I32; fn to_runtime_value(self) -> RuntimeValue { RuntimeValue::I32(self as isize as i32) } } +pub trait ConvertibleToWasm { + const VALUE_TYPE: ValueType; + type NativeType; + fn to_runtime_value(self) -> RuntimeValue; +} +impl ConvertibleToWasm for i32 { + type NativeType = i32; + const VALUE_TYPE: ValueType = ValueType::I32; + fn to_runtime_value(self) -> RuntimeValue { + RuntimeValue::I32(self) + } +} +impl ConvertibleToWasm for u32 { + type NativeType = u32; + const VALUE_TYPE: ValueType = ValueType::I32; + fn to_runtime_value(self) -> RuntimeValue { + RuntimeValue::I32(self as i32) + } +} +impl ConvertibleToWasm for i64 { + type NativeType = i64; + const VALUE_TYPE: ValueType = ValueType::I64; + fn to_runtime_value(self) -> RuntimeValue { + RuntimeValue::I64(self) + } +} +impl ConvertibleToWasm for u64 { + type NativeType = u64; + const VALUE_TYPE: ValueType = ValueType::I64; + fn to_runtime_value(self) -> RuntimeValue { + RuntimeValue::I64(self as i64) + } +} +impl ConvertibleToWasm for F32 { + type NativeType = F32; + const VALUE_TYPE: ValueType = ValueType::F32; + fn to_runtime_value(self) -> 
RuntimeValue { + RuntimeValue::F32(self) + } +} +impl ConvertibleToWasm for F64 { + type NativeType = F64; + const VALUE_TYPE: ValueType = ValueType::F64; + fn to_runtime_value(self) -> RuntimeValue { + RuntimeValue::F64(self) + } +} +impl ConvertibleToWasm for isize { + type NativeType = i32; + const VALUE_TYPE: ValueType = ValueType::I32; + fn to_runtime_value(self) -> RuntimeValue { + RuntimeValue::I32(self as i32) + } +} +impl ConvertibleToWasm for usize { + type NativeType = u32; + const VALUE_TYPE: ValueType = ValueType::I32; + fn to_runtime_value(self) -> RuntimeValue { + RuntimeValue::I32(self as u32 as i32) + } +} +impl<T> ConvertibleToWasm for *const T { + type NativeType = u32; + const VALUE_TYPE: ValueType = ValueType::I32; + fn to_runtime_value(self) -> RuntimeValue { + RuntimeValue::I32(self as isize as i32) + } +} +impl<T> ConvertibleToWasm for *mut T { + type NativeType = u32; + const VALUE_TYPE: ValueType = ValueType::I32; + fn to_runtime_value(self) -> RuntimeValue { + RuntimeValue::I32(self as isize as i32) + } +} #[macro_export] macro_rules! convert_args { @@ -115,9 +178,9 @@ macro_rules! unmarshall_args { #[inline(always)] pub fn constrain_closure<R, F>(f: F) -> F where - F: FnOnce() -> Result<R, UserError> + F: FnOnce() -> Result<R, UserError>, { - f + f } #[macro_export] diff --git a/core/finality-grandpa/primitives/src/lib.rs b/core/finality-grandpa/primitives/src/lib.rs index 7016a708bd..5f0c717be9 100644 --- a/core/finality-grandpa/primitives/src/lib.rs +++ b/core/finality-grandpa/primitives/src/lib.rs @@ -22,11 +22,11 @@ #[cfg(not(feature = "std"))] extern crate alloc; -use parity_codec::{Encode, Decode}; -use substrate_primitives::ed25519; -use sr_primitives::traits::{DigestFor, NumberFor}; use client::decl_runtime_apis; +use parity_codec::{Decode, Encode}; use rstd::vec::Vec; +use sr_primitives::traits::{DigestFor, NumberFor}; +use substrate_primitives::ed25519; use ed25519::Public as AuthorityId; @@ -34,10 +34,10 @@ use ed25519::Public as AuthorityId; #[cfg_attr(feature = "std", derive(Debug, PartialEq))] #[derive(Clone, Encode, Decode)] pub struct ScheduledChange<N> { - /// The new authorities after the change, along with their respective weights. - pub next_authorities: Vec<(AuthorityId, u64)>, - /// The number of blocks to delay. - pub delay: N, + /// The new authorities after the change, along with their respective weights. + pub next_authorities: Vec<(AuthorityId, u64)>, + /// The number of blocks to delay. + pub delay: N, } /// WASM function call to check for pending changes. @@ -47,67 +47,67 @@ pub const AUTHORITIES_CALL: &str = "grandpa_authorities"; /// Well-known storage keys for GRANDPA. pub mod well_known_keys { - /// The key for the authorities and weights vector in storage. - pub const AUTHORITY_PREFIX: &[u8] = b":grandpa:auth:"; - /// The key for the authorities count. - pub const AUTHORITY_COUNT: &[u8] = b":grandpa:auth:len"; + /// The key for the authorities and weights vector in storage. + pub const AUTHORITY_PREFIX: &[u8] = b":grandpa:auth:"; + /// The key for the authorities count. + pub const AUTHORITY_COUNT: &[u8] = b":grandpa:auth:len"; } decl_runtime_apis! { - /// APIs for integrating the GRANDPA finality gadget into runtimes. - /// This should be implemented on the runtime side. - /// - /// This is primarily used for negotiating authority-set changes for the - /// gadget. GRANDPA uses a signaling model of changing authority sets: - /// changes should be signaled with a delay of N blocks, and then automatically - /// applied in the runtime after those N blocks have passed.
- /// - /// The consensus protocol will coordinate the handoff externally. - #[api_version(2)] - pub trait GrandpaApi { - /// Check a digest for pending changes. - /// Return `None` if there are no pending changes. - /// - /// Precedence towards earlier or later digest items can be given - /// based on the rules of the chain. - /// - /// No change should be scheduled if one is already and the delay has not - /// passed completely. - /// - /// This should be a pure function: i.e. as long as the runtime can interpret - /// the digest type it should return the same result regardless of the current - /// state. - fn grandpa_pending_change(digest: &DigestFor) - -> Option>>; + /// APIs for integrating the GRANDPA finality gadget into runtimes. + /// This should be implemented on the runtime side. + /// + /// This is primarily used for negotiating authority-set changes for the + /// gadget. GRANDPA uses a signaling model of changing authority sets: + /// changes should be signaled with a delay of N blocks, and then automatically + /// applied in the runtime after those N blocks have passed. + /// + /// The consensus protocol will coordinate the handoff externally. + #[api_version(2)] + pub trait GrandpaApi { + /// Check a digest for pending changes. + /// Return `None` if there are no pending changes. + /// + /// Precedence towards earlier or later digest items can be given + /// based on the rules of the chain. + /// + /// No change should be scheduled if one is already and the delay has not + /// passed completely. + /// + /// This should be a pure function: i.e. as long as the runtime can interpret + /// the digest type it should return the same result regardless of the current + /// state. + fn grandpa_pending_change(digest: &DigestFor) + -> Option>>; - /// Check a digest for forced changes. - /// Return `None` if there are no forced changes. Otherwise, return a - /// tuple containing the pending change and the median last finalized - /// block number at the time the change was signaled. - /// - /// Added in version 2. - /// - /// Forced changes are applied after a delay of _imported_ blocks, - /// while pending changes are applied after a delay of _finalized_ blocks. - /// - /// Precedence towards earlier or later digest items can be given - /// based on the rules of the chain. - /// - /// No change should be scheduled if one is already and the delay has not - /// passed completely. - /// - /// This should be a pure function: i.e. as long as the runtime can interpret - /// the digest type it should return the same result regardless of the current - /// state. - fn grandpa_forced_change(digest: &DigestFor) - -> Option<(NumberFor, ScheduledChange>)>; + /// Check a digest for forced changes. + /// Return `None` if there are no forced changes. Otherwise, return a + /// tuple containing the pending change and the median last finalized + /// block number at the time the change was signaled. + /// + /// Added in version 2. + /// + /// Forced changes are applied after a delay of _imported_ blocks, + /// while pending changes are applied after a delay of _finalized_ blocks. + /// + /// Precedence towards earlier or later digest items can be given + /// based on the rules of the chain. + /// + /// No change should be scheduled if one is already and the delay has not + /// passed completely. + /// + /// This should be a pure function: i.e. as long as the runtime can interpret + /// the digest type it should return the same result regardless of the current + /// state. 
+ fn grandpa_forced_change(digest: &DigestFor<Block>) + -> Option<(NumberFor<Block>, ScheduledChange<NumberFor<Block>>)>; - /// Get the current GRANDPA authorities and weights. This should not change except - /// for when changes are scheduled and the corresponding delay has passed. - /// - /// When called at block B, it will return the set of authorities that should be - /// used to finalize descendants of this block (B+1, B+2, ...). The block B itself - /// is finalized by the authorities from block B-1. - fn grandpa_authorities() -> Vec<(AuthorityId, u64)>; - } + /// Get the current GRANDPA authorities and weights. This should not change except + /// for when changes are scheduled and the corresponding delay has passed. + /// + /// When called at block B, it will return the set of authorities that should be + /// used to finalize descendants of this block (B+1, B+2, ...). The block B itself + /// is finalized by the authorities from block B-1. + fn grandpa_authorities() -> Vec<(AuthorityId, u64)>; + } } diff --git a/core/finality-grandpa/src/authorities.rs b/core/finality-grandpa/src/authorities.rs index ffded9a1ab..18d130f2fe 100644 --- a/core/finality-grandpa/src/authorities.rs +++ b/core/finality-grandpa/src/authorities.rs @@ -17,11 +17,11 @@ //! Utilities for dealing with authorities, authority sets, and handoffs. use fork_tree::ForkTree; -use parking_lot::RwLock; -use substrate_primitives::ed25519; use grandpa::VoterSet; -use parity_codec::{Encode, Decode}; use log::{debug, info}; +use parity_codec::{Decode, Encode}; +use parking_lot::RwLock; +use substrate_primitives::ed25519; use substrate_telemetry::{telemetry, CONSENSUS_INFO}; use std::cmp::Ord; @@ -33,356 +33,375 @@ use ed25519::Public as AuthorityId; /// A shared authority set. pub(crate) struct SharedAuthoritySet<H, N> { - inner: Arc<RwLock<AuthoritySet<H, N>>>, + inner: Arc<RwLock<AuthoritySet<H, N>>>, } impl<H, N> Clone for SharedAuthoritySet<H, N> { - fn clone(&self) -> Self { - SharedAuthoritySet { inner: self.inner.clone() } - } + fn clone(&self) -> Self { + SharedAuthoritySet { + inner: self.inner.clone(), + } + } } impl<H, N> SharedAuthoritySet<H, N> { - /// Acquire a reference to the inner read-write lock. - pub(crate) fn inner(&self) -> &RwLock<AuthoritySet<H, N>> { - &*self.inner - } + /// Acquire a reference to the inner read-write lock. + pub(crate) fn inner(&self) -> &RwLock<AuthoritySet<H, N>> { + &*self.inner + } } impl<H, N> SharedAuthoritySet<H, N> -where N: Add<Output=N> + Ord + Clone + Debug, - H: Clone + Debug +where + N: Add<Output = N> + Ord + Clone + Debug, + H: Clone + Debug, { - /// Get the earliest limit-block number, if any. - pub(crate) fn current_limit(&self) -> Option<N> { - self.inner.read().current_limit() - } - - /// Get the current set ID. This is incremented every time the set changes. - pub(crate) fn set_id(&self) -> u64 { - self.inner.read().set_id - } - - /// Get the current authorities and their weights (for the current set ID). - pub(crate) fn current_authorities(&self) -> VoterSet<AuthorityId> { - self.inner.read().current_authorities.iter().cloned().collect() - } + /// Get the earliest limit-block number, if any. + pub(crate) fn current_limit(&self) -> Option<N> { + self.inner.read().current_limit() + } + + /// Get the current set ID. This is incremented every time the set changes. + pub(crate) fn set_id(&self) -> u64 { + self.inner.read().set_id + } + + /// Get the current authorities and their weights (for the current set ID).
+ pub(crate) fn current_authorities(&self) -> VoterSet<AuthorityId> { + self.inner + .read() + .current_authorities + .iter() + .cloned() + .collect() + } } impl<H, N> From<AuthoritySet<H, N>> for SharedAuthoritySet<H, N> { - fn from(set: AuthoritySet<H, N>) -> Self { - SharedAuthoritySet { inner: Arc::new(RwLock::new(set)) } - } + fn from(set: AuthoritySet<H, N>) -> Self { + SharedAuthoritySet { + inner: Arc::new(RwLock::new(set)), + } + } } /// Status of the set after changes were applied. #[derive(Debug)] pub(crate) struct Status<H, N> { - /// Whether internal changes were made. - pub(crate) changed: bool, - /// `Some` when underlying authority set has changed, containing the - /// block where that set changed. - pub(crate) new_set_block: Option<(H, N)>, + /// Whether internal changes were made. + pub(crate) changed: bool, + /// `Some` when underlying authority set has changed, containing the + /// block where that set changed. + pub(crate) new_set_block: Option<(H, N)>, } /// A set of authorities. #[derive(Debug, Clone, Encode, Decode, PartialEq)] pub(crate) struct AuthoritySet<H, N> { - pub(crate) current_authorities: Vec<(AuthorityId, u64)>, - pub(crate) set_id: u64, - // Tree of pending standard changes across forks. Standard changes are - // enacted on finality and must be enacted (i.e. finalized) in-order across - // a given branch - pub(crate) pending_standard_changes: ForkTree<H, N, PendingChange<H, N>>, - // Pending forced changes across different forks (at most one per fork). - // Forced changes are enacted on block depth (not finality), for this reason - // only one forced change should exist per fork. - pub(crate) pending_forced_changes: Vec<PendingChange<H, N>>, + pub(crate) current_authorities: Vec<(AuthorityId, u64)>, + pub(crate) set_id: u64, + // Tree of pending standard changes across forks. Standard changes are + // enacted on finality and must be enacted (i.e. finalized) in-order across + // a given branch + pub(crate) pending_standard_changes: ForkTree<H, N, PendingChange<H, N>>, + // Pending forced changes across different forks (at most one per fork). + // Forced changes are enacted on block depth (not finality), for this reason + // only one forced change should exist per fork. + pub(crate) pending_forced_changes: Vec<PendingChange<H, N>>, } impl<H, N> AuthoritySet<H, N> -where H: PartialEq, - N: Ord, +where + H: PartialEq, + N: Ord, { - /// Get a genesis set with given authorities. - pub(crate) fn genesis(initial: Vec<(AuthorityId, u64)>) -> Self { - AuthoritySet { - current_authorities: initial, - set_id: 0, - pending_standard_changes: ForkTree::new(), - pending_forced_changes: Vec::new(), - } - } - - /// Get the current set id and a reference to the current authority set. - pub(crate) fn current(&self) -> (u64, &[(AuthorityId, u64)]) { - (self.set_id, &self.current_authorities[..]) - } + /// Get a genesis set with given authorities. + pub(crate) fn genesis(initial: Vec<(AuthorityId, u64)>) -> Self { + AuthoritySet { + current_authorities: initial, + set_id: 0, + pending_standard_changes: ForkTree::new(), + pending_forced_changes: Vec::new(), + } + } + + /// Get the current set id and a reference to the current authority set.
+ pub(crate) fn current(&self) -> (u64, &[(AuthorityId, u64)]) { + (self.set_id, &self.current_authorities[..]) + } } impl AuthoritySet where - N: Add + Ord + Clone + Debug, - H: Clone + Debug + N: Add + Ord + Clone + Debug, + H: Clone + Debug, { - fn add_standard_change( - &mut self, - pending: PendingChange, - is_descendent_of: &F, - ) -> Result<(), fork_tree::Error> where - F: Fn(&H, &H) -> Result, - E: std::error::Error, - { - let hash = pending.canon_hash.clone(); - let number = pending.canon_height.clone(); - - debug!(target: "afg", "Inserting potential standard set change signaled at block {:?} \ + fn add_standard_change( + &mut self, + pending: PendingChange, + is_descendent_of: &F, + ) -> Result<(), fork_tree::Error> + where + F: Fn(&H, &H) -> Result, + E: std::error::Error, + { + let hash = pending.canon_hash.clone(); + let number = pending.canon_height.clone(); + + debug!(target: "afg", "Inserting potential standard set change signaled at block {:?} \ (delayed by {:?} blocks).", (&number, &hash), pending.delay); - self.pending_standard_changes.import( - hash.clone(), - number.clone(), - pending, - is_descendent_of, - )?; - - debug!(target: "afg", "There are now {} alternatives for the next pending standard change (roots), \ - and a total of {} pending standard changes (across all forks).", - self.pending_standard_changes.roots().count(), - self.pending_standard_changes.iter().count(), - ); - - Ok(()) - } - - fn add_forced_change( - &mut self, - pending: PendingChange, - is_descendent_of: &F, - ) -> Result<(), fork_tree::Error> where - F: Fn(&H, &H) -> Result, - E: std::error::Error, - { - for change in self.pending_forced_changes.iter() { - if change.canon_hash == pending.canon_hash || - is_descendent_of(&change.canon_hash, &pending.canon_hash)? - { - return Err(fork_tree::Error::UnfinalizedAncestor); - } - } - - // ordered first by effective number and then by signal-block number. - let key = (pending.effective_number(), pending.canon_height.clone()); - let idx = self.pending_forced_changes - .binary_search_by_key(&key, |change| ( - change.effective_number(), - change.canon_height.clone(), - )) - .unwrap_or_else(|i| i); - - debug!(target: "afg", "Inserting potential forced set change at block {:?} \ + self.pending_standard_changes.import( + hash.clone(), + number.clone(), + pending, + is_descendent_of, + )?; + + debug!(target: "afg", "There are now {} alternatives for the next pending standard change (roots), \ + and a total of {} pending standard changes (across all forks).", + self.pending_standard_changes.roots().count(), + self.pending_standard_changes.iter().count(), + ); + + Ok(()) + } + + fn add_forced_change( + &mut self, + pending: PendingChange, + is_descendent_of: &F, + ) -> Result<(), fork_tree::Error> + where + F: Fn(&H, &H) -> Result, + E: std::error::Error, + { + for change in self.pending_forced_changes.iter() { + if change.canon_hash == pending.canon_hash + || is_descendent_of(&change.canon_hash, &pending.canon_hash)? + { + return Err(fork_tree::Error::UnfinalizedAncestor); + } + } + + // ordered first by effective number and then by signal-block number. 
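+ // `binary_search_by_key` returns `Err(i)` carrying the insertion point when + // no existing change shares the key, so `unwrap_or_else(|i| i)` yields a slot + // that keeps the vector sorted in both the hit and miss cases.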
+ let key = (pending.effective_number(), pending.canon_height.clone()); + let idx = self + .pending_forced_changes + .binary_search_by_key(&key, |change| { + (change.effective_number(), change.canon_height.clone()) + }) + .unwrap_or_else(|i| i); + + debug!(target: "afg", "Inserting potential forced set change at block {:?} \ (delayed by {:?} blocks).", (&pending.canon_height, &pending.canon_hash), pending.delay); - self.pending_forced_changes.insert(idx, pending); - - debug!(target: "afg", "There are now {} pending forced changes.", self.pending_forced_changes.len()); - - Ok(()) - } - - /// Note an upcoming pending transition. Multiple pending standard changes - /// on the same branch can be added as long as they don't overlap. Forced - /// changes are restricted to one per fork. This method assumes that changes - /// on the same branch will be added in-order. The given function - /// `is_descendent_of` should return `true` if the second hash (target) is a - /// descendent of the first hash (base). - pub(crate) fn add_pending_change( - &mut self, - pending: PendingChange, - is_descendent_of: &F, - ) -> Result<(), fork_tree::Error> where - F: Fn(&H, &H) -> Result, - E: std::error::Error, - { - match pending.delay_kind { - DelayKind::Best { .. } => { - self.add_forced_change(pending, is_descendent_of) - }, - DelayKind::Finalized => { - self.add_standard_change(pending, is_descendent_of) - }, - } - } - - /// Inspect pending changes. Standard pending changes are iterated first, - /// and the changes in the tree are traversed in pre-order, afterwards all - /// forced changes are iterated. - pub(crate) fn pending_changes(&self) -> impl Iterator> { - self.pending_standard_changes.iter().map(|(_, _, c)| c) - .chain(self.pending_forced_changes.iter()) - } - - /// Get the earliest limit-block number, if any. If there are pending changes across - /// different forks, this method will return the earliest effective number (across the - /// different branches). Only standard changes are taken into account for the current - /// limit, since any existing forced change should preclude the voter from voting. - pub(crate) fn current_limit(&self) -> Option { - self.pending_standard_changes.roots() - .min_by_key(|&(_, _, c)| c.effective_number()) - .map(|(_, _, c)| c.effective_number()) - } - - /// Apply or prune any pending transitions based on a best-block trigger. - /// - /// Returns `Ok((median, new_set))` when a forced change has occurred. The - /// median represents the median last finalized block at the time the change - /// was signaled, and it should be used as the canon block when starting the - /// new grandpa voter. Only alters the internal state in this case. - /// - /// These transitions are always forced and do not lead to justifications - /// which light clients can follow. - pub(crate) fn apply_forced_changes( - &self, - best_hash: H, - best_number: N, - is_descendent_of: &F, - ) -> Result, E> - where F: Fn(&H, &H) -> Result, - { - let mut new_set = None; - - for change in self.pending_forced_changes.iter() - .take_while(|c| c.effective_number() <= best_number) // to prevent iterating too far - .filter(|c| c.effective_number() == best_number) - { - // check if the given best block is in the same branch as the block that signaled the change. - if is_descendent_of(&change.canon_hash, &best_hash)? 
{ - // apply this change: make the set canonical - info!(target: "finality", "Applying authority set change forced at block #{:?}", + self.pending_forced_changes.insert(idx, pending); + + debug!(target: "afg", "There are now {} pending forced changes.", self.pending_forced_changes.len()); + + Ok(()) + } + + /// Note an upcoming pending transition. Multiple pending standard changes + /// on the same branch can be added as long as they don't overlap. Forced + /// changes are restricted to one per fork. This method assumes that changes + /// on the same branch will be added in-order. The given function + /// `is_descendent_of` should return `true` if the second hash (target) is a + /// descendent of the first hash (base). + pub(crate) fn add_pending_change( + &mut self, + pending: PendingChange, + is_descendent_of: &F, + ) -> Result<(), fork_tree::Error> + where + F: Fn(&H, &H) -> Result, + E: std::error::Error, + { + match pending.delay_kind { + DelayKind::Best { .. } => self.add_forced_change(pending, is_descendent_of), + DelayKind::Finalized => self.add_standard_change(pending, is_descendent_of), + } + } + + /// Inspect pending changes. Standard pending changes are iterated first, + /// and the changes in the tree are traversed in pre-order, afterwards all + /// forced changes are iterated. + pub(crate) fn pending_changes(&self) -> impl Iterator> { + self.pending_standard_changes + .iter() + .map(|(_, _, c)| c) + .chain(self.pending_forced_changes.iter()) + } + + /// Get the earliest limit-block number, if any. If there are pending changes across + /// different forks, this method will return the earliest effective number (across the + /// different branches). Only standard changes are taken into account for the current + /// limit, since any existing forced change should preclude the voter from voting. + pub(crate) fn current_limit(&self) -> Option { + self.pending_standard_changes + .roots() + .min_by_key(|&(_, _, c)| c.effective_number()) + .map(|(_, _, c)| c.effective_number()) + } + + /// Apply or prune any pending transitions based on a best-block trigger. + /// + /// Returns `Ok((median, new_set))` when a forced change has occurred. The + /// median represents the median last finalized block at the time the change + /// was signaled, and it should be used as the canon block when starting the + /// new grandpa voter. Only alters the internal state in this case. + /// + /// These transitions are always forced and do not lead to justifications + /// which light clients can follow. + pub(crate) fn apply_forced_changes( + &self, + best_hash: H, + best_number: N, + is_descendent_of: &F, + ) -> Result, E> + where + F: Fn(&H, &H) -> Result, + { + let mut new_set = None; + + for change in self + .pending_forced_changes + .iter() + .take_while(|c| c.effective_number() <= best_number) // to prevent iterating too far + .filter(|c| c.effective_number() == best_number) + { + // check if the given best block is in the same branch as the block that signaled the change. + if is_descendent_of(&change.canon_hash, &best_hash)? 
+                // apply this change: make the set canonical
+                info!(target: "finality", "Applying authority set change forced at block #{:?}",
 					change.canon_height);
-				telemetry!(CONSENSUS_INFO; "afg.applying_forced_authority_set_change";
-					"block" => ?change.canon_height
-				);
+                telemetry!(CONSENSUS_INFO; "afg.applying_forced_authority_set_change";
+                    "block" => ?change.canon_height
+                );
-				let median_last_finalized = match change.delay_kind {
+                let median_last_finalized = match change.delay_kind {
 					DelayKind::Best { ref median_last_finalized } => median_last_finalized.clone(),
 					_ => unreachable!("pending_forced_changes only contains forced changes; forced changes have delay kind Best; qed."),
 				};
-				new_set = Some((median_last_finalized, AuthoritySet {
-					current_authorities: change.next_authorities.clone(),
-					set_id: self.set_id + 1,
-					pending_standard_changes: ForkTree::new(), // new set, new changes.
-					pending_forced_changes: Vec::new(),
-				}));
-
-				break;
-			}
-
-			// we don't wipe forced changes until another change is
-			// applied
-		}
-
-		Ok(new_set)
-	}
-
-	/// Apply or prune any pending transitions based on a finality trigger. This
-	/// method ensures that if there are multiple changes in the same branch,
-	/// finalizing this block won't finalize past multiple transitions (i.e.
-	/// transitions must be finalized in-order). The given function
-	/// `is_descendent_of` should return `true` if the second hash (target) is a
-	/// descendent of the first hash (base).
-	///
-	/// When the set has changed, the return value will be `Ok(Some((H, N)))`
-	/// which is the canonical block where the set last changed (i.e. the given
-	/// hash and number).
-	pub(crate) fn apply_standard_changes<F, E>(
-		&mut self,
-		finalized_hash: H,
-		finalized_number: N,
-		is_descendent_of: &F,
-	) -> Result<Status<H, N>, fork_tree::Error<E>>
-		where F: Fn(&H, &H) -> Result<bool, E>,
-		E: std::error::Error,
-	{
-		let mut status = Status {
-			changed: false,
-			new_set_block: None,
-		};
-
-		match self.pending_standard_changes.finalize_with_descendent_if(
-			&finalized_hash,
-			finalized_number.clone(),
-			is_descendent_of,
-			|change| change.effective_number() <= finalized_number
-		)? {
-			fork_tree::FinalizationResult::Changed(change) => {
-				status.changed = true;
-
-				// if we are able to finalize any standard change then we can
-				// discard all pending forced changes (on different forks)
-				self.pending_forced_changes.clear();
-
-				if let Some(change) = change {
-					info!(target: "finality", "Applying authority set change scheduled at block #{:?}",
+                new_set = Some((
+                    median_last_finalized,
+                    AuthoritySet {
+                        current_authorities: change.next_authorities.clone(),
+                        set_id: self.set_id + 1,
+                        pending_standard_changes: ForkTree::new(), // new set, new changes.
+                        pending_forced_changes: Vec::new(),
+                    },
+                ));
+
+                break;
+            }
+
+            // we don't wipe forced changes until another change is
+            // applied
+        }
+
+        Ok(new_set)
+    }
+
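// Illustrative sketch (not part of the patch): the two invariants the forced-change
// scan in `apply_forced_changes` above relies on. `pending_forced_changes` is kept
// sorted by effective number by the binary-search insertion in `add_forced_change`,
// so the scan can stop early with `take_while`, and a change only fires when the
// best block sits exactly at its effective number. Plain `u64` stands in for the
// generic `N` here.
fn forced_changes_effective_at(sorted_effective_numbers: &[u64], best_number: u64) -> Vec<u64> {
    sorted_effective_numbers
        .iter()
        .cloned()
        .take_while(|&n| n <= best_number) // early exit, as in apply_forced_changes
        .filter(|&n| n == best_number) // fire only exactly at the effective number
        .collect()
}

#[test]
fn fires_exactly_at_effective_number() {
    // canon_height 5 + delay 10 => effective number 15, mirroring the tests below.
    assert_eq!(forced_changes_effective_at(&[15, 20], 14), Vec::<u64>::new()); // too early
    assert_eq!(forced_changes_effective_at(&[15, 20], 16), Vec::<u64>::new()); // too late
    assert_eq!(forced_changes_effective_at(&[15, 20], 15), vec![15]); // on time
}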
+    /// Apply or prune any pending transitions based on a finality trigger. This
+    /// method ensures that if there are multiple changes in the same branch,
+    /// finalizing this block won't finalize past multiple transitions (i.e.
+    /// transitions must be finalized in-order). The given function
+    /// `is_descendent_of` should return `true` if the second hash (target) is a
+    /// descendent of the first hash (base).
+    ///
+    /// When the set has changed, the return value will be `Ok(Some((H, N)))`
+    /// which is the canonical block where the set last changed (i.e. the given
+    /// hash and number).
+    pub(crate) fn apply_standard_changes<F, E>(
+        &mut self,
+        finalized_hash: H,
+        finalized_number: N,
+        is_descendent_of: &F,
+    ) -> Result<Status<H, N>, fork_tree::Error<E>>
+    where
+        F: Fn(&H, &H) -> Result<bool, E>,
+        E: std::error::Error,
+    {
+        let mut status = Status {
+            changed: false,
+            new_set_block: None,
+        };
+
+        match self.pending_standard_changes.finalize_with_descendent_if(
+            &finalized_hash,
+            finalized_number.clone(),
+            is_descendent_of,
+            |change| change.effective_number() <= finalized_number,
+        )? {
+            fork_tree::FinalizationResult::Changed(change) => {
+                status.changed = true;
+
+                // if we are able to finalize any standard change then we can
+                // discard all pending forced changes (on different forks)
+                self.pending_forced_changes.clear();
+
+                if let Some(change) = change {
+                    info!(target: "finality", "Applying authority set change scheduled at block #{:?}",
 					change.canon_height);
-					telemetry!(CONSENSUS_INFO; "afg.applying_scheduled_authority_set_change";
-						"block" => ?change.canon_height
-					);
-
-					self.current_authorities = change.next_authorities;
-					self.set_id += 1;
-
-					status.new_set_block = Some((
-						finalized_hash,
-						finalized_number,
-					));
-				}
-			},
-			fork_tree::FinalizationResult::Unchanged => {},
-		}
-
-		Ok(status)
-	}
-
-	/// Check whether the given finalized block number enacts any standard
-	/// authority set change (without triggering it), ensuring that if there are
-	/// multiple changes in the same branch, finalizing this block won't
-	/// finalize past multiple transitions (i.e. transitions must be finalized
-	/// in-order). Returns `Some(true)` if the block being finalized enacts a
-	/// change that can be immediately applied, `Some(false)` if the block being
-	/// finalized enacts a change but it cannot be applied yet since there are
-	/// other dependent changes, and `None` if no change is enacted. The given
-	/// function `is_descendent_of` should return `true` if the second hash
-	/// (target) is a descendent of the first hash (base).
+                    telemetry!(CONSENSUS_INFO; "afg.applying_scheduled_authority_set_change";
+                        "block" => ?change.canon_height
+                    );
+
+                    self.current_authorities = change.next_authorities;
+                    self.set_id += 1;
+
+                    status.new_set_block = Some((finalized_hash, finalized_number));
+                }
+            }
+            fork_tree::FinalizationResult::Unchanged => {}
+        }
+
+        Ok(status)
+    }
+
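// Illustrative sketch (hypothetical two-block chain, not part of the patch): the
// shape of the `is_descendent_of` predicate that `apply_standard_changes` and
// `enacts_standard_change` expect. A real implementation queries the chain backend;
// any error type implementing `std::error::Error` works, `std::io::Error` is used
// here only for brevity.
fn is_descendent_of_example(base: &&'static str, target: &&'static str) -> Result<bool, std::io::Error> {
    // pretend the canonical chain is "hash_a" -> "hash_b"
    Ok(match (*base, *target) {
        ("hash_a", "hash_b") => true,
        _ => false,
    })
}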
+    /// Check whether the given finalized block number enacts any standard
+    /// authority set change (without triggering it), ensuring that if there are
+    /// multiple changes in the same branch, finalizing this block won't
+    /// finalize past multiple transitions (i.e. transitions must be finalized
+    /// in-order). Returns `Some(true)` if the block being finalized enacts a
+    /// change that can be immediately applied, `Some(false)` if the block being
+    /// finalized enacts a change but it cannot be applied yet since there are
+    /// other dependent changes, and `None` if no change is enacted. The given
+    /// function `is_descendent_of` should return `true` if the second hash
+    /// (target) is a descendent of the first hash (base).
+    pub fn enacts_standard_change<F, E>(
+        &self,
+        finalized_hash: H,
+        finalized_number: N,
+        is_descendent_of: &F,
+    ) -> Result<Option<bool>, fork_tree::Error<E>>
+    where
+        F: Fn(&H, &H) -> Result<bool, E>,
+        E: std::error::Error,
+    {
+        self.pending_standard_changes
+            .finalizes_any_with_descendent_if(
+                &finalized_hash,
+                finalized_number.clone(),
+                is_descendent_of,
+                |change| change.effective_number() == finalized_number,
+            )
+    }
 }
 
 /// Kinds of delays for pending changes.
 #[derive(Debug, Clone, Encode, Decode, PartialEq)]
 pub(crate) enum DelayKind<N> {
-	/// Depth in finalized chain.
-	Finalized,
-	/// Depth in best chain. The median last finalized block is calculated at the time the
-	/// change was signaled.
-	Best { median_last_finalized: N },
+    /// Depth in finalized chain.
+    Finalized,
+    /// Depth in best chain. The median last finalized block is calculated at the time the
+    /// change was signaled.
+    Best { median_last_finalized: N },
 }
 
 /// A pending change to the authority set.
@@ -391,398 +410,474 @@ pub(crate) enum DelayKind<N> {
 /// the finalized or unfinalized chain.
 #[derive(Debug, Clone, Encode, PartialEq)]
 pub(crate) struct PendingChange<H, N> {
-	/// The new authorities and weights to apply.
-	pub(crate) next_authorities: Vec<(AuthorityId, u64)>,
-	/// How deep in the chain the announcing block must be
-	/// before the change is applied.
-	pub(crate) delay: N,
-	/// The announcing block's height.
-	pub(crate) canon_height: N,
-	/// The announcing block's hash.
-	pub(crate) canon_hash: H,
-	/// The delay kind.
-	pub(crate) delay_kind: DelayKind<N>,
+    /// The new authorities and weights to apply.
+    pub(crate) next_authorities: Vec<(AuthorityId, u64)>,
+    /// How deep in the chain the announcing block must be
+    /// before the change is applied.
+    pub(crate) delay: N,
+    /// The announcing block's height.
+    pub(crate) canon_height: N,
+    /// The announcing block's hash.
+    pub(crate) canon_hash: H,
+    /// The delay kind.
+    pub(crate) delay_kind: DelayKind<N>,
 }
 
 impl<H: Decode, N: Decode> Decode for PendingChange<H, N> {
-	fn decode<I: Input>(value: &mut I) -> Option<Self> {
-		let next_authorities = Decode::decode(value)?;
-		let delay = Decode::decode(value)?;
-		let canon_height = Decode::decode(value)?;
-		let canon_hash = Decode::decode(value)?;
-
-		let delay_kind = DelayKind::decode(value).unwrap_or(DelayKind::Finalized);
-
-		Some(PendingChange {
-			next_authorities,
-			delay,
-			canon_height,
-			canon_hash,
-			delay_kind,
-		})
-	}
+    fn decode<I: Input>(value: &mut I) -> Option<Self> {
+        let next_authorities = Decode::decode(value)?;
+        let delay = Decode::decode(value)?;
+        let canon_height = Decode::decode(value)?;
+        let canon_hash = Decode::decode(value)?;
+
+        let delay_kind = DelayKind::decode(value).unwrap_or(DelayKind::Finalized);
+
+        Some(PendingChange {
+            next_authorities,
+            delay,
+            canon_height,
+            canon_hash,
+            delay_kind,
+        })
+    }
 }
 
-impl<H, N: Add<Output=N> + Clone> PendingChange<H, N> {
-	/// Returns the effective number this change will be applied at.
-	pub fn effective_number(&self) -> N {
-		self.canon_height.clone() + self.delay.clone()
-	}
+impl<H, N: Add<Output = N> + Clone> PendingChange<H, N> {
+    /// Returns the effective number this change will be applied at.
+ pub fn effective_number(&self) -> N { + self.canon_height.clone() + self.delay.clone() + } } #[cfg(test)] mod tests { - use super::*; - - fn static_is_descendent_of(value: bool) - -> impl Fn(&A, &A) -> Result - { - move |_, _| Ok(value) - } - - fn is_descendent_of(f: F) -> impl Fn(&A, &A) -> Result - where F: Fn(&A, &A) -> bool - { - move |base, hash| Ok(f(base, hash)) - } - - #[test] - fn changes_iterated_in_pre_order() { - let mut authorities = AuthoritySet { - current_authorities: Vec::new(), - set_id: 0, - pending_standard_changes: ForkTree::new(), - pending_forced_changes: Vec::new(), - }; - - let change_a = PendingChange { - next_authorities: Vec::new(), - delay: 10, - canon_height: 5, - canon_hash: "hash_a", - delay_kind: DelayKind::Finalized, - }; - - let change_b = PendingChange { - next_authorities: Vec::new(), - delay: 0, - canon_height: 5, - canon_hash: "hash_b", - delay_kind: DelayKind::Finalized, - }; - - let change_c = PendingChange { - next_authorities: Vec::new(), - delay: 5, - canon_height: 10, - canon_hash: "hash_c", - delay_kind: DelayKind::Finalized, - }; - - authorities.add_pending_change(change_a.clone(), &static_is_descendent_of(false)).unwrap(); - authorities.add_pending_change(change_b.clone(), &static_is_descendent_of(false)).unwrap(); - authorities.add_pending_change(change_c.clone(), &is_descendent_of(|base, hash| match (*base, *hash) { - ("hash_a", "hash_c") => true, - ("hash_b", "hash_c") => false, - _ => unreachable!(), - })).unwrap(); - - // forced changes are iterated last - let change_d = PendingChange { - next_authorities: Vec::new(), - delay: 2, - canon_height: 1, - canon_hash: "hash_d", - delay_kind: DelayKind::Best { median_last_finalized: 0 }, - }; - - let change_e = PendingChange { - next_authorities: Vec::new(), - delay: 2, - canon_height: 0, - canon_hash: "hash_e", - delay_kind: DelayKind::Best { median_last_finalized: 0 }, - }; - - authorities.add_pending_change(change_d.clone(), &static_is_descendent_of(false)).unwrap(); - authorities.add_pending_change(change_e.clone(), &static_is_descendent_of(false)).unwrap(); - - assert_eq!( - authorities.pending_changes().collect::>(), - vec![&change_b, &change_a, &change_c, &change_e, &change_d], - ); - } - - #[test] - fn apply_change() { - let mut authorities = AuthoritySet { - current_authorities: Vec::new(), - set_id: 0, - pending_standard_changes: ForkTree::new(), - pending_forced_changes: Vec::new(), - }; - - let set_a = vec![(AuthorityId([1; 32]), 5)]; - let set_b = vec![(AuthorityId([2; 32]), 5)]; - - // two competing changes at the same height on different forks - let change_a = PendingChange { - next_authorities: set_a.clone(), - delay: 10, - canon_height: 5, - canon_hash: "hash_a", - delay_kind: DelayKind::Finalized, - }; - - let change_b = PendingChange { - next_authorities: set_b.clone(), - delay: 10, - canon_height: 5, - canon_hash: "hash_b", - delay_kind: DelayKind::Finalized, - }; - - authorities.add_pending_change(change_a.clone(), &static_is_descendent_of(true)).unwrap(); - authorities.add_pending_change(change_b.clone(), &static_is_descendent_of(true)).unwrap(); - - assert_eq!( - authorities.pending_changes().collect::>(), - vec![&change_b, &change_a], - ); - - // finalizing "hash_c" won't enact the change signaled at "hash_a" but it will prune out "hash_b" - let status = authorities.apply_standard_changes("hash_c", 11, &is_descendent_of(|base, hash| match (*base, *hash) { - ("hash_a", "hash_c") => true, - ("hash_b", "hash_c") => false, - _ => unreachable!(), - })).unwrap(); - - 
assert!(status.changed); - assert_eq!(status.new_set_block, None); - assert_eq!( - authorities.pending_changes().collect::>(), - vec![&change_a], - ); - - // finalizing "hash_d" will enact the change signaled at "hash_a" - let status = authorities.apply_standard_changes("hash_d", 15, &is_descendent_of(|base, hash| match (*base, *hash) { - ("hash_a", "hash_d") => true, - _ => unreachable!(), - })).unwrap(); - - assert!(status.changed); - assert_eq!(status.new_set_block, Some(("hash_d", 15))); - - assert_eq!(authorities.current_authorities, set_a); - assert_eq!(authorities.set_id, 1); - assert_eq!(authorities.pending_changes().count(), 0); - } - - #[test] - fn disallow_multiple_changes_being_finalized_at_once() { - let mut authorities = AuthoritySet { - current_authorities: Vec::new(), - set_id: 0, - pending_standard_changes: ForkTree::new(), - pending_forced_changes: Vec::new(), - }; - - let set_a = vec![(AuthorityId([1; 32]), 5)]; - let set_c = vec![(AuthorityId([2; 32]), 5)]; - - // two competing changes at the same height on different forks - let change_a = PendingChange { - next_authorities: set_a.clone(), - delay: 10, - canon_height: 5, - canon_hash: "hash_a", - delay_kind: DelayKind::Finalized, - }; - - let change_c = PendingChange { - next_authorities: set_c.clone(), - delay: 10, - canon_height: 30, - canon_hash: "hash_c", - delay_kind: DelayKind::Finalized, - }; - - authorities.add_pending_change(change_a.clone(), &static_is_descendent_of(true)).unwrap(); - authorities.add_pending_change(change_c.clone(), &static_is_descendent_of(true)).unwrap(); - - let is_descendent_of = is_descendent_of(|base, hash| match (*base, *hash) { - ("hash_a", "hash_b") => true, - ("hash_a", "hash_c") => true, - ("hash_a", "hash_d") => true, - - ("hash_c", "hash_b") => false, - ("hash_c", "hash_d") => true, - - ("hash_b", "hash_c") => true, - _ => unreachable!(), - }); - - // trying to finalize past `change_c` without finalizing `change_a` first - match authorities.apply_standard_changes("hash_d", 40, &is_descendent_of) { - Err(fork_tree::Error::UnfinalizedAncestor) => {}, - _ => unreachable!(), - } - - let status = authorities.apply_standard_changes("hash_b", 15, &is_descendent_of).unwrap(); - assert!(status.changed); - assert_eq!(status.new_set_block, Some(("hash_b", 15))); - - assert_eq!(authorities.current_authorities, set_a); - assert_eq!(authorities.set_id, 1); - - // after finalizing `change_a` it should be possible to finalize `change_c` - let status = authorities.apply_standard_changes("hash_d", 40, &is_descendent_of).unwrap(); - assert!(status.changed); - assert_eq!(status.new_set_block, Some(("hash_d", 40))); - - assert_eq!(authorities.current_authorities, set_c); - assert_eq!(authorities.set_id, 2); - } - - #[test] - fn enacts_standard_change_works() { - let mut authorities = AuthoritySet { - current_authorities: Vec::new(), - set_id: 0, - pending_standard_changes: ForkTree::new(), - pending_forced_changes: Vec::new(), - }; - - let set_a = vec![(AuthorityId([1; 32]), 5)]; - - let change_a = PendingChange { - next_authorities: set_a.clone(), - delay: 10, - canon_height: 5, - canon_hash: "hash_a", - delay_kind: DelayKind::Finalized, - }; - - let change_b = PendingChange { - next_authorities: set_a.clone(), - delay: 10, - canon_height: 20, - canon_hash: "hash_b", - delay_kind: DelayKind::Finalized, - }; - - authorities.add_pending_change(change_a.clone(), &static_is_descendent_of(false)).unwrap(); - authorities.add_pending_change(change_b.clone(), &static_is_descendent_of(true)).unwrap(); - - let 
is_descendent_of = is_descendent_of(|base, hash| match (*base, *hash) { - ("hash_a", "hash_d") => true, - ("hash_a", "hash_e") => true, - ("hash_b", "hash_d") => true, - ("hash_b", "hash_e") => true, - ("hash_a", "hash_c") => false, - ("hash_b", "hash_c") => false, - _ => unreachable!(), - }); - - // "hash_c" won't finalize the existing change since it isn't a descendent - assert_eq!( - authorities.enacts_standard_change("hash_c", 15, &is_descendent_of).unwrap(), - None, - ); - - // "hash_d" at depth 14 won't work either - assert_eq!( - authorities.enacts_standard_change("hash_d", 14, &is_descendent_of).unwrap(), - None, - ); - - // but it should work at depth 15 (change height + depth) - assert_eq!( - authorities.enacts_standard_change("hash_d", 15, &is_descendent_of).unwrap(), - Some(true), - ); - - // finalizing "hash_e" at depth 20 will trigger change at "hash_b", but - // it can't be applied yet since "hash_a" must be applied first - assert_eq!( - authorities.enacts_standard_change("hash_e", 30, &is_descendent_of).unwrap(), - Some(false), - ); - } - - #[test] - fn forced_changes() { - let mut authorities = AuthoritySet { - current_authorities: Vec::new(), - set_id: 0, - pending_standard_changes: ForkTree::new(), - pending_forced_changes: Vec::new(), - }; - - let set_a = vec![(AuthorityId([1; 32]), 5)]; - let set_b = vec![(AuthorityId([2; 32]), 5)]; - - let change_a = PendingChange { - next_authorities: set_a.clone(), - delay: 10, - canon_height: 5, - canon_hash: "hash_a", - delay_kind: DelayKind::Best { median_last_finalized: 42 }, - }; - - let change_b = PendingChange { - next_authorities: set_b.clone(), - delay: 10, - canon_height: 5, - canon_hash: "hash_b", - delay_kind: DelayKind::Best { median_last_finalized: 0 }, - }; - - authorities.add_pending_change(change_a, &static_is_descendent_of(false)).unwrap(); - authorities.add_pending_change(change_b, &static_is_descendent_of(false)).unwrap(); - - // there's an effective change triggered at block 15 but not a standard one. - // so this should do nothing. - assert_eq!( - authorities.enacts_standard_change("hash_c", 15, &static_is_descendent_of(true)).unwrap(), - None, - ); - - // throw a standard change into the mix to prove that it's discarded - // for being on the same fork. - // - // NOTE: after https://github.com/paritytech/substrate/issues/1861 - // this should still be rejected based on the "span" rule -- it overlaps - // with another change on the same fork. - let change_c = PendingChange { - next_authorities: set_b.clone(), - delay: 3, - canon_height: 8, - canon_hash: "hash_a8", - delay_kind: DelayKind::Best { median_last_finalized: 0 }, - }; - - let is_descendent_of_a = is_descendent_of(|base: &&str, _| { - base.starts_with("hash_a") - }); - - assert!(authorities.add_pending_change(change_c, &is_descendent_of_a).is_err()); - - // too early. - assert!(authorities.apply_forced_changes("hash_a10", 10, &static_is_descendent_of(true)).unwrap().is_none()); - - // too late. - assert!(authorities.apply_forced_changes("hash_a16", 16, &static_is_descendent_of(true)).unwrap().is_none()); - - // on time -- chooses the right change. 
- assert_eq!( - authorities.apply_forced_changes("hash_a15", 15, &is_descendent_of_a).unwrap().unwrap(), - (42, AuthoritySet { - current_authorities: set_a, - set_id: 1, - pending_standard_changes: ForkTree::new(), - pending_forced_changes: Vec::new(), - }) - ); - } + use super::*; + + fn static_is_descendent_of(value: bool) -> impl Fn(&A, &A) -> Result { + move |_, _| Ok(value) + } + + fn is_descendent_of(f: F) -> impl Fn(&A, &A) -> Result + where + F: Fn(&A, &A) -> bool, + { + move |base, hash| Ok(f(base, hash)) + } + + #[test] + fn changes_iterated_in_pre_order() { + let mut authorities = AuthoritySet { + current_authorities: Vec::new(), + set_id: 0, + pending_standard_changes: ForkTree::new(), + pending_forced_changes: Vec::new(), + }; + + let change_a = PendingChange { + next_authorities: Vec::new(), + delay: 10, + canon_height: 5, + canon_hash: "hash_a", + delay_kind: DelayKind::Finalized, + }; + + let change_b = PendingChange { + next_authorities: Vec::new(), + delay: 0, + canon_height: 5, + canon_hash: "hash_b", + delay_kind: DelayKind::Finalized, + }; + + let change_c = PendingChange { + next_authorities: Vec::new(), + delay: 5, + canon_height: 10, + canon_hash: "hash_c", + delay_kind: DelayKind::Finalized, + }; + + authorities + .add_pending_change(change_a.clone(), &static_is_descendent_of(false)) + .unwrap(); + authorities + .add_pending_change(change_b.clone(), &static_is_descendent_of(false)) + .unwrap(); + authorities + .add_pending_change( + change_c.clone(), + &is_descendent_of(|base, hash| match (*base, *hash) { + ("hash_a", "hash_c") => true, + ("hash_b", "hash_c") => false, + _ => unreachable!(), + }), + ) + .unwrap(); + + // forced changes are iterated last + let change_d = PendingChange { + next_authorities: Vec::new(), + delay: 2, + canon_height: 1, + canon_hash: "hash_d", + delay_kind: DelayKind::Best { + median_last_finalized: 0, + }, + }; + + let change_e = PendingChange { + next_authorities: Vec::new(), + delay: 2, + canon_height: 0, + canon_hash: "hash_e", + delay_kind: DelayKind::Best { + median_last_finalized: 0, + }, + }; + + authorities + .add_pending_change(change_d.clone(), &static_is_descendent_of(false)) + .unwrap(); + authorities + .add_pending_change(change_e.clone(), &static_is_descendent_of(false)) + .unwrap(); + + assert_eq!( + authorities.pending_changes().collect::>(), + vec![&change_b, &change_a, &change_c, &change_e, &change_d], + ); + } + + #[test] + fn apply_change() { + let mut authorities = AuthoritySet { + current_authorities: Vec::new(), + set_id: 0, + pending_standard_changes: ForkTree::new(), + pending_forced_changes: Vec::new(), + }; + + let set_a = vec![(AuthorityId([1; 32]), 5)]; + let set_b = vec![(AuthorityId([2; 32]), 5)]; + + // two competing changes at the same height on different forks + let change_a = PendingChange { + next_authorities: set_a.clone(), + delay: 10, + canon_height: 5, + canon_hash: "hash_a", + delay_kind: DelayKind::Finalized, + }; + + let change_b = PendingChange { + next_authorities: set_b.clone(), + delay: 10, + canon_height: 5, + canon_hash: "hash_b", + delay_kind: DelayKind::Finalized, + }; + + authorities + .add_pending_change(change_a.clone(), &static_is_descendent_of(true)) + .unwrap(); + authorities + .add_pending_change(change_b.clone(), &static_is_descendent_of(true)) + .unwrap(); + + assert_eq!( + authorities.pending_changes().collect::>(), + vec![&change_b, &change_a], + ); + + // finalizing "hash_c" won't enact the change signaled at "hash_a" but it will prune out "hash_b" + let status = authorities 
+ .apply_standard_changes( + "hash_c", + 11, + &is_descendent_of(|base, hash| match (*base, *hash) { + ("hash_a", "hash_c") => true, + ("hash_b", "hash_c") => false, + _ => unreachable!(), + }), + ) + .unwrap(); + + assert!(status.changed); + assert_eq!(status.new_set_block, None); + assert_eq!( + authorities.pending_changes().collect::>(), + vec![&change_a], + ); + + // finalizing "hash_d" will enact the change signaled at "hash_a" + let status = authorities + .apply_standard_changes( + "hash_d", + 15, + &is_descendent_of(|base, hash| match (*base, *hash) { + ("hash_a", "hash_d") => true, + _ => unreachable!(), + }), + ) + .unwrap(); + + assert!(status.changed); + assert_eq!(status.new_set_block, Some(("hash_d", 15))); + + assert_eq!(authorities.current_authorities, set_a); + assert_eq!(authorities.set_id, 1); + assert_eq!(authorities.pending_changes().count(), 0); + } + + #[test] + fn disallow_multiple_changes_being_finalized_at_once() { + let mut authorities = AuthoritySet { + current_authorities: Vec::new(), + set_id: 0, + pending_standard_changes: ForkTree::new(), + pending_forced_changes: Vec::new(), + }; + + let set_a = vec![(AuthorityId([1; 32]), 5)]; + let set_c = vec![(AuthorityId([2; 32]), 5)]; + + // two competing changes at the same height on different forks + let change_a = PendingChange { + next_authorities: set_a.clone(), + delay: 10, + canon_height: 5, + canon_hash: "hash_a", + delay_kind: DelayKind::Finalized, + }; + + let change_c = PendingChange { + next_authorities: set_c.clone(), + delay: 10, + canon_height: 30, + canon_hash: "hash_c", + delay_kind: DelayKind::Finalized, + }; + + authorities + .add_pending_change(change_a.clone(), &static_is_descendent_of(true)) + .unwrap(); + authorities + .add_pending_change(change_c.clone(), &static_is_descendent_of(true)) + .unwrap(); + + let is_descendent_of = is_descendent_of(|base, hash| match (*base, *hash) { + ("hash_a", "hash_b") => true, + ("hash_a", "hash_c") => true, + ("hash_a", "hash_d") => true, + + ("hash_c", "hash_b") => false, + ("hash_c", "hash_d") => true, + + ("hash_b", "hash_c") => true, + _ => unreachable!(), + }); + + // trying to finalize past `change_c` without finalizing `change_a` first + match authorities.apply_standard_changes("hash_d", 40, &is_descendent_of) { + Err(fork_tree::Error::UnfinalizedAncestor) => {} + _ => unreachable!(), + } + + let status = authorities + .apply_standard_changes("hash_b", 15, &is_descendent_of) + .unwrap(); + assert!(status.changed); + assert_eq!(status.new_set_block, Some(("hash_b", 15))); + + assert_eq!(authorities.current_authorities, set_a); + assert_eq!(authorities.set_id, 1); + + // after finalizing `change_a` it should be possible to finalize `change_c` + let status = authorities + .apply_standard_changes("hash_d", 40, &is_descendent_of) + .unwrap(); + assert!(status.changed); + assert_eq!(status.new_set_block, Some(("hash_d", 40))); + + assert_eq!(authorities.current_authorities, set_c); + assert_eq!(authorities.set_id, 2); + } + + #[test] + fn enacts_standard_change_works() { + let mut authorities = AuthoritySet { + current_authorities: Vec::new(), + set_id: 0, + pending_standard_changes: ForkTree::new(), + pending_forced_changes: Vec::new(), + }; + + let set_a = vec![(AuthorityId([1; 32]), 5)]; + + let change_a = PendingChange { + next_authorities: set_a.clone(), + delay: 10, + canon_height: 5, + canon_hash: "hash_a", + delay_kind: DelayKind::Finalized, + }; + + let change_b = PendingChange { + next_authorities: set_a.clone(), + delay: 10, + canon_height: 20, + 
canon_hash: "hash_b", + delay_kind: DelayKind::Finalized, + }; + + authorities + .add_pending_change(change_a.clone(), &static_is_descendent_of(false)) + .unwrap(); + authorities + .add_pending_change(change_b.clone(), &static_is_descendent_of(true)) + .unwrap(); + + let is_descendent_of = is_descendent_of(|base, hash| match (*base, *hash) { + ("hash_a", "hash_d") => true, + ("hash_a", "hash_e") => true, + ("hash_b", "hash_d") => true, + ("hash_b", "hash_e") => true, + ("hash_a", "hash_c") => false, + ("hash_b", "hash_c") => false, + _ => unreachable!(), + }); + + // "hash_c" won't finalize the existing change since it isn't a descendent + assert_eq!( + authorities + .enacts_standard_change("hash_c", 15, &is_descendent_of) + .unwrap(), + None, + ); + + // "hash_d" at depth 14 won't work either + assert_eq!( + authorities + .enacts_standard_change("hash_d", 14, &is_descendent_of) + .unwrap(), + None, + ); + + // but it should work at depth 15 (change height + depth) + assert_eq!( + authorities + .enacts_standard_change("hash_d", 15, &is_descendent_of) + .unwrap(), + Some(true), + ); + + // finalizing "hash_e" at depth 20 will trigger change at "hash_b", but + // it can't be applied yet since "hash_a" must be applied first + assert_eq!( + authorities + .enacts_standard_change("hash_e", 30, &is_descendent_of) + .unwrap(), + Some(false), + ); + } + + #[test] + fn forced_changes() { + let mut authorities = AuthoritySet { + current_authorities: Vec::new(), + set_id: 0, + pending_standard_changes: ForkTree::new(), + pending_forced_changes: Vec::new(), + }; + + let set_a = vec![(AuthorityId([1; 32]), 5)]; + let set_b = vec![(AuthorityId([2; 32]), 5)]; + + let change_a = PendingChange { + next_authorities: set_a.clone(), + delay: 10, + canon_height: 5, + canon_hash: "hash_a", + delay_kind: DelayKind::Best { + median_last_finalized: 42, + }, + }; + + let change_b = PendingChange { + next_authorities: set_b.clone(), + delay: 10, + canon_height: 5, + canon_hash: "hash_b", + delay_kind: DelayKind::Best { + median_last_finalized: 0, + }, + }; + + authorities + .add_pending_change(change_a, &static_is_descendent_of(false)) + .unwrap(); + authorities + .add_pending_change(change_b, &static_is_descendent_of(false)) + .unwrap(); + + // there's an effective change triggered at block 15 but not a standard one. + // so this should do nothing. + assert_eq!( + authorities + .enacts_standard_change("hash_c", 15, &static_is_descendent_of(true)) + .unwrap(), + None, + ); + + // throw a standard change into the mix to prove that it's discarded + // for being on the same fork. + // + // NOTE: after https://github.com/paritytech/substrate/issues/1861 + // this should still be rejected based on the "span" rule -- it overlaps + // with another change on the same fork. + let change_c = PendingChange { + next_authorities: set_b.clone(), + delay: 3, + canon_height: 8, + canon_hash: "hash_a8", + delay_kind: DelayKind::Best { + median_last_finalized: 0, + }, + }; + + let is_descendent_of_a = is_descendent_of(|base: &&str, _| base.starts_with("hash_a")); + + assert!(authorities + .add_pending_change(change_c, &is_descendent_of_a) + .is_err()); + + // too early. + assert!(authorities + .apply_forced_changes("hash_a10", 10, &static_is_descendent_of(true)) + .unwrap() + .is_none()); + + // too late. + assert!(authorities + .apply_forced_changes("hash_a16", 16, &static_is_descendent_of(true)) + .unwrap() + .is_none()); + + // on time -- chooses the right change. 
+ assert_eq!( + authorities + .apply_forced_changes("hash_a15", 15, &is_descendent_of_a) + .unwrap() + .unwrap(), + ( + 42, + AuthoritySet { + current_authorities: set_a, + set_id: 1, + pending_standard_changes: ForkTree::new(), + pending_forced_changes: Vec::new(), + } + ) + ); + } } diff --git a/core/finality-grandpa/src/aux_schema.rs b/core/finality-grandpa/src/aux_schema.rs index cb41d481e3..6936ffc7fc 100644 --- a/core/finality-grandpa/src/aux_schema.rs +++ b/core/finality-grandpa/src/aux_schema.rs @@ -16,17 +16,17 @@ //! Schema for stuff in the aux-db. -use std::fmt::Debug; -use std::sync::Arc; -use parity_codec::{Encode, Decode}; use client::backend::AuxStore; -use client::error::{Result as ClientResult, Error as ClientError, ErrorKind as ClientErrorKind}; +use client::error::{Error as ClientError, ErrorKind as ClientErrorKind, Result as ClientResult}; use fork_tree::ForkTree; use grandpa::round::State as RoundState; use log::{info, warn}; +use parity_codec::{Decode, Encode}; +use std::fmt::Debug; +use std::sync::Arc; -use crate::authorities::{AuthoritySet, SharedAuthoritySet, PendingChange, DelayKind}; -use crate::consensus_changes::{SharedConsensusChanges, ConsensusChanges}; +use crate::authorities::{AuthoritySet, DelayKind, PendingChange, SharedAuthoritySet}; +use crate::consensus_changes::{ConsensusChanges, SharedConsensusChanges}; use crate::NewAuthoritySet; use substrate_primitives::ed25519::Public as AuthorityId; @@ -42,319 +42,315 @@ const CURRENT_VERSION: u32 = 1; #[derive(Debug, Clone, Encode, Decode)] #[cfg_attr(test, derive(PartialEq))] pub enum VoterSetState { - /// The voter set state, currently paused. - Paused(u64, RoundState), - /// The voter set state, currently live. - Live(u64, RoundState), + /// The voter set state, currently paused. + Paused(u64, RoundState), + /// The voter set state, currently live. + Live(u64, RoundState), } impl VoterSetState { - /// Yields the current state. - pub(crate) fn round(&self) -> (u64, RoundState) { - match *self { - VoterSetState::Paused(n, ref s) => (n, s.clone()), - VoterSetState::Live(n, ref s) => (n, s.clone()), - } - } + /// Yields the current state. 
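// Illustrative sketch (simplified stand-in, not the actual enum above): a
// VoterSetState-like type is a plain SCALE-codable enum, so persisting and
// restoring it amounts to an encode/decode round trip with parity_codec. Note
// that Decode in this codec version yields Option rather than Result.
use parity_codec::{Decode, Encode};

#[derive(Debug, PartialEq, Encode, Decode)]
enum ExampleSetState {
    Paused(u64),
    Live(u64),
}

#[test]
fn example_set_state_roundtrips() {
    let state = ExampleSetState::Live(42);
    let encoded = state.encode();
    assert_eq!(ExampleSetState::decode(&mut &encoded[..]), Some(state));
}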
+ pub(crate) fn round(&self) -> (u64, RoundState) { + match *self { + VoterSetState::Paused(n, ref s) => (n, s.clone()), + VoterSetState::Live(n, ref s) => (n, s.clone()), + } + } } type V0VoterSetState = (u64, RoundState); #[derive(Debug, Clone, Encode, Decode, PartialEq)] struct V0PendingChange { - next_authorities: Vec<(AuthorityId, u64)>, - delay: N, - canon_height: N, - canon_hash: H, + next_authorities: Vec<(AuthorityId, u64)>, + delay: N, + canon_height: N, + canon_hash: H, } #[derive(Debug, Clone, Encode, Decode, PartialEq)] struct V0AuthoritySet { - current_authorities: Vec<(AuthorityId, u64)>, - set_id: u64, - pending_changes: Vec>, + current_authorities: Vec<(AuthorityId, u64)>, + set_id: u64, + pending_changes: Vec>, } impl Into> for V0AuthoritySet -where H: Clone + Debug + PartialEq, - N: Clone + Debug + Ord, +where + H: Clone + Debug + PartialEq, + N: Clone + Debug + Ord, { - fn into(self) -> AuthoritySet { - let mut pending_standard_changes = ForkTree::new(); - - for old_change in self.pending_changes { - let new_change = PendingChange { - next_authorities: old_change.next_authorities, - delay: old_change.delay, - canon_height: old_change.canon_height, - canon_hash: old_change.canon_hash, - delay_kind: DelayKind::Finalized, - }; - - if let Err(err) = pending_standard_changes.import::<_, ClientError>( - new_change.canon_hash.clone(), - new_change.canon_height.clone(), - new_change, - // previously we only supported at most one pending change per fork - &|_, _| Ok(false), - ) { - warn!(target: "afg", "Error migrating pending authority set change: {:?}.", err); - warn!(target: "afg", "Node is in a potentially inconsistent state."); - } - } - - AuthoritySet { - current_authorities: self.current_authorities, - set_id: self.set_id, - pending_forced_changes: Vec::new(), - pending_standard_changes - } - } + fn into(self) -> AuthoritySet { + let mut pending_standard_changes = ForkTree::new(); + + for old_change in self.pending_changes { + let new_change = PendingChange { + next_authorities: old_change.next_authorities, + delay: old_change.delay, + canon_height: old_change.canon_height, + canon_hash: old_change.canon_hash, + delay_kind: DelayKind::Finalized, + }; + + if let Err(err) = pending_standard_changes.import::<_, ClientError>( + new_change.canon_hash.clone(), + new_change.canon_height.clone(), + new_change, + // previously we only supported at most one pending change per fork + &|_, _| Ok(false), + ) { + warn!(target: "afg", "Error migrating pending authority set change: {:?}.", err); + warn!(target: "afg", "Node is in a potentially inconsistent state."); + } + } + + AuthoritySet { + current_authorities: self.current_authorities, + set_id: self.set_id, + pending_forced_changes: Vec::new(), + pending_standard_changes, + } + } } fn load_decode(backend: &B, key: &[u8]) -> ClientResult> { - match backend.get_aux(key)? { - None => Ok(None), - Some(t) => T::decode(&mut &t[..]) - .ok_or_else( - || ClientErrorKind::Backend(format!("GRANDPA DB is corrupted.")).into(), - ) - .map(Some) - } + match backend.get_aux(key)? { + None => Ok(None), + Some(t) => T::decode(&mut &t[..]) + .ok_or_else(|| ClientErrorKind::Backend(format!("GRANDPA DB is corrupted.")).into()) + .map(Some), + } } /// Persistent data kept between runs. 
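// Illustrative sketch (hypothetical raw bytes, not part of the patch): the
// decode-or-corrupt policy of `load_decode` above. A missing key is an ordinary
// Ok(None), while a present but undecodable value is surfaced as a hard
// "DB is corrupted" error instead of being silently treated as absent.
use parity_codec::Decode;

fn load_decode_example<T: Decode>(raw: Option<Vec<u8>>) -> Result<Option<T>, String> {
    match raw {
        None => Ok(None),
        Some(bytes) => T::decode(&mut &bytes[..])
            .ok_or_else(|| "GRANDPA DB is corrupted.".to_string())
            .map(Some),
    }
}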
pub(crate) struct PersistentData { - pub(crate) authority_set: SharedAuthoritySet, - pub(crate) consensus_changes: SharedConsensusChanges, - pub(crate) set_state: VoterSetState, + pub(crate) authority_set: SharedAuthoritySet, + pub(crate) consensus_changes: SharedConsensusChanges, + pub(crate) set_state: VoterSetState, } /// Load or initialize persistent data from backend. pub(crate) fn load_persistent( - backend: &B, - genesis_hash: H, - genesis_number: N, - genesis_authorities: G, -) - -> ClientResult> - where - B: AuxStore, - H: Debug + Decode + Encode + Clone + PartialEq, - N: Debug + Decode + Encode + Clone + Ord, - G: FnOnce() -> ClientResult> + backend: &B, + genesis_hash: H, + genesis_number: N, + genesis_authorities: G, +) -> ClientResult> +where + B: AuxStore, + H: Debug + Decode + Encode + Clone + PartialEq, + N: Debug + Decode + Encode + Clone + Ord, + G: FnOnce() -> ClientResult>, { - let version: Option = load_decode(backend, VERSION_KEY)?; - let consensus_changes = load_decode(backend, CONSENSUS_CHANGES_KEY)? - .unwrap_or_else(ConsensusChanges::::empty); - - let make_genesis_round = move || RoundState::genesis((genesis_hash, genesis_number)); - - match version { - None => { - CURRENT_VERSION.using_encoded(|s| - backend.insert_aux(&[(VERSION_KEY, s)], &[]) - )?; - - if let Some(old_set) = load_decode::<_, V0AuthoritySet>(backend, AUTHORITY_SET_KEY)? { - let new_set: AuthoritySet = old_set.into(); - backend.insert_aux(&[(AUTHORITY_SET_KEY, new_set.encode().as_slice())], &[])?; - - let set_state = match load_decode::<_, V0VoterSetState>(backend, SET_STATE_KEY)? { - Some((number, state)) => { - let set_state = VoterSetState::Live(number, state); - backend.insert_aux(&[(SET_STATE_KEY, set_state.encode().as_slice())], &[])?; - set_state - }, - None => VoterSetState::Live(0, make_genesis_round()), - }; - - return Ok(PersistentData { - authority_set: new_set.into(), - consensus_changes: Arc::new(consensus_changes.into()), - set_state, - }); - } - } - Some(1) => { - if let Some(set) = load_decode::<_, AuthoritySet>(backend, AUTHORITY_SET_KEY)? { - let set_state = match load_decode::<_, VoterSetState>(backend, SET_STATE_KEY)? { - Some(state) => state, - None => VoterSetState::Live(0, make_genesis_round()), - }; - - return Ok(PersistentData { - authority_set: set.into(), - consensus_changes: Arc::new(consensus_changes.into()), - set_state, - }); - } - } - Some(other) => return Err(ClientErrorKind::Backend( - format!("Unsupported GRANDPA DB version: {:?}", other) - ).into()), - } - - // genesis. - info!(target: "afg", "Loading GRANDPA authority set \ + let version: Option = load_decode(backend, VERSION_KEY)?; + let consensus_changes = load_decode(backend, CONSENSUS_CHANGES_KEY)? + .unwrap_or_else(ConsensusChanges::::empty); + + let make_genesis_round = move || RoundState::genesis((genesis_hash, genesis_number)); + + match version { + None => { + CURRENT_VERSION.using_encoded(|s| backend.insert_aux(&[(VERSION_KEY, s)], &[]))?; + + if let Some(old_set) = + load_decode::<_, V0AuthoritySet>(backend, AUTHORITY_SET_KEY)? + { + let new_set: AuthoritySet = old_set.into(); + backend.insert_aux(&[(AUTHORITY_SET_KEY, new_set.encode().as_slice())], &[])?; + + let set_state = + match load_decode::<_, V0VoterSetState>(backend, SET_STATE_KEY)? 
{ + Some((number, state)) => { + let set_state = VoterSetState::Live(number, state); + backend.insert_aux( + &[(SET_STATE_KEY, set_state.encode().as_slice())], + &[], + )?; + set_state + } + None => VoterSetState::Live(0, make_genesis_round()), + }; + + return Ok(PersistentData { + authority_set: new_set.into(), + consensus_changes: Arc::new(consensus_changes.into()), + set_state, + }); + } + } + Some(1) => { + if let Some(set) = load_decode::<_, AuthoritySet>(backend, AUTHORITY_SET_KEY)? { + let set_state = match load_decode::<_, VoterSetState>(backend, SET_STATE_KEY)? + { + Some(state) => state, + None => VoterSetState::Live(0, make_genesis_round()), + }; + + return Ok(PersistentData { + authority_set: set.into(), + consensus_changes: Arc::new(consensus_changes.into()), + set_state, + }); + } + } + Some(other) => { + return Err(ClientErrorKind::Backend(format!( + "Unsupported GRANDPA DB version: {:?}", + other + )) + .into()); + } + } + + // genesis. + info!(target: "afg", "Loading GRANDPA authority set \ from genesis on what appears to be first startup."); - let genesis_set = AuthoritySet::genesis(genesis_authorities()?); - let genesis_state = VoterSetState::Live(0, make_genesis_round()); - backend.insert_aux( - &[ - (AUTHORITY_SET_KEY, genesis_set.encode().as_slice()), - (SET_STATE_KEY, genesis_state.encode().as_slice()), - ], - &[], - )?; - - Ok(PersistentData { - authority_set: genesis_set.into(), - set_state: genesis_state, - consensus_changes: Arc::new(consensus_changes.into()), - }) + let genesis_set = AuthoritySet::genesis(genesis_authorities()?); + let genesis_state = VoterSetState::Live(0, make_genesis_round()); + backend.insert_aux( + &[ + (AUTHORITY_SET_KEY, genesis_set.encode().as_slice()), + (SET_STATE_KEY, genesis_state.encode().as_slice()), + ], + &[], + )?; + + Ok(PersistentData { + authority_set: genesis_set.into(), + set_state: genesis_state, + consensus_changes: Arc::new(consensus_changes.into()), + }) } /// Update the authority set on disk after a change. pub(crate) fn update_authority_set( - set: &AuthoritySet, - new_set: Option<&NewAuthoritySet>, - write_aux: F -) -> R where - H: Encode + Clone, - N: Encode + Clone, - F: FnOnce(&[(&'static [u8], &[u8])]) -> R, + set: &AuthoritySet, + new_set: Option<&NewAuthoritySet>, + write_aux: F, +) -> R +where + H: Encode + Clone, + N: Encode + Clone, + F: FnOnce(&[(&'static [u8], &[u8])]) -> R, { - // write new authority set state to disk. - let encoded_set = set.encode(); - - if let Some(new_set) = new_set { - // we also overwrite the "last completed round" entry with a blank slate - // because from the perspective of the finality gadget, the chain has - // reset. - let round_state = RoundState::genesis(( - new_set.canon_hash.clone(), - new_set.canon_number.clone(), - )); - let set_state = VoterSetState::Live(0, round_state); - let encoded = set_state.encode(); - - write_aux(&[ - (AUTHORITY_SET_KEY, &encoded_set[..]), - (SET_STATE_KEY, &encoded[..]), - ]) - } else { - write_aux(&[(AUTHORITY_SET_KEY, &encoded_set[..])]) - } + // write new authority set state to disk. + let encoded_set = set.encode(); + + if let Some(new_set) = new_set { + // we also overwrite the "last completed round" entry with a blank slate + // because from the perspective of the finality gadget, the chain has + // reset. 
+ let round_state = + RoundState::genesis((new_set.canon_hash.clone(), new_set.canon_number.clone())); + let set_state = VoterSetState::Live(0, round_state); + let encoded = set_state.encode(); + + write_aux(&[ + (AUTHORITY_SET_KEY, &encoded_set[..]), + (SET_STATE_KEY, &encoded[..]), + ]) + } else { + write_aux(&[(AUTHORITY_SET_KEY, &encoded_set[..])]) + } } /// Write voter set state. -pub(crate) fn write_voter_set_state(backend: &B, state: &VoterSetState) - -> ClientResult<()> - where B: AuxStore, H: Encode, N: Encode +pub(crate) fn write_voter_set_state( + backend: &B, + state: &VoterSetState, +) -> ClientResult<()> +where + B: AuxStore, + H: Encode, + N: Encode, { - backend.insert_aux( - &[(SET_STATE_KEY, state.encode().as_slice())], - &[] - ) + backend.insert_aux(&[(SET_STATE_KEY, state.encode().as_slice())], &[]) } /// Update the consensus changes. -pub(crate) fn update_consensus_changes( - set: &ConsensusChanges, - write_aux: F -) -> R where - H: Encode + Clone, - N: Encode + Clone, - F: FnOnce(&[(&'static [u8], &[u8])]) -> R, +pub(crate) fn update_consensus_changes(set: &ConsensusChanges, write_aux: F) -> R +where + H: Encode + Clone, + N: Encode + Clone, + F: FnOnce(&[(&'static [u8], &[u8])]) -> R, { - write_aux(&[(CONSENSUS_CHANGES_KEY, set.encode().as_slice())]) + write_aux(&[(CONSENSUS_CHANGES_KEY, set.encode().as_slice())]) } #[cfg(test)] -pub(crate) fn load_authorities(backend: &B) - -> Option> { - load_decode::<_, AuthoritySet>(backend, AUTHORITY_SET_KEY) - .expect("backend error") +pub(crate) fn load_authorities( + backend: &B, +) -> Option> { + load_decode::<_, AuthoritySet>(backend, AUTHORITY_SET_KEY).expect("backend error") } #[cfg(test)] mod test { - use substrate_primitives::H256; - use test_client; - use super::*; - - #[test] - fn load_decode_migrates_data_format() { - let client = test_client::new(); - - let authorities = vec![(AuthorityId::default(), 100)]; - let set_id = 3; - let round_number = 42; - let round_state = RoundState:: { - prevote_ghost: None, - finalized: None, - estimate: None, - completable: false, - }; - - { - let authority_set = V0AuthoritySet:: { - current_authorities: authorities.clone(), - pending_changes: Vec::new(), - set_id, - }; - - let voter_set_state = (round_number, round_state.clone()); - - client.insert_aux( - &[ - (AUTHORITY_SET_KEY, authority_set.encode().as_slice()), - (SET_STATE_KEY, voter_set_state.encode().as_slice()), - ], - &[], - ).unwrap(); - } - - assert_eq!( - load_decode::<_, u32>(&client, VERSION_KEY).unwrap(), - None, - ); - - // should perform the migration - load_persistent( - &client, - H256::random(), - 0, - || unreachable!(), - ).unwrap(); - - assert_eq!( - load_decode::<_, u32>(&client, VERSION_KEY).unwrap(), - Some(1), - ); - - let PersistentData { authority_set, set_state, .. 
} = load_persistent( - &client, - H256::random(), - 0, - || unreachable!(), - ).unwrap(); - - assert_eq!( - *authority_set.inner().read(), - AuthoritySet { - current_authorities: authorities, - pending_standard_changes: ForkTree::new(), - pending_forced_changes: Vec::new(), - set_id, - }, - ); - - assert_eq!( - set_state, - VoterSetState::Live(round_number, round_state), - ); - } + use super::*; + use substrate_primitives::H256; + use test_client; + + #[test] + fn load_decode_migrates_data_format() { + let client = test_client::new(); + + let authorities = vec![(AuthorityId::default(), 100)]; + let set_id = 3; + let round_number = 42; + let round_state = RoundState:: { + prevote_ghost: None, + finalized: None, + estimate: None, + completable: false, + }; + + { + let authority_set = V0AuthoritySet:: { + current_authorities: authorities.clone(), + pending_changes: Vec::new(), + set_id, + }; + + let voter_set_state = (round_number, round_state.clone()); + + client + .insert_aux( + &[ + (AUTHORITY_SET_KEY, authority_set.encode().as_slice()), + (SET_STATE_KEY, voter_set_state.encode().as_slice()), + ], + &[], + ) + .unwrap(); + } + + assert_eq!(load_decode::<_, u32>(&client, VERSION_KEY).unwrap(), None,); + + // should perform the migration + load_persistent(&client, H256::random(), 0, || unreachable!()).unwrap(); + + assert_eq!( + load_decode::<_, u32>(&client, VERSION_KEY).unwrap(), + Some(1), + ); + + let PersistentData { + authority_set, + set_state, + .. + } = load_persistent(&client, H256::random(), 0, || unreachable!()).unwrap(); + + assert_eq!( + *authority_set.inner().read(), + AuthoritySet { + current_authorities: authorities, + pending_standard_changes: ForkTree::new(), + pending_forced_changes: Vec::new(), + set_id, + }, + ); + + assert_eq!(set_state, VoterSetState::Live(round_number, round_state),); + } } diff --git a/core/finality-grandpa/src/communication.rs b/core/finality-grandpa/src/communication.rs index f498b51460..e6367eb443 100644 --- a/core/finality-grandpa/src/communication.rs +++ b/core/finality-grandpa/src/communication.rs @@ -20,22 +20,24 @@ use std::collections::HashMap; use std::sync::Arc; -use grandpa::VoterSet; -use grandpa::Message::{Prevote, Precommit}; +use crate::{ + Commit, CompactCommit, Error, FullCommitMessage, GossipMessage, Message, Network, + SignedMessage, VoteOrPrecommitMessage, +}; +use ed25519::{Public as AuthorityId, Signature as AuthoritySignature}; use futures::prelude::*; use futures::sync::mpsc; +use grandpa::Message::{Precommit, Prevote}; +use grandpa::VoterSet; use log::{debug, trace}; -use parity_codec::{Encode, Decode}; +use parity_codec::{Decode, Encode}; +use runtime_primitives::traits::Block as BlockT; use substrate_primitives::{ed25519, Pair}; use substrate_telemetry::{telemetry, CONSENSUS_INFO}; -use runtime_primitives::traits::Block as BlockT; use tokio::timer::Interval; -use crate::{Error, Network, Message, SignedMessage, Commit, - CompactCommit, GossipMessage, FullCommitMessage, VoteOrPrecommitMessage}; -use ed25519::{Public as AuthorityId, Signature as AuthoritySignature}; fn localized_payload(round: u64, set_id: u64, message: &E) -> Vec { - (message, round, set_id).encode() + (message, round, set_id).encode() } #[derive(Clone, Copy, Eq, PartialEq, PartialOrd, Ord)] @@ -44,233 +46,247 @@ struct Round(u64); struct SetId(u64); enum Broadcast { - // round, set id, encoded commit. - Commit(Round, SetId, Vec), - // round, set id, encoded signed message. 
- Message(Round, SetId, Vec), - // round, set id, announcement of block hash that should be downloaded - Announcement(Round, SetId, Block::Hash), - // round, set id being dropped. - DropRound(Round, SetId), - // set_id being dropped. - DropSet(SetId), + // round, set id, encoded commit. + Commit(Round, SetId, Vec), + // round, set id, encoded signed message. + Message(Round, SetId, Vec), + // round, set id, announcement of block hash that should be downloaded + Announcement(Round, SetId, Block::Hash), + // round, set id being dropped. + DropRound(Round, SetId), + // set_id being dropped. + DropSet(SetId), } impl Broadcast { - fn set_id(&self) -> SetId { - match *self { - Broadcast::Commit(_, s, _) => s, - Broadcast::Message(_, s, _) => s, - Broadcast::Announcement(_, s, _) => s, - Broadcast::DropRound(_, s) => s, - Broadcast::DropSet(s) => s, - } - } + fn set_id(&self) -> SetId { + match *self { + Broadcast::Commit(_, s, _) => s, + Broadcast::Message(_, s, _) => s, + Broadcast::Announcement(_, s, _) => s, + Broadcast::DropRound(_, s) => s, + Broadcast::DropSet(s) => s, + } + } } /// Produces a future that should be run in the background and proxies /// and rebroadcasts messages. -pub(crate) fn rebroadcasting_network>(network: N) -> (BroadcastWorker, BroadcastHandle) { - use std::time::Duration; - const REBROADCAST_PERIOD: Duration = Duration::from_secs(60); - - let (tx, rx) = mpsc::unbounded(); - - ( - BroadcastWorker { - interval: Interval::new_interval(REBROADCAST_PERIOD), - set_id: SetId(0), // will be overwritten on first item to broadcast. - last_commit: None, - round_messages: (Round(0), Vec::new()), - announcements: HashMap::new(), - network: network.clone(), - incoming_broadcast: rx, - }, - BroadcastHandle { - relay: tx, - network, - }, - ) +pub(crate) fn rebroadcasting_network>( + network: N, +) -> (BroadcastWorker, BroadcastHandle) { + use std::time::Duration; + const REBROADCAST_PERIOD: Duration = Duration::from_secs(60); + + let (tx, rx) = mpsc::unbounded(); + + ( + BroadcastWorker { + interval: Interval::new_interval(REBROADCAST_PERIOD), + set_id: SetId(0), // will be overwritten on first item to broadcast. + last_commit: None, + round_messages: (Round(0), Vec::new()), + announcements: HashMap::new(), + network: network.clone(), + incoming_broadcast: rx, + }, + BroadcastHandle { relay: tx, network }, + ) } // A worker which broadcasts messages to the background, potentially // rebroadcasting. #[must_use = "network rebroadcast future must be driven to completion"] pub(crate) struct BroadcastWorker> { - interval: Interval, - set_id: SetId, - last_commit: Option<(Round, Vec)>, - round_messages: (Round, Vec>), - announcements: HashMap, - network: N, - incoming_broadcast: mpsc::UnboundedReceiver>, + interval: Interval, + set_id: SetId, + last_commit: Option<(Round, Vec)>, + round_messages: (Round, Vec>), + announcements: HashMap, + network: N, + incoming_broadcast: mpsc::UnboundedReceiver>, } /// A handle used by communication work to broadcast to network. #[derive(Clone)] pub(crate) struct BroadcastHandle { - relay: mpsc::UnboundedSender>, - network: N, + relay: mpsc::UnboundedSender>, + network: N, } impl> Future for BroadcastWorker { - type Item = (); - type Error = Error; - - fn poll(&mut self) -> Poll<(), Error> { - { - let mut rebroadcast = false; - loop { - match self.interval.poll().map_err(Error::Timer)? 
{ - Async::NotReady => break, - Async::Ready(_) => { rebroadcast = true; } - } - } - - if rebroadcast { - let SetId(set_id) = self.set_id; - if let Some((Round(c_round), ref c_commit)) = self.last_commit { - self.network.send_commit(c_round, set_id, c_commit.clone(), true); - } - - let Round(round) = self.round_messages.0; - for message in self.round_messages.1.iter().cloned() { - self.network.send_message(round, set_id, message, true); - } - - for (&announce_hash, &Round(round)) in &self.announcements { - self.network.announce(round, set_id, announce_hash); - } - } - } - - loop { - match self.incoming_broadcast.poll().expect("UnboundedReceiver does not yield errors; qed") { - Async::NotReady => return Ok(Async::NotReady), - Async::Ready(None) => return Err(Error::Network( - "all broadcast handles dropped, connection to network severed".into() - )), - Async::Ready(Some(item)) => { - if item.set_id() > self.set_id { - self.set_id = item.set_id(); - self.last_commit = None; - self.round_messages = (Round(0), Vec::new()); - self.announcements.clear(); - } - - match item { - Broadcast::Commit(round, set_id, commit) => { - if self.set_id == set_id { - if round >= self.last_commit.as_ref() - .map_or(Round(0), |&(r, _)| r) - { - self.last_commit = Some((round, commit.clone())); - } - } - - // always send out to network. - self.network.send_commit(round.0, self.set_id.0, commit, false); - } - Broadcast::Message(round, set_id, message) => { - if self.set_id == set_id { - if round > self.round_messages.0 { - self.round_messages = (round, vec![message.clone()]); - } else if round == self.round_messages.0 { - self.round_messages.1.push(message.clone()); - }; - - // ignore messages from earlier rounds. - } - - // always send out to network. - self.network.send_message(round.0, set_id.0, message, false); - } - Broadcast::Announcement(round, set_id, hash) => { - if self.set_id == set_id { - self.announcements.insert(hash, round); - } - - // always send out. - self.network.announce(round.0, set_id.0, hash); - } - Broadcast::DropRound(round, set_id) => { - // stop making announcements for any dead rounds. - self.announcements.retain(|_, &mut r| r > round); - self.network.drop_round_messages(round.0, set_id.0); - } - Broadcast::DropSet(set_id) => { - // stop making announcements for any dead rounds. - self.network.drop_set_messages(set_id.0); - } - } - } - } - } - } + type Item = (); + type Error = Error; + + fn poll(&mut self) -> Poll<(), Error> { + { + let mut rebroadcast = false; + loop { + match self.interval.poll().map_err(Error::Timer)? 
{ + Async::NotReady => break, + Async::Ready(_) => { + rebroadcast = true; + } + } + } + + if rebroadcast { + let SetId(set_id) = self.set_id; + if let Some((Round(c_round), ref c_commit)) = self.last_commit { + self.network + .send_commit(c_round, set_id, c_commit.clone(), true); + } + + let Round(round) = self.round_messages.0; + for message in self.round_messages.1.iter().cloned() { + self.network.send_message(round, set_id, message, true); + } + + for (&announce_hash, &Round(round)) in &self.announcements { + self.network.announce(round, set_id, announce_hash); + } + } + } + + loop { + match self + .incoming_broadcast + .poll() + .expect("UnboundedReceiver does not yield errors; qed") + { + Async::NotReady => return Ok(Async::NotReady), + Async::Ready(None) => { + return Err(Error::Network( + "all broadcast handles dropped, connection to network severed".into(), + )); + } + Async::Ready(Some(item)) => { + if item.set_id() > self.set_id { + self.set_id = item.set_id(); + self.last_commit = None; + self.round_messages = (Round(0), Vec::new()); + self.announcements.clear(); + } + + match item { + Broadcast::Commit(round, set_id, commit) => { + if self.set_id == set_id { + if round >= self.last_commit.as_ref().map_or(Round(0), |&(r, _)| r) + { + self.last_commit = Some((round, commit.clone())); + } + } + + // always send out to network. + self.network + .send_commit(round.0, self.set_id.0, commit, false); + } + Broadcast::Message(round, set_id, message) => { + if self.set_id == set_id { + if round > self.round_messages.0 { + self.round_messages = (round, vec![message.clone()]); + } else if round == self.round_messages.0 { + self.round_messages.1.push(message.clone()); + }; + + // ignore messages from earlier rounds. + } + + // always send out to network. + self.network.send_message(round.0, set_id.0, message, false); + } + Broadcast::Announcement(round, set_id, hash) => { + if self.set_id == set_id { + self.announcements.insert(hash, round); + } + + // always send out. + self.network.announce(round.0, set_id.0, hash); + } + Broadcast::DropRound(round, set_id) => { + // stop making announcements for any dead rounds. + self.announcements.retain(|_, &mut r| r > round); + self.network.drop_round_messages(round.0, set_id.0); + } + Broadcast::DropSet(set_id) => { + // stop making announcements for any dead rounds. 
+ self.network.drop_set_messages(set_id.0); + } + } + } + } + } + } } impl> Network for BroadcastHandle { - type In = N::In; - - fn messages_for(&self, round: u64, set_id: u64) -> Self::In { - self.network.messages_for(round, set_id) - } - - fn send_message(&self, round: u64, set_id: u64, message: Vec, _force: bool) { - let _ = self.relay.unbounded_send(Broadcast::Message(Round(round), SetId(set_id), message)); - } - - fn drop_round_messages(&self, round: u64, set_id: u64) { - let _ = self.relay.unbounded_send(Broadcast::DropRound(Round(round), SetId(set_id))); - } - - fn drop_set_messages(&self, set_id: u64) { - let _ = self.relay.unbounded_send(Broadcast::DropSet(SetId(set_id))); - } - - fn commit_messages(&self, set_id: u64) -> Self::In { - self.network.commit_messages(set_id) - } - - fn send_commit(&self, round: u64, set_id: u64, message: Vec, _force: bool) { - let _ = self.relay.unbounded_send(Broadcast::Commit(Round(round), SetId(set_id), message)); - } - - fn announce(&self, round: u64, set_id: u64, block: B::Hash) { - let _ = self.relay.unbounded_send( - Broadcast::Announcement(Round(round), SetId(set_id), block) - ); - } + type In = N::In; + + fn messages_for(&self, round: u64, set_id: u64) -> Self::In { + self.network.messages_for(round, set_id) + } + + fn send_message(&self, round: u64, set_id: u64, message: Vec, _force: bool) { + let _ = self + .relay + .unbounded_send(Broadcast::Message(Round(round), SetId(set_id), message)); + } + + fn drop_round_messages(&self, round: u64, set_id: u64) { + let _ = self + .relay + .unbounded_send(Broadcast::DropRound(Round(round), SetId(set_id))); + } + + fn drop_set_messages(&self, set_id: u64) { + let _ = self.relay.unbounded_send(Broadcast::DropSet(SetId(set_id))); + } + + fn commit_messages(&self, set_id: u64) -> Self::In { + self.network.commit_messages(set_id) + } + + fn send_commit(&self, round: u64, set_id: u64, message: Vec, _force: bool) { + let _ = self + .relay + .unbounded_send(Broadcast::Commit(Round(round), SetId(set_id), message)); + } + + fn announce(&self, round: u64, set_id: u64, block: B::Hash) { + let _ = + self.relay + .unbounded_send(Broadcast::Announcement(Round(round), SetId(set_id), block)); + } } // check a message. pub(crate) fn check_message_sig( - message: &Message, - id: &AuthorityId, - signature: &AuthoritySignature, - round: u64, - set_id: u64, + message: &Message, + id: &AuthorityId, + signature: &AuthoritySignature, + round: u64, + set_id: u64, ) -> Result<(), ()> { - let as_public = AuthorityId::from_raw(id.0); - let encoded_raw = localized_payload(round, set_id, message); - if ed25519::Pair::verify(signature, &encoded_raw, as_public) { - Ok(()) - } else { - debug!(target: "afg", "Bad signature on message from {:?}", id); - Err(()) - } + let as_public = AuthorityId::from_raw(id.0); + let encoded_raw = localized_payload(round, set_id, message); + if ed25519::Pair::verify(signature, &encoded_raw, as_public) { + Ok(()) + } else { + debug!(target: "afg", "Bad signature on message from {:?}", id); + Err(()) + } } /// converts a message stream into a stream of signed messages. /// the output stream checks signatures also. 
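/// Malformed payloads are skipped rather than terminating the stream.
/// Illustrative wiring, matching its use in `Environment::round_data`:
///
///     let incoming = checked_message_stream::<Block, _>(
///         network.messages_for(round, set_id),
///         voters.clone(),
///     );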
pub(crate) fn checked_message_stream( - inner: S, - voters: Arc>, -) - -> impl Stream,Error=Error> where - S: Stream,Error=()> + inner: S, + voters: Arc>, +) -> impl Stream, Error = Error> +where + S: Stream, Error = ()>, { - inner + inner .filter_map(|raw| { let decoded = GossipMessage::::decode(&mut &raw[..]); if decoded.is_none() { @@ -317,61 +333,63 @@ pub(crate) fn checked_message_stream( } pub(crate) struct OutgoingMessages> { - round: u64, - set_id: u64, - locals: Option<(Arc, AuthorityId)>, - sender: mpsc::UnboundedSender>, - network: N, + round: u64, + set_id: u64, + locals: Option<(Arc, AuthorityId)>, + sender: mpsc::UnboundedSender>, + network: N, } -impl> Sink for OutgoingMessages -{ - type SinkItem = Message; - type SinkError = Error; - - fn start_send(&mut self, msg: Message) -> StartSend, Error> { - // when locals exist, sign messages on import - if let Some((ref pair, ref local_id)) = self.locals { - let encoded = localized_payload(self.round, self.set_id, &msg); - let signature = pair.sign(&encoded[..]); - - let target_hash = msg.target().0.clone(); - let signed = SignedMessage:: { - message: msg, - signature, - id: local_id.clone(), - }; - - let message = GossipMessage::VoteOrPrecommit(VoteOrPrecommitMessage:: { - message: signed.clone(), - round: self.round, - set_id: self.set_id, - }); - - // announce our block hash to peers and propagate the - // message. - self.network.announce(self.round, self.set_id, target_hash); - self.network.send_message(self.round, self.set_id, message.encode(), false); - - // forward the message to the inner sender. - let _ = self.sender.unbounded_send(signed); - } - - Ok(AsyncSink::Ready) - } - - fn poll_complete(&mut self) -> Poll<(), Error> { Ok(Async::Ready(())) } - - fn close(&mut self) -> Poll<(), Error> { - // ignore errors since we allow this inner sender to be closed already. - self.sender.close().or_else(|_| Ok(Async::Ready(()))) - } +impl> Sink for OutgoingMessages { + type SinkItem = Message; + type SinkError = Error; + + fn start_send(&mut self, msg: Message) -> StartSend, Error> { + // when locals exist, sign messages on import + if let Some((ref pair, ref local_id)) = self.locals { + let encoded = localized_payload(self.round, self.set_id, &msg); + let signature = pair.sign(&encoded[..]); + + let target_hash = msg.target().0.clone(); + let signed = SignedMessage:: { + message: msg, + signature, + id: local_id.clone(), + }; + + let message = GossipMessage::VoteOrPrecommit(VoteOrPrecommitMessage:: { + message: signed.clone(), + round: self.round, + set_id: self.set_id, + }); + + // announce our block hash to peers and propagate the + // message. + self.network.announce(self.round, self.set_id, target_hash); + self.network + .send_message(self.round, self.set_id, message.encode(), false); + + // forward the message to the inner sender. + let _ = self.sender.unbounded_send(signed); + } + + Ok(AsyncSink::Ready) + } + + fn poll_complete(&mut self) -> Poll<(), Error> { + Ok(Async::Ready(())) + } + + fn close(&mut self) -> Poll<(), Error> { + // ignore errors since we allow this inner sender to be closed already. + self.sender.close().or_else(|_| Ok(Async::Ready(()))) + } } impl> Drop for OutgoingMessages { - fn drop(&mut self) { - self.network.drop_round_messages(self.round, self.set_id); - } + fn drop(&mut self) { + self.network.drop_round_messages(self.round, self.set_id); + } } /// A sink for outgoing messages. 
This signs the messages with the key, @@ -380,164 +398,174 @@ impl> Drop for OutgoingMessages { /// A future can push unsigned messages into the sink. They will be automatically /// broadcast to the network. The returned stream should be combined with other input. pub(crate) fn outgoing_messages>( - round: u64, - set_id: u64, - local_key: Option>, - voters: Arc>, - network: N, + round: u64, + set_id: u64, + local_key: Option>, + voters: Arc>, + network: N, ) -> ( - impl Stream,Error=Error>, - OutgoingMessages, + impl Stream, Error = Error>, + OutgoingMessages, ) { - let locals = local_key.and_then(|pair| { - let public = pair.public(); - let id = AuthorityId(public.0); - if voters.contains_key(&id) { - Some((pair, id)) - } else { - None - } - }); - - let (tx, rx) = mpsc::unbounded(); - let outgoing = OutgoingMessages:: { - round, - set_id, - network, - locals, - sender: tx, - }; - - let rx = rx.map_err(move |()| Error::Network( - format!("Failed to receive on unbounded receiver for round {}", round) - )); - - (rx, outgoing) + let locals = local_key.and_then(|pair| { + let public = pair.public(); + let id = AuthorityId(public.0); + if voters.contains_key(&id) { + Some((pair, id)) + } else { + None + } + }); + + let (tx, rx) = mpsc::unbounded(); + let outgoing = OutgoingMessages:: { + round, + set_id, + network, + locals, + sender: tx, + }; + + let rx = rx.map_err(move |()| { + Error::Network(format!( + "Failed to receive on unbounded receiver for round {}", + round + )) + }); + + (rx, outgoing) } fn check_compact_commit( - msg: CompactCommit, - voters: &VoterSet, + msg: CompactCommit, + voters: &VoterSet, ) -> Option> { - if msg.precommits.len() != msg.auth_data.len() || msg.precommits.is_empty() { - debug!(target: "afg", "Skipping malformed compact commit"); - return None; - } - - // check signatures on all contained precommits. - for (_, ref id) in &msg.auth_data { - if !voters.contains_key(id) { - debug!(target: "afg", "Skipping commit containing unknown voter {}", id); - return None; - } - } - - Some(msg) + if msg.precommits.len() != msg.auth_data.len() || msg.precommits.is_empty() { + debug!(target: "afg", "Skipping malformed compact commit"); + return None; + } + + // check signatures on all contained precommits. + for (_, ref id) in &msg.auth_data { + if !voters.contains_key(id) { + debug!(target: "afg", "Skipping commit containing unknown voter {}", id); + return None; + } + } + + Some(msg) } /// A stream for incoming commit messages. This checks all the signatures on the /// messages. pub(crate) fn checked_commit_stream( - inner: S, - voters: Arc>, -) - -> impl Stream),Error=Error> where - S: Stream,Error=()> + inner: S, + voters: Arc>, +) -> impl Stream), Error = Error> +where + S: Stream, Error = ()>, { - inner - .filter_map(|raw| { - // this could be optimized by decoding piecewise. 
- let decoded = GossipMessage::::decode(&mut &raw[..]); - if decoded.is_none() { - trace!(target: "afg", "Skipping malformed commit message {:?}", raw); - } - decoded - }) - .filter_map(move |msg| { - match msg { - GossipMessage::Commit(msg) => { - let round = msg.round; - let precommits_signed_by: Vec = - msg.message.auth_data.iter().map(move |(_, a)| { - format!("{}", a) - }).collect(); - telemetry!(CONSENSUS_INFO; "afg.received_commit"; - "contains_precommits_signed_by" => ?precommits_signed_by, - "target_number" => ?msg.message.target_number, - "target_hash" => ?msg.message.target_hash, - ); - check_compact_commit::(msg.message, &*voters).map(move |c| (round, c)) - }, - _ => { - debug!(target: "afg", "Skipping unknown message type"); - return None; - } - } - }) - .map_err(|()| Error::Network(format!("Failed to receive message on unbounded stream"))) + inner + .filter_map(|raw| { + // this could be optimized by decoding piecewise. + let decoded = GossipMessage::::decode(&mut &raw[..]); + if decoded.is_none() { + trace!(target: "afg", "Skipping malformed commit message {:?}", raw); + } + decoded + }) + .filter_map(move |msg| match msg { + GossipMessage::Commit(msg) => { + let round = msg.round; + let precommits_signed_by: Vec = msg + .message + .auth_data + .iter() + .map(move |(_, a)| format!("{}", a)) + .collect(); + telemetry!(CONSENSUS_INFO; "afg.received_commit"; + "contains_precommits_signed_by" => ?precommits_signed_by, + "target_number" => ?msg.message.target_number, + "target_hash" => ?msg.message.target_hash, + ); + check_compact_commit::(msg.message, &*voters).map(move |c| (round, c)) + } + _ => { + debug!(target: "afg", "Skipping unknown message type"); + return None; + } + }) + .map_err(|()| Error::Network(format!("Failed to receive message on unbounded stream"))) } /// An output sink for commit messages. pub(crate) struct CommitsOut> { - network: N, - set_id: u64, - _marker: ::std::marker::PhantomData, - is_voter: bool, + network: N, + set_id: u64, + _marker: ::std::marker::PhantomData, + is_voter: bool, } impl> CommitsOut { - /// Create a new commit output stream. - pub(crate) fn new(network: N, set_id: u64, is_voter: bool) -> Self { - CommitsOut { - network, - set_id, - is_voter, - _marker: Default::default(), - } - } + /// Create a new commit output stream. 
+ pub(crate) fn new(network: N, set_id: u64, is_voter: bool) -> Self { + CommitsOut { + network, + set_id, + is_voter, + _marker: Default::default(), + } + } } impl> Sink for CommitsOut { - type SinkItem = (u64, Commit); - type SinkError = Error; - - fn start_send(&mut self, input: (u64, Commit)) -> StartSend { - if !self.is_voter { - return Ok(AsyncSink::Ready); - } - - let (round, commit) = input; - telemetry!(CONSENSUS_INFO; "afg.commit_issued"; - "target_number" => ?commit.target_number, "target_hash" => ?commit.target_hash, - ); - let (precommits, auth_data) = commit.precommits.into_iter() - .map(|signed| (signed.precommit, (signed.signature, signed.id))) - .unzip(); - - let compact_commit = CompactCommit:: { - target_hash: commit.target_hash, - target_number: commit.target_number, - precommits, - auth_data - }; - - let message = GossipMessage::Commit(FullCommitMessage:: { - round: round, - set_id: self.set_id, - message: compact_commit, - }); - - self.network.send_commit(round, self.set_id, Encode::encode(&message), false); - - Ok(AsyncSink::Ready) - } - - fn close(&mut self) -> Poll<(), Error> { Ok(Async::Ready(())) } - fn poll_complete(&mut self) -> Poll<(), Error> { Ok(Async::Ready(())) } + type SinkItem = (u64, Commit); + type SinkError = Error; + + fn start_send(&mut self, input: (u64, Commit)) -> StartSend { + if !self.is_voter { + return Ok(AsyncSink::Ready); + } + + let (round, commit) = input; + telemetry!(CONSENSUS_INFO; "afg.commit_issued"; + "target_number" => ?commit.target_number, "target_hash" => ?commit.target_hash, + ); + let (precommits, auth_data) = commit + .precommits + .into_iter() + .map(|signed| (signed.precommit, (signed.signature, signed.id))) + .unzip(); + + let compact_commit = CompactCommit:: { + target_hash: commit.target_hash, + target_number: commit.target_number, + precommits, + auth_data, + }; + + let message = GossipMessage::Commit(FullCommitMessage:: { + round: round, + set_id: self.set_id, + message: compact_commit, + }); + + self.network + .send_commit(round, self.set_id, Encode::encode(&message), false); + + Ok(AsyncSink::Ready) + } + + fn close(&mut self) -> Poll<(), Error> { + Ok(Async::Ready(())) + } + fn poll_complete(&mut self) -> Poll<(), Error> { + Ok(Async::Ready(())) + } } impl> Drop for CommitsOut { - fn drop(&mut self) { - self.network.drop_set_messages(self.set_id); - } + fn drop(&mut self) { + self.network.drop_set_messages(self.set_id); + } } diff --git a/core/finality-grandpa/src/consensus_changes.rs b/core/finality-grandpa/src/consensus_changes.rs index cbd7b30f8e..4c05895c0d 100644 --- a/core/finality-grandpa/src/consensus_changes.rs +++ b/core/finality-grandpa/src/consensus_changes.rs @@ -14,59 +14,68 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . +use parity_codec::{Decode, Encode}; use std::sync::Arc; -use parity_codec::{Encode, Decode}; /// Consensus-related data changes tracker. #[derive(Clone, Debug, Encode, Decode)] pub(crate) struct ConsensusChanges { - pending_changes: Vec<(N, H)>, + pending_changes: Vec<(N, H)>, } impl ConsensusChanges { - /// Create empty consensus changes. - pub(crate) fn empty() -> Self { - ConsensusChanges { pending_changes: Vec::new(), } - } + /// Create empty consensus changes. + pub(crate) fn empty() -> Self { + ConsensusChanges { + pending_changes: Vec::new(), + } + } } impl ConsensusChanges { + /// Note unfinalized change of consensus-related data. 
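+ /// Changes are kept sorted by block number: `binary_search_by_key` yields
+ /// `Err(i)` carrying the insertion index when the height is not yet present,
+ /// and `unwrap_or_else(|i| i)` turns either outcome into a valid insertion
+ /// point. E.g. noting heights 5, 3, 4 leaves the vector ordered 3, 4, 5.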
+ pub(crate) fn note_change(&mut self, at: (N, H)) {
+ let idx = self
+ .pending_changes
+ .binary_search_by_key(&at.0, |change| change.0)
+ .unwrap_or_else(|i| i);
+ self.pending_changes.insert(idx, at);
+ }
- /// Note unfinalized change of consensus-related data.
- pub(crate) fn note_change(&mut self, at: (N, H)) {
- let idx = self.pending_changes
- .binary_search_by_key(&at.0, |change| change.0)
- .unwrap_or_else(|i| i);
- self.pending_changes.insert(idx, at);
- }
+ /// Finalize all pending consensus changes that are finalized by the given block.
+ /// Returns true if any changes were finalized.
+ pub(crate) fn finalize ::client::error::Result>>(
+ &mut self,
+ block: (N, H),
+ canonical_at_height: F,
+ ) -> ::client::error::Result<(bool, bool)> {
+ let (split_idx, has_finalized_changes) = self
+ .pending_changes
+ .iter()
+ .enumerate()
+ .take_while(|(_, &(at_height, _))| at_height <= block.0)
+ .fold(
+ (None, Ok(false)),
+ |(_, has_finalized_changes), (idx, ref at)| {
+ (
+ Some(idx),
+ has_finalized_changes.and_then(|has_finalized_changes| {
+ if has_finalized_changes {
+ Ok(has_finalized_changes)
+ } else {
+ canonical_at_height(at.0).map(|can_hash| Some(at.1) == can_hash)
+ }
+ }),
+ )
+ },
+ );
- /// Finalize all pending consensus changes that are finalized by given block.
- /// Returns true if there any changes were finalized.
- pub(crate) fn finalize ::client::error::Result>>(
- &mut self,
- block: (N, H),
- canonical_at_height: F,
- ) -> ::client::error::Result<(bool, bool)> {
- let (split_idx, has_finalized_changes) = self.pending_changes.iter()
- .enumerate()
- .take_while(|(_, &(at_height, _))| at_height <= block.0)
- .fold((None, Ok(false)), |(_, has_finalized_changes), (idx, ref at)|
- (
- Some(idx),
- has_finalized_changes
- .and_then(|has_finalized_changes| if has_finalized_changes {
- Ok(has_finalized_changes)
- } else {
- canonical_at_height(at.0).map(|can_hash| Some(at.1) == can_hash)
- }),
- ));
-
- let altered_changes = split_idx.is_some();
- if let Some(split_idx) = split_idx {
- self.pending_changes = self.pending_changes.split_off(split_idx + 1);
- }
- has_finalized_changes.map(|has_finalized_changes| (altered_changes, has_finalized_changes))
- }
+ let altered_changes = split_idx.is_some();
+ if let Some(split_idx) = split_idx {
+ self.pending_changes = self.pending_changes.split_off(split_idx + 1);
+ }
+ has_finalized_changes.map(|has_finalized_changes| (altered_changes, has_finalized_changes))
+ }
}

/// Thread-safe consensus changes tracker reference.
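/// A minimal usage sketch of the underlying tracker (hypothetical `H256`
/// hashes, `u64` heights):
///
///     let mut changes = ConsensusChanges::<H256, u64>::empty();
///     changes.note_change((5, five_hash));
///     changes.note_change((3, three_hash));
///     // pending changes are now ordered by height: 3, then 5.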
diff --git a/core/finality-grandpa/src/environment.rs b/core/finality-grandpa/src/environment.rs index 587762b608..a767ad65d6 100644 --- a/core/finality-grandpa/src/environment.rs +++ b/core/finality-grandpa/src/environment.rs @@ -17,28 +17,27 @@ use std::sync::Arc; use std::time::{Duration, Instant}; -use log::{debug, warn, info}; -use parity_codec::Encode; use futures::prelude::*; -use tokio::timer::Delay; +use log::{debug, info, warn}; +use parity_codec::Encode; use parking_lot::RwLock; +use tokio::timer::Delay; use client::{ - backend::Backend, BlockchainEvents, CallExecutor, Client, error::Error as ClientError + backend::Backend, error::Error as ClientError, BlockchainEvents, CallExecutor, Client, }; use grandpa::{ - BlockNumberOps, Equivocation, Error as GrandpaError, round::State as RoundState, voter, VoterSet, + round::State as RoundState, voter, BlockNumberOps, Equivocation, Error as GrandpaError, + VoterSet, }; use runtime_primitives::generic::BlockId; -use runtime_primitives::traits::{ - As, Block as BlockT, Header as HeaderT, NumberFor, One, Zero, -}; -use substrate_primitives::{Blake2Hasher, ed25519, H256, Pair}; +use runtime_primitives::traits::{As, Block as BlockT, Header as HeaderT, NumberFor, One, Zero}; +use substrate_primitives::{ed25519, Blake2Hasher, Pair, H256}; use substrate_telemetry::{telemetry, CONSENSUS_INFO}; use crate::{ - Commit, Config, Error, Network, Precommit, Prevote, - CommandOrError, NewAuthoritySet, VoterCommand, + CommandOrError, Commit, Config, Error, Network, NewAuthoritySet, Precommit, Prevote, + VoterCommand, }; use crate::authorities::SharedAuthoritySet; @@ -53,348 +52,400 @@ pub(crate) type CompletedRound = (u64, RoundState); /// A read-only view of the last completed round. pub(crate) struct LastCompletedRound { - inner: RwLock>, + inner: RwLock>, } impl LastCompletedRound { - /// Create a new tracker based on some starting last-completed round. - pub(crate) fn new(round: CompletedRound) -> Self { - LastCompletedRound { inner: RwLock::new(round) } - } - - /// Read the last completed round. - pub(crate) fn read(&self) -> CompletedRound { - self.inner.read().clone() - } - - // NOTE: not exposed outside of this module intentionally. - fn with(&self, f: F) -> R - where F: FnOnce(&mut CompletedRound) -> R - { - f(&mut *self.inner.write()) - } + /// Create a new tracker based on some starting last-completed round. + pub(crate) fn new(round: CompletedRound) -> Self { + LastCompletedRound { + inner: RwLock::new(round), + } + } + + /// Read the last completed round. + pub(crate) fn read(&self) -> CompletedRound { + self.inner.read().clone() + } + + // NOTE: not exposed outside of this module intentionally. + fn with(&self, f: F) -> R + where + F: FnOnce(&mut CompletedRound) -> R, + { + f(&mut *self.inner.write()) + } } /// The environment we run GRANDPA in. 
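/// It bundles the client handle, the voter set and set id, the voter `Config`,
/// the shared authority-set and consensus-changes trackers, the network, and
/// the last completed round.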
pub(crate) struct Environment, RA> { - pub(crate) inner: Arc>, - pub(crate) voters: Arc>, - pub(crate) config: Config, - pub(crate) authority_set: SharedAuthoritySet>, - pub(crate) consensus_changes: SharedConsensusChanges>, - pub(crate) network: N, - pub(crate) set_id: u64, - pub(crate) last_completed: LastCompletedRound>, + pub(crate) inner: Arc>, + pub(crate) voters: Arc>, + pub(crate) config: Config, + pub(crate) authority_set: SharedAuthoritySet>, + pub(crate) consensus_changes: SharedConsensusChanges>, + pub(crate) network: N, + pub(crate) set_id: u64, + pub(crate) last_completed: LastCompletedRound>, } -impl, B, E, N, RA> grandpa::Chain> for Environment where - Block: 'static, - B: Backend + 'static, - E: CallExecutor + 'static, - N: Network + 'static, - N::In: 'static, - NumberFor: BlockNumberOps, +impl, B, E, N, RA> grandpa::Chain> + for Environment +where + Block: 'static, + B: Backend + 'static, + E: CallExecutor + 'static, + N: Network + 'static, + N::In: 'static, + NumberFor: BlockNumberOps, { - fn ancestry(&self, base: Block::Hash, block: Block::Hash) -> Result, GrandpaError> { - if base == block { return Err(GrandpaError::NotDescendent) } - - let tree_route_res = ::client::blockchain::tree_route( - self.inner.backend().blockchain(), - BlockId::Hash(block), - BlockId::Hash(base), - ); - - let tree_route = match tree_route_res { - Ok(tree_route) => tree_route, - Err(e) => { - debug!(target: "afg", "Encountered error computing ancestry between block {:?} and base {:?}: {:?}", + fn ancestry( + &self, + base: Block::Hash, + block: Block::Hash, + ) -> Result, GrandpaError> { + if base == block { + return Err(GrandpaError::NotDescendent); + } + + let tree_route_res = ::client::blockchain::tree_route( + self.inner.backend().blockchain(), + BlockId::Hash(block), + BlockId::Hash(base), + ); + + let tree_route = match tree_route_res { + Ok(tree_route) => tree_route, + Err(e) => { + debug!(target: "afg", "Encountered error computing ancestry between block {:?} and base {:?}: {:?}", block, base, e); - return Err(GrandpaError::NotDescendent); - } - }; - - if tree_route.common_block().hash != base { - return Err(GrandpaError::NotDescendent); - } - - // skip one because our ancestry is meant to start from the parent of `block`, - // and `tree_route` includes it. - Ok(tree_route.retracted().iter().skip(1).map(|e| e.hash).collect()) - } - - fn best_chain_containing(&self, block: Block::Hash) -> Option<(Block::Hash, NumberFor)> { - // NOTE: when we finalize an authority set change through the sync protocol the voter is - // signaled asynchronously. therefore the voter could still vote in the next round - // before activating the new set. the `authority_set` is updated immediately thus we - // restrict the voter based on that. - if self.set_id != self.authority_set.inner().read().current().0 { - return None; - } - - // we refuse to vote beyond the current limit number where transitions are scheduled to - // occur. - // once blocks are finalized that make that transition irrelevant or activate it, - // we will proceed onwards. most of the time there will be no pending transition. - let limit = self.authority_set.current_limit(); - debug!(target: "afg", "Finding best chain containing block {:?} with number limit {:?}", block, limit); - - match self.inner.best_containing(block, None) { - Ok(Some(mut best_hash)) => { - let base_header = self.inner.header(&BlockId::Hash(block)).ok()? 
- .expect("Header known to exist after `best_containing` call; qed"); - - if let Some(limit) = limit { - // this is a rare case which might cause issues, - // might be better to return the header itself. - if *base_header.number() > limit { - debug!(target: "afg", "Encountered error finding best chain containing {:?} with limit {:?}: target block is after limit", - block, - limit, - ); - return None; - } - } - - let mut best_header = self.inner.header(&BlockId::Hash(best_hash)).ok()? - .expect("Header known to exist after `best_containing` call; qed"); - - // we target a vote towards 3/4 of the unfinalized chain (rounding up) - let target = { - let two = NumberFor::::one() + One::one(); - let three = two + One::one(); - let four = three + One::one(); - - let diff = *best_header.number() - *base_header.number(); - let diff = ((diff * three) + two) / four; - - *base_header.number() + diff - }; - - // unless our vote is currently being limited due to a pending change - let target = limit.map(|limit| limit.min(target)).unwrap_or(target); - - // walk backwards until we find the target block - loop { - if *best_header.number() < target { unreachable!(); } - if *best_header.number() == target { - return Some((best_hash, *best_header.number())); - } - - best_hash = *best_header.parent_hash(); - best_header = self.inner.header(&BlockId::Hash(best_hash)).ok()? - .expect("Header known to exist after `best_containing` call; qed"); - } - }, - Ok(None) => { - debug!(target: "afg", "Encountered error finding best chain containing {:?}: couldn't find target block", block); - None - } - Err(e) => { - debug!(target: "afg", "Encountered error finding best chain containing {:?}: {:?}", block, e); - None - } - } - } + return Err(GrandpaError::NotDescendent); + } + }; + + if tree_route.common_block().hash != base { + return Err(GrandpaError::NotDescendent); + } + + // skip one because our ancestry is meant to start from the parent of `block`, + // and `tree_route` includes it. + Ok(tree_route + .retracted() + .iter() + .skip(1) + .map(|e| e.hash) + .collect()) + } + + fn best_chain_containing(&self, block: Block::Hash) -> Option<(Block::Hash, NumberFor)> { + // NOTE: when we finalize an authority set change through the sync protocol the voter is + // signaled asynchronously. therefore the voter could still vote in the next round + // before activating the new set. the `authority_set` is updated immediately thus we + // restrict the voter based on that. + if self.set_id != self.authority_set.inner().read().current().0 { + return None; + } + + // we refuse to vote beyond the current limit number where transitions are scheduled to + // occur. + // once blocks are finalized that make that transition irrelevant or activate it, + // we will proceed onwards. most of the time there will be no pending transition. + let limit = self.authority_set.current_limit(); + debug!(target: "afg", "Finding best chain containing block {:?} with number limit {:?}", block, limit); + + match self.inner.best_containing(block, None) { + Ok(Some(mut best_hash)) => { + let base_header = self + .inner + .header(&BlockId::Hash(block)) + .ok()? + .expect("Header known to exist after `best_containing` call; qed"); + + if let Some(limit) = limit { + // this is a rare case which might cause issues, + // might be better to return the header itself. 
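+ // Returning `None` is consistent with the rule above: we refuse
+ // to vote rather than vote past the scheduled transition limit.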
+ if *base_header.number() > limit { + debug!(target: "afg", "Encountered error finding best chain containing {:?} with limit {:?}: target block is after limit", + block, + limit, + ); + return None; + } + } + + let mut best_header = self + .inner + .header(&BlockId::Hash(best_hash)) + .ok()? + .expect("Header known to exist after `best_containing` call; qed"); + + // we target a vote towards 3/4 of the unfinalized chain (rounding up) + let target = { + let two = NumberFor::::one() + One::one(); + let three = two + One::one(); + let four = three + One::one(); + + let diff = *best_header.number() - *base_header.number(); + let diff = ((diff * three) + two) / four; + + *base_header.number() + diff + }; + + // unless our vote is currently being limited due to a pending change + let target = limit.map(|limit| limit.min(target)).unwrap_or(target); + + // walk backwards until we find the target block + loop { + if *best_header.number() < target { + unreachable!(); + } + if *best_header.number() == target { + return Some((best_hash, *best_header.number())); + } + + best_hash = *best_header.parent_hash(); + best_header = self + .inner + .header(&BlockId::Hash(best_hash)) + .ok()? + .expect("Header known to exist after `best_containing` call; qed"); + } + } + Ok(None) => { + debug!(target: "afg", "Encountered error finding best chain containing {:?}: couldn't find target block", block); + None + } + Err(e) => { + debug!(target: "afg", "Encountered error finding best chain containing {:?}: {:?}", block, e); + None + } + } + } } -impl, N, RA> voter::Environment> for Environment where - Block: 'static, - B: Backend + 'static, - E: CallExecutor + 'static + Send + Sync, - N: Network + 'static + Send, - N::In: 'static + Send, - RA: 'static + Send + Sync, - NumberFor: BlockNumberOps, +impl, N, RA> voter::Environment> + for Environment +where + Block: 'static, + B: Backend + 'static, + E: CallExecutor + 'static + Send + Sync, + N: Network + 'static + Send, + N::In: 'static + Send, + RA: 'static + Send + Sync, + NumberFor: BlockNumberOps, { - type Timer = Box + Send>; - type Id = AuthorityId; - type Signature = ed25519::Signature; - - // regular round message streams - type In = Box, Self::Signature, Self::Id>, - Error = Self::Error, - > + Send>; - type Out = Box>, - SinkError = Self::Error, - > + Send>; - - type Error = CommandOrError>; - - fn round_data( - &self, - round: u64 - ) -> voter::RoundData { - let now = Instant::now(); - let prevote_timer = Delay::new(now + self.config.gossip_duration * 2); - let precommit_timer = Delay::new(now + self.config.gossip_duration * 4); - - let incoming = crate::communication::checked_message_stream::( - self.network.messages_for(round, self.set_id), - self.voters.clone(), - ); - - let local_key = self.config.local_key.as_ref() - .filter(|pair| self.voters.contains_key(&pair.public().into())); - - let (out_rx, outgoing) = crate::communication::outgoing_messages::( - round, - self.set_id, - local_key.cloned(), - self.voters.clone(), - self.network.clone(), - ); - - // schedule incoming messages from the network to be held until - // corresponding blocks are imported. - let incoming = UntilVoteTargetImported::new( - self.inner.import_notification_stream(), - self.inner.clone(), - incoming, - ); - - // join incoming network messages with locally originating ones. - let incoming = Box::new(out_rx.select(incoming).map_err(Into::into)); - - // schedule network message cleanup when sink drops. 
- let outgoing = Box::new(outgoing.sink_map_err(Into::into)); - - voter::RoundData { - prevote_timer: Box::new(prevote_timer.map_err(|e| Error::Timer(e).into())), - precommit_timer: Box::new(precommit_timer.map_err(|e| Error::Timer(e).into())), - incoming, - outgoing, - } - } - - fn completed(&self, round: u64, state: RoundState>) -> Result<(), Self::Error> { - debug!( - target: "afg", "Voter {} completed round {} in set {}. Estimate = {:?}, Finalized in round = {:?}", - self.config.name(), - round, - self.set_id, - state.estimate.as_ref().map(|e| e.1), - state.finalized.as_ref().map(|e| e.1), - ); - - self.last_completed.with(|last_completed| { - let set_state = crate::aux_schema::VoterSetState::Live(round, state.clone()); - crate::aux_schema::write_voter_set_state(&**self.inner.backend(), &set_state)?; - - *last_completed = (round, state); // after writing to DB successfully. - Ok(()) - }) - } - - fn finalize_block(&self, hash: Block::Hash, number: NumberFor, round: u64, commit: Commit) -> Result<(), Self::Error> { - use client::blockchain::HeaderBackend; - - let status = self.inner.backend().blockchain().info()?; - if number <= status.finalized_number && self.inner.backend().blockchain().hash(number)? == Some(hash) { - // This can happen after a forced change (triggered by the finality tracker when finality is stalled), since - // the voter will be restarted at the median last finalized block, which can be lower than the local best - // finalized block. - warn!(target: "afg", "Re-finalized block #{:?} ({:?}) in the canonical chain, current best finalized is #{:?}", - hash, - number, - status.finalized_number, - ); - - return Ok(()); - } - - finalize_block( - &*self.inner, - &self.authority_set, - &self.consensus_changes, - Some(As::sa(self.config.justification_period)), - hash, - number, - (round, commit).into(), - ) - } - - fn round_commit_timer(&self) -> Self::Timer { - use rand::{thread_rng, Rng}; - - //random between 0-1 seconds. - let delay: u64 = thread_rng().gen_range(0, 1000); - Box::new(Delay::new( - Instant::now() + Duration::from_millis(delay) - ).map_err(|e| Error::Timer(e).into())) - } - - fn prevote_equivocation( - &self, - _round: u64, - equivocation: ::grandpa::Equivocation, Self::Signature> - ) { - warn!(target: "afg", "Detected prevote equivocation in the finality worker: {:?}", equivocation); - // nothing yet; this could craft misbehavior reports of some kind. 
- } - - fn precommit_equivocation( - &self, - _round: u64, - equivocation: Equivocation, Self::Signature> - ) { - warn!(target: "afg", "Detected precommit equivocation in the finality worker: {:?}", equivocation); - // nothing yet - } + type Timer = Box + Send>; + type Id = AuthorityId; + type Signature = ed25519::Signature; + + // regular round message streams + type In = Box< + dyn Stream< + Item = ::grandpa::SignedMessage< + Block::Hash, + NumberFor, + Self::Signature, + Self::Id, + >, + Error = Self::Error, + > + Send, + >; + type Out = Box< + dyn Sink< + SinkItem = ::grandpa::Message>, + SinkError = Self::Error, + > + Send, + >; + + type Error = CommandOrError>; + + fn round_data(&self, round: u64) -> voter::RoundData { + let now = Instant::now(); + let prevote_timer = Delay::new(now + self.config.gossip_duration * 2); + let precommit_timer = Delay::new(now + self.config.gossip_duration * 4); + + let incoming = crate::communication::checked_message_stream::( + self.network.messages_for(round, self.set_id), + self.voters.clone(), + ); + + let local_key = self + .config + .local_key + .as_ref() + .filter(|pair| self.voters.contains_key(&pair.public().into())); + + let (out_rx, outgoing) = crate::communication::outgoing_messages::( + round, + self.set_id, + local_key.cloned(), + self.voters.clone(), + self.network.clone(), + ); + + // schedule incoming messages from the network to be held until + // corresponding blocks are imported. + let incoming = UntilVoteTargetImported::new( + self.inner.import_notification_stream(), + self.inner.clone(), + incoming, + ); + + // join incoming network messages with locally originating ones. + let incoming = Box::new(out_rx.select(incoming).map_err(Into::into)); + + // schedule network message cleanup when sink drops. + let outgoing = Box::new(outgoing.sink_map_err(Into::into)); + + voter::RoundData { + prevote_timer: Box::new(prevote_timer.map_err(|e| Error::Timer(e).into())), + precommit_timer: Box::new(precommit_timer.map_err(|e| Error::Timer(e).into())), + incoming, + outgoing, + } + } + + fn completed( + &self, + round: u64, + state: RoundState>, + ) -> Result<(), Self::Error> { + debug!( + target: "afg", "Voter {} completed round {} in set {}. Estimate = {:?}, Finalized in round = {:?}", + self.config.name(), + round, + self.set_id, + state.estimate.as_ref().map(|e| e.1), + state.finalized.as_ref().map(|e| e.1), + ); + + self.last_completed.with(|last_completed| { + let set_state = crate::aux_schema::VoterSetState::Live(round, state.clone()); + crate::aux_schema::write_voter_set_state(&**self.inner.backend(), &set_state)?; + + *last_completed = (round, state); // after writing to DB successfully. + Ok(()) + }) + } + + fn finalize_block( + &self, + hash: Block::Hash, + number: NumberFor, + round: u64, + commit: Commit, + ) -> Result<(), Self::Error> { + use client::blockchain::HeaderBackend; + + let status = self.inner.backend().blockchain().info()?; + if number <= status.finalized_number + && self.inner.backend().blockchain().hash(number)? == Some(hash) + { + // This can happen after a forced change (triggered by the finality tracker when finality is stalled), since + // the voter will be restarted at the median last finalized block, which can be lower than the local best + // finalized block. 
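+ // The block is already finalized locally, so there is nothing
+ // further to do here beyond logging it.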
+ warn!(target: "afg", "Re-finalized block #{:?} ({:?}) in the canonical chain, current best finalized is #{:?}", + hash, + number, + status.finalized_number, + ); + + return Ok(()); + } + + finalize_block( + &*self.inner, + &self.authority_set, + &self.consensus_changes, + Some(As::sa(self.config.justification_period)), + hash, + number, + (round, commit).into(), + ) + } + + fn round_commit_timer(&self) -> Self::Timer { + use rand::{thread_rng, Rng}; + + //random between 0-1 seconds. + let delay: u64 = thread_rng().gen_range(0, 1000); + Box::new( + Delay::new(Instant::now() + Duration::from_millis(delay)) + .map_err(|e| Error::Timer(e).into()), + ) + } + + fn prevote_equivocation( + &self, + _round: u64, + equivocation: ::grandpa::Equivocation, Self::Signature>, + ) { + warn!(target: "afg", "Detected prevote equivocation in the finality worker: {:?}", equivocation); + // nothing yet; this could craft misbehavior reports of some kind. + } + + fn precommit_equivocation( + &self, + _round: u64, + equivocation: Equivocation, Self::Signature>, + ) { + warn!(target: "afg", "Detected precommit equivocation in the finality worker: {:?}", equivocation); + // nothing yet + } } pub(crate) enum JustificationOrCommit { - Justification(GrandpaJustification), - Commit((u64, Commit)), + Justification(GrandpaJustification), + Commit((u64, Commit)), } impl From<(u64, Commit)> for JustificationOrCommit { - fn from(commit: (u64, Commit)) -> JustificationOrCommit { - JustificationOrCommit::Commit(commit) - } + fn from(commit: (u64, Commit)) -> JustificationOrCommit { + JustificationOrCommit::Commit(commit) + } } impl From> for JustificationOrCommit { - fn from(justification: GrandpaJustification) -> JustificationOrCommit { - JustificationOrCommit::Justification(justification) - } + fn from(justification: GrandpaJustification) -> JustificationOrCommit { + JustificationOrCommit::Justification(justification) + } } /// Finalize the given block and apply any authority set changes. If an /// authority set change is enacted then a justification is created (if not /// given) and stored with the block when finalizing it. /// This method assumes that the block being finalized has already been imported. 
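/// If applying the enacted changes fails, the previous authority set (and,
/// where applicable, the previous consensus changes) are restored before the
/// error is returned.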
-pub(crate) fn finalize_block, E, RA>( - client: &Client, - authority_set: &SharedAuthoritySet>, - consensus_changes: &SharedConsensusChanges>, - justification_period: Option>, - hash: Block::Hash, - number: NumberFor, - justification_or_commit: JustificationOrCommit, -) -> Result<(), CommandOrError>> where - B: Backend, - E: CallExecutor + Send + Sync, - RA: Send + Sync, +pub(crate) fn finalize_block, E, RA>( + client: &Client, + authority_set: &SharedAuthoritySet>, + consensus_changes: &SharedConsensusChanges>, + justification_period: Option>, + hash: Block::Hash, + number: NumberFor, + justification_or_commit: JustificationOrCommit, +) -> Result<(), CommandOrError>> +where + B: Backend, + E: CallExecutor + Send + Sync, + RA: Send + Sync, { - // lock must be held through writing to DB to avoid race - let mut authority_set = authority_set.inner().write(); - - // FIXME #1483: clone only when changed - let old_authority_set = authority_set.clone(); - // holds the old consensus changes in case it is changed below, needed for - // reverting in case of failure - let mut old_consensus_changes = None; - - let mut consensus_changes = consensus_changes.lock(); - let canon_at_height = |canon_number| { - // "true" because the block is finalized - canonical_at_height(client, (hash, number), true, canon_number) - }; - - let update_res: Result<_, Error> = client.lock_import_and_run(|import_op| { + // lock must be held through writing to DB to avoid race + let mut authority_set = authority_set.inner().write(); + + // FIXME #1483: clone only when changed + let old_authority_set = authority_set.clone(); + // holds the old consensus changes in case it is changed below, needed for + // reverting in case of failure + let mut old_consensus_changes = None; + + let mut consensus_changes = consensus_changes.lock(); + let canon_at_height = |canon_number| { + // "true" because the block is finalized + canonical_at_height(client, (hash, number), true, canon_number) + }; + + let update_res: Result<_, Error> = client.lock_import_and_run(|import_op| { let status = authority_set.apply_standard_changes( hash, number, @@ -517,70 +568,71 @@ pub(crate) fn finalize_block, E, RA>( Ok(new_authorities.map(VoterCommand::ChangeAuthorities)) }); - match update_res { - Ok(Some(command)) => Err(CommandOrError::VoterCommand(command)), - Ok(None) => Ok(()), - Err(e) => { - *authority_set = old_authority_set; + match update_res { + Ok(Some(command)) => Err(CommandOrError::VoterCommand(command)), + Ok(None) => Ok(()), + Err(e) => { + *authority_set = old_authority_set; - if let Some(old_consensus_changes) = old_consensus_changes { - *consensus_changes = old_consensus_changes; - } + if let Some(old_consensus_changes) = old_consensus_changes { + *consensus_changes = old_consensus_changes; + } - Err(CommandOrError::Error(e)) - } - } + Err(CommandOrError::Error(e)) + } + } } /// Using the given base get the block at the given height on this chain. The /// target block must be an ancestor of base, therefore `height <= base.height`. 
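/// A worked example of the walking case (illustrative hashes, non-canonical
/// `base = (h5, 5)`, `height = 3`): the canonical header at number 4 is fetched
/// by number, then `steps = 5 - 3 - 1 = 1` parent-hash hop lands on the block
/// at height 3, whose hash is returned.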
-pub(crate) fn canonical_at_height, RA>( - client: &Client, - base: (Block::Hash, NumberFor), - base_is_canonical: bool, - height: NumberFor, -) -> Result, ClientError> where - B: Backend, - E: CallExecutor + Send + Sync, +pub(crate) fn canonical_at_height, RA>( + client: &Client, + base: (Block::Hash, NumberFor), + base_is_canonical: bool, + height: NumberFor, +) -> Result, ClientError> +where + B: Backend, + E: CallExecutor + Send + Sync, { - use runtime_primitives::traits::{One, Zero, BlockNumberToHash}; - - if height > base.1 { - return Ok(None); - } - - if height == base.1 { - if base_is_canonical { - return Ok(Some(base.0)); - } else { - return Ok(client.block_number_to_hash(height)); - } - } else if base_is_canonical { - return Ok(client.block_number_to_hash(height)); - } - - let one = NumberFor::::one(); - - // start by getting _canonical_ block with number at parent position and then iterating - // backwards by hash. - let mut current = match client.header(&BlockId::Number(base.1 - one))? { - Some(header) => header, - _ => return Ok(None), - }; - - // we've already checked that base > height above. - let mut steps = base.1 - height - one; - - while steps > NumberFor::::zero() { - current = match client.header(&BlockId::Hash(*current.parent_hash()))? { - Some(header) => header, - _ => return Ok(None), - }; - - steps -= one; - } - - Ok(Some(current.hash())) + use runtime_primitives::traits::{BlockNumberToHash, One, Zero}; + + if height > base.1 { + return Ok(None); + } + + if height == base.1 { + if base_is_canonical { + return Ok(Some(base.0)); + } else { + return Ok(client.block_number_to_hash(height)); + } + } else if base_is_canonical { + return Ok(client.block_number_to_hash(height)); + } + + let one = NumberFor::::one(); + + // start by getting _canonical_ block with number at parent position and then iterating + // backwards by hash. + let mut current = match client.header(&BlockId::Number(base.1 - one))? { + Some(header) => header, + _ => return Ok(None), + }; + + // we've already checked that base > height above. + let mut steps = base.1 - height - one; + + while steps > NumberFor::::zero() { + current = match client.header(&BlockId::Hash(*current.parent_hash()))? { + Some(header) => header, + _ => return Ok(None), + }; + + steps -= one; + } + + Ok(Some(current.hash())) } /// Returns a function for checking block ancestry, the returned function will @@ -589,34 +641,39 @@ pub(crate) fn canonical_at_height, RA>( /// represent the current block `hash` and its `parent hash`, if given the /// function that's returned will assume that `hash` isn't part of the local DB /// yet, and all searches in the DB will instead reference the parent. 
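/// Note that a block is never considered a descendent of itself: when
/// `base == hash` the returned function yields `Ok(false)`.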
-pub fn is_descendent_of<'a, B, E, Block: BlockT, RA>( - client: &'a Client, - current: Option<(&'a H256, &'a H256)>, +pub fn is_descendent_of<'a, B, E, Block: BlockT, RA>( + client: &'a Client, + current: Option<(&'a H256, &'a H256)>, ) -> impl Fn(&H256, &H256) -> Result + 'a -where B: Backend, - E: CallExecutor + Send + Sync, +where + B: Backend, + E: CallExecutor + Send + Sync, { - move |base, hash| { - if base == hash { return Ok(false); } - - let mut hash = hash; - if let Some((current_hash, current_parent_hash)) = current { - if base == current_hash { return Ok(false); } - if hash == current_hash { - if base == current_parent_hash { - return Ok(true); - } else { - hash = current_parent_hash; - } - } - } - - let tree_route = client::blockchain::tree_route( - client.backend().blockchain(), - BlockId::Hash(*hash), - BlockId::Hash(*base), - )?; - - Ok(tree_route.common_block().hash == *base) - } + move |base, hash| { + if base == hash { + return Ok(false); + } + + let mut hash = hash; + if let Some((current_hash, current_parent_hash)) = current { + if base == current_hash { + return Ok(false); + } + if hash == current_hash { + if base == current_parent_hash { + return Ok(true); + } else { + hash = current_parent_hash; + } + } + } + + let tree_route = client::blockchain::tree_route( + client.backend().blockchain(), + BlockId::Hash(*hash), + BlockId::Hash(*base), + )?; + + Ok(tree_route.common_block().hash == *base) + } } diff --git a/core/finality-grandpa/src/finality_proof.rs b/core/finality-grandpa/src/finality_proof.rs index 2b34a094a0..aa02b0af25 100644 --- a/core/finality-grandpa/src/finality_proof.rs +++ b/core/finality-grandpa/src/finality_proof.rs @@ -32,18 +32,16 @@ use grandpa::VoterSet; use client::{ - blockchain::Backend as BlockchainBackend, - error::{Error as ClientError, ErrorKind as ClientErrorKind, Result as ClientResult}, - light::fetcher::RemoteCallRequest, + blockchain::Backend as BlockchainBackend, + error::{Error as ClientError, ErrorKind as ClientErrorKind, Result as ClientResult}, + light::fetcher::RemoteCallRequest, }; -use parity_codec::{Encode, Decode}; +use ed25519::Public as AuthorityId; use grandpa::BlockNumberOps; +use parity_codec::{Decode, Encode}; use runtime_primitives::generic::BlockId; -use runtime_primitives::traits::{ - NumberFor, Block as BlockT, Header as HeaderT, One, -}; +use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, NumberFor, One}; use substrate_primitives::{ed25519, H256}; -use ed25519::Public as AuthorityId; use substrate_telemetry::{telemetry, CONSENSUS_INFO}; use crate::justification::GrandpaJustification; @@ -53,67 +51,68 @@ use crate::justification::GrandpaJustification; /// The proof is the serialized `FinalityProof` constructed using earliest known /// justification of the block. None is returned if there's no known justification atm. 
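/// Requesting a proof for a non-canonical block is an error, while requesting
/// one for a block that is known to be unfinalized returns `Ok(None)`.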
pub fn prove_finality( - blockchain: &B, - generate_execution_proof: G, - block: Block::Hash, + blockchain: &B, + generate_execution_proof: G, + block: Block::Hash, ) -> ::client::error::Result>> - where - B: BlockchainBackend, - G: Fn(&BlockId, &str, &[u8]) -> ClientResult>>, +where + B: BlockchainBackend, + G: Fn(&BlockId, &str, &[u8]) -> ClientResult>>, { - let block_id = BlockId::Hash(block); - let mut block_number = blockchain.expect_block_number_from_id(&block_id)?; - - // early-return if we sure that the block isn't finalized yet - let info = blockchain.info()?; - if info.finalized_number < block_number { - return Ok(None); - } - - // early-return if we sure that the block is NOT a part of canonical chain - let canonical_block = blockchain.expect_block_hash_from_id(&BlockId::Number(block_number))?; - if block != canonical_block { - return Err(ClientErrorKind::Backend( - "Cannot generate finality proof for non-canonical block".into() - ).into()); - } - - // now that we know that the block is finalized, we can generate finalization proof - - // we need to prove grandpa authorities set that has generated justification - // BUT since `GrandpaApi::grandpa_authorities` call returns the set that becames actual - // at the next block, the proof-of execution is generated using parent block' state - // (this will fail if we're trying to prove genesis finality, but such the call itself is redundant) - let mut current_header = blockchain.expect_header(BlockId::Hash(block))?; - let parent_block_id = BlockId::Hash(*current_header.parent_hash()); - let authorities_proof = generate_execution_proof( - &parent_block_id, - "GrandpaApi_grandpa_authorities", - &[], - )?; - - // search for earliest post-block (inclusive) justification - let mut finalization_path = Vec::new(); - loop { - finalization_path.push(current_header); - - match blockchain.justification(BlockId::Number(block_number))? 
{
- Some(justification) => return Ok(Some(FinalityProof {
- finalization_path,
- justification,
- authorities_proof,
- }.encode())),
- None if block_number == info.finalized_number => break,
- None => {
- block_number = block_number + One::one();
- current_header = blockchain.expect_header(BlockId::Number(block_number))?;
- },
- }
- }
-
- Err(ClientErrorKind::Backend(
- "cannot find justification for finalized block".into()
- ).into())
+ let block_id = BlockId::Hash(block);
+ let mut block_number = blockchain.expect_block_number_from_id(&block_id)?;
+
+ // early-return if we are sure that the block isn't finalized yet
+ let info = blockchain.info()?;
+ if info.finalized_number < block_number {
+ return Ok(None);
+ }
+
+ // early-return if we are sure that the block is NOT part of the canonical chain
+ let canonical_block = blockchain.expect_block_hash_from_id(&BlockId::Number(block_number))?;
+ if block != canonical_block {
+ return Err(ClientErrorKind::Backend(
+ "Cannot generate finality proof for non-canonical block".into(),
+ )
+ .into());
+ }
+
+ // now that we know that the block is finalized, we can generate the finalization proof
+
+ // we need to prove the grandpa authorities set that generated the justification.
+ // BUT since the `GrandpaApi::grandpa_authorities` call returns the set that takes effect
+ // at the next block, the proof of execution is generated using the parent block's state
+ // (this will fail if we're trying to prove genesis finality, but such a call would be redundant anyway)
+ let mut current_header = blockchain.expect_header(BlockId::Hash(block))?;
+ let parent_block_id = BlockId::Hash(*current_header.parent_hash());
+ let authorities_proof =
+ generate_execution_proof(&parent_block_id, "GrandpaApi_grandpa_authorities", &[])?;
+
+ // search for earliest post-block (inclusive) justification
+ let mut finalization_path = Vec::new();
+ loop {
+ finalization_path.push(current_header);
+
+ match blockchain.justification(BlockId::Number(block_number))? {
+ Some(justification) => {
+ return Ok(Some(
+ FinalityProof {
+ finalization_path,
+ justification,
+ authorities_proof,
+ }
+ .encode(),
+ ));
+ }
+ None if block_number == info.finalized_number => break,
+ None => {
+ block_number = block_number + One::one();
+ current_header = blockchain.expect_header(BlockId::Number(block_number))?;
+ }
+ }
+ }
+
+ Err(ClientErrorKind::Backend("cannot find justification for finalized block".into()).into())
}

/// Check proof-of-finality for the given block.
@@ -121,85 +120,98 @@ pub fn prove_finality(
/// Returns the vector of headers (including `block` header, ordered by ascending block number) that MUST be
/// validated + imported at once (i.e. within a single db transaction). If at least one of those headers
/// is invalid, all others MUST be considered invalid.
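/// Verification proceeds in order: decode the proof, check that `block` starts
/// the finalized path and that the justification target ends it, check the
/// authorities-set execution proof against the parent header, and finally
/// verify the justification itself against the resulting voter set.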
-pub fn check_finality_proof, C>( - check_execution_proof: C, - parent_header: Block::Header, - block: (NumberFor, Block::Hash), - set_id: u64, - remote_proof: Vec, +pub fn check_finality_proof, C>( + check_execution_proof: C, + parent_header: Block::Header, + block: (NumberFor, Block::Hash), + set_id: u64, + remote_proof: Vec, ) -> ClientResult> - where - NumberFor: grandpa::BlockNumberOps, - C: Fn(&RemoteCallRequest) -> ClientResult>, +where + NumberFor: grandpa::BlockNumberOps, + C: Fn(&RemoteCallRequest) -> ClientResult>, { - do_check_finality_proof::>( - check_execution_proof, - parent_header, - block, - set_id, - remote_proof, - ) + do_check_finality_proof::>( + check_execution_proof, + parent_header, + block, + set_id, + remote_proof, + ) } /// Check proof-of-finality using given justification type. -fn do_check_finality_proof, C, J>( - check_execution_proof: C, - parent_header: Block::Header, - block: (NumberFor, Block::Hash), - set_id: u64, - remote_proof: Vec, +fn do_check_finality_proof, C, J>( + check_execution_proof: C, + parent_header: Block::Header, + block: (NumberFor, Block::Hash), + set_id: u64, + remote_proof: Vec, ) -> ClientResult> - where - NumberFor: grandpa::BlockNumberOps, - C: Fn(&RemoteCallRequest) -> ClientResult>, - J: ProvableJustification, +where + NumberFor: grandpa::BlockNumberOps, + C: Fn(&RemoteCallRequest) -> ClientResult>, + J: ProvableJustification, { - // decode finality proof - let proof = FinalityProof::::decode(&mut &remote_proof[..]) - .ok_or_else(|| ClientErrorKind::BadJustification("failed to decode finality proof".into()))?; - - // check that the first header in finalization path is the block itself - { - let finalized_header = proof.finalization_path.first() - .ok_or_else(|| ClientError::from(ClientErrorKind::BadJustification( - "finality proof: finalized path is empty".into() - )))?; - if *finalized_header.number() != block.0 || finalized_header.hash() != block.1 { - return Err(ClientErrorKind::BadJustification( - "finality proof: block is not a part of finalized path".into() - ).into()); - } - } - - // check that the last header in finalization path is the justification target block - let just_block = proof.justification.target_block(); - { - let finalized_header = proof.finalization_path.last() - .expect("checked above that proof.finalization_path is not empty; qed"); - if *finalized_header.number() != just_block.0 || finalized_header.hash() != just_block.1 { - return Err(ClientErrorKind::BadJustification( - "finality proof: target justification block is not a part of finalized path".into() - ).into()); - } - } - - // check authorities set proof && get grandpa authorities that should have signed justification - let grandpa_authorities = check_execution_proof(&RemoteCallRequest { - block: just_block.1, - header: parent_header, - method: "GrandpaApi_grandpa_authorities".into(), - call_data: vec![], - retry_count: None, - })?; - let grandpa_authorities: Vec<(AuthorityId, u64)> = Decode::decode(&mut &grandpa_authorities[..]) - .ok_or_else(|| ClientErrorKind::BadJustification("failed to decode GRANDPA authorities set proof".into()))?; - - // and now check justification - proof.justification.verify(set_id, &grandpa_authorities.into_iter().collect())?; - - telemetry!(CONSENSUS_INFO; "afg.finality_proof_ok"; + // decode finality proof + let proof = + FinalityProof::::decode(&mut &remote_proof[..]).ok_or_else(|| { + ClientErrorKind::BadJustification("failed to decode finality proof".into()) + })?; + + // check that the first header in finalization 
path is the block itself + { + let finalized_header = proof.finalization_path.first().ok_or_else(|| { + ClientError::from(ClientErrorKind::BadJustification( + "finality proof: finalized path is empty".into(), + )) + })?; + if *finalized_header.number() != block.0 || finalized_header.hash() != block.1 { + return Err(ClientErrorKind::BadJustification( + "finality proof: block is not a part of finalized path".into(), + ) + .into()); + } + } + + // check that the last header in finalization path is the justification target block + let just_block = proof.justification.target_block(); + { + let finalized_header = proof + .finalization_path + .last() + .expect("checked above that proof.finalization_path is not empty; qed"); + if *finalized_header.number() != just_block.0 || finalized_header.hash() != just_block.1 { + return Err(ClientErrorKind::BadJustification( + "finality proof: target justification block is not a part of finalized path".into(), + ) + .into()); + } + } + + // check authorities set proof && get grandpa authorities that should have signed justification + let grandpa_authorities = check_execution_proof(&RemoteCallRequest { + block: just_block.1, + header: parent_header, + method: "GrandpaApi_grandpa_authorities".into(), + call_data: vec![], + retry_count: None, + })?; + let grandpa_authorities: Vec<(AuthorityId, u64)> = + Decode::decode(&mut &grandpa_authorities[..]).ok_or_else(|| { + ClientErrorKind::BadJustification( + "failed to decode GRANDPA authorities set proof".into(), + ) + })?; + + // and now check justification + proof + .justification + .verify(set_id, &grandpa_authorities.into_iter().collect())?; + + telemetry!(CONSENSUS_INFO; "afg.finality_proof_ok"; "set_id" => ?set_id, "finalized_header_hash" => ?block.1); - Ok(proof.finalization_path) + Ok(proof.finalization_path) } /// Proof of finality. @@ -210,223 +222,356 @@ fn do_check_finality_proof, C, J>( /// 3) valid (with respect to proved authorities) GRANDPA justification of the block F. #[derive(Debug, PartialEq, Encode, Decode)] struct FinalityProof { - /// Headers-path (ordered by block number, ascending) from the block we're gathering proof for - /// (inclusive) to the target block of the justification (inclusive). - pub finalization_path: Vec
<Header>
, - /// Justification (finalization) of the last block from the `finalization_path`. - pub justification: Justification, - /// Proof of `GrandpaApi::grandpa_authorities` call execution at the - /// justification' target block. - pub authorities_proof: Vec>, + /// Headers-path (ordered by block number, ascending) from the block we're gathering proof for + /// (inclusive) to the target block of the justification (inclusive). + pub finalization_path: Vec
, + /// Justification (finalization) of the last block from the `finalization_path`. + pub justification: Justification, + /// Proof of `GrandpaApi::grandpa_authorities` call execution at the + /// justification' target block. + pub authorities_proof: Vec>, } /// Justification used to prove block finality. trait ProvableJustification: Encode + Decode { - /// Get target block of this justification. - fn target_block(&self) -> (Header::Number, Header::Hash); + /// Get target block of this justification. + fn target_block(&self) -> (Header::Number, Header::Hash); - /// Verify justification with respect to authorities set and authorities set id. - fn verify(&self, set_id: u64, authorities: &VoterSet) -> ClientResult<()>; + /// Verify justification with respect to authorities set and authorities set id. + fn verify(&self, set_id: u64, authorities: &VoterSet) -> ClientResult<()>; } -impl> ProvableJustification for GrandpaJustification - where - NumberFor: BlockNumberOps, +impl> ProvableJustification + for GrandpaJustification +where + NumberFor: BlockNumberOps, { - fn target_block(&self) -> (NumberFor, Block::Hash) { - (self.commit.target_number, self.commit.target_hash) - } + fn target_block(&self) -> (NumberFor, Block::Hash) { + (self.commit.target_number, self.commit.target_hash) + } - fn verify(&self, set_id: u64, authorities: &VoterSet) -> ClientResult<()> { - GrandpaJustification::verify(self, set_id, authorities) - } + fn verify(&self, set_id: u64, authorities: &VoterSet) -> ClientResult<()> { + GrandpaJustification::verify(self, set_id, authorities) + } } #[cfg(test)] mod tests { - use test_client::runtime::{Block, Header}; - use test_client::client::backend::NewBlockState; - use test_client::client::in_mem::Blockchain as InMemoryBlockchain; - use super::*; - - type FinalityProof = super::FinalityProof>; - - #[derive(Encode, Decode)] - struct ValidFinalityProof(Vec); - - impl ProvableJustification
for ValidFinalityProof { - fn target_block(&self) -> (u64, H256) { (3, header(3).hash()) } - - fn verify(&self, set_id: u64, authorities: &VoterSet) -> ClientResult<()> { - assert_eq!(set_id, 1); - assert_eq!(authorities, &vec![ - (AuthorityId([1u8; 32]), 1), - (AuthorityId([2u8; 32]), 2), - (AuthorityId([3u8; 32]), 3), - ].into_iter().collect()); - Ok(()) - } - } - - fn header(number: u64) -> Header { - let parent_hash = match number { - 0 => Default::default(), - _ => header(number - 1).hash(), - }; - Header::new(number, H256::from_low_u64_be(0), H256::from_low_u64_be(0), parent_hash, Default::default()) - } - - fn side_header(number: u64) -> Header { - Header::new(number, H256::from_low_u64_be(0), H256::from_low_u64_be(1), header(number - 1).hash(), Default::default()) - } - - fn test_blockchain() -> InMemoryBlockchain { - let blockchain = InMemoryBlockchain::::new(); - blockchain.insert(header(0).hash(), header(0), Some(vec![0]), None, NewBlockState::Final).unwrap(); - blockchain.insert(header(1).hash(), header(1), Some(vec![1]), None, NewBlockState::Final).unwrap(); - blockchain.insert(header(2).hash(), header(2), None, None, NewBlockState::Best).unwrap(); - blockchain.insert(header(3).hash(), header(3), Some(vec![3]), None, NewBlockState::Final).unwrap(); - blockchain - } - - #[test] - fn finality_proof_is_not_generated_for_non_final_block() { - let blockchain = test_blockchain(); - blockchain.insert(header(4).hash(), header(4), None, None, NewBlockState::Best).unwrap(); - - // when asking for finality of block 4, None is returned - let proof_of_4 = prove_finality(&blockchain, |_, _, _| Ok(vec![vec![42]]), header(4).hash()) - .unwrap(); - assert_eq!(proof_of_4, None); - } - - #[test] - fn finality_proof_fails_for_non_canonical_block() { - let blockchain = test_blockchain(); - blockchain.insert(header(4).hash(), header(4), None, None, NewBlockState::Best).unwrap(); - blockchain.insert(side_header(4).hash(), side_header(4), None, None, NewBlockState::Best).unwrap(); - blockchain.insert(header(5).hash(), header(5), Some(vec![5]), None, NewBlockState::Final).unwrap(); - - // when asking for finality of side-block 42, None is returned - let proof_of_side_4_fails = prove_finality(&blockchain, |_, _, _| Ok(vec![vec![42]]), H256::from_low_u64_be(42)).is_err(); - assert_eq!(proof_of_side_4_fails, true); - } - - #[test] - fn finality_proof_fails_if_no_justification_known() { - let blockchain = test_blockchain(); - blockchain.insert(header(4).hash(), header(4), None, None, NewBlockState::Final).unwrap(); - - // when asking for finality of block 4, search for justification failing - let proof_of_4_fails = prove_finality(&blockchain, |_, _, _| Ok(vec![vec![42]]), H256::from_low_u64_be(42)).is_err(); - assert_eq!(proof_of_4_fails, true); - } - - #[test] - fn prove_finality_is_generated() { - let blockchain = test_blockchain(); - - // when asking for finality of block 2, justification of 3 is returned - let proof_of_2: FinalityProof = prove_finality(&blockchain, |_, _, _| Ok(vec![vec![42]]), header(2).hash()) - .unwrap().and_then(|p| Decode::decode(&mut &p[..])).unwrap(); - assert_eq!(proof_of_2, FinalityProof { - finalization_path: vec![header(2), header(3)], - justification: vec![3], - authorities_proof: vec![vec![42]], - }); - - // when asking for finality of block 3, justification of 3 is returned - let proof_of_3: FinalityProof = prove_finality(&blockchain, |_, _, _| Ok(vec![vec![42]]), header(3).hash()) - .unwrap().and_then(|p| Decode::decode(&mut &p[..])).unwrap(); - assert_eq!(proof_of_3, 
FinalityProof { - finalization_path: vec![header(3)], - justification: vec![3], - authorities_proof: vec![vec![42]], - }); - } - - #[test] - fn finality_proof_check_fails_when_block_is_not_included() { - let mut proof_of_2: FinalityProof = prove_finality( - &test_blockchain(), - |_, _, _| Ok(vec![vec![42]]), - header(2).hash(), - ).unwrap().and_then(|p| Decode::decode(&mut &p[..])).unwrap(); - proof_of_2.finalization_path.remove(0); - - // block for which we're trying to request finality proof is missing from finalization_path - assert_eq!(do_check_finality_proof::( - |_| Ok(Vec::::new().encode()), - header(1), - (2, header(2).hash()), - 1, - proof_of_2.encode(), - ).is_err(), true); - } - - #[test] - fn finality_proof_check_fails_when_justified_block_is_not_included() { - let mut proof_of_2: FinalityProof = prove_finality( - &test_blockchain(), - |_, _, _| Ok(vec![vec![42]]), - header(2).hash(), - ).unwrap().and_then(|p| Decode::decode(&mut &p[..])).unwrap(); - proof_of_2.finalization_path.remove(1); - - // justified block is missing from finalization_path - assert_eq!(do_check_finality_proof::( - |_| Ok(Vec::::new().encode()), - header(1), - (2, header(2).hash()), - 1, - proof_of_2.encode(), - ).is_err(), true); - } - - #[test] - fn finality_proof_check_fails_when_justification_verification_fails() { - #[derive(Encode, Decode)] - struct InvalidFinalityProof(Vec); - - impl ProvableJustification
for InvalidFinalityProof { - fn target_block(&self) -> (u64, H256) { (3, header(3).hash()) } - - fn verify(&self, _set_id: u64, _authorities: &VoterSet) -> ClientResult<()> { - Err(ClientErrorKind::Backend("test error".into()).into()) - } - } - - let mut proof_of_2: FinalityProof = prove_finality( - &test_blockchain(), - |_, _, _| Ok(vec![vec![42]]), - header(2).hash(), - ).unwrap().and_then(|p| Decode::decode(&mut &p[..])).unwrap(); - proof_of_2.finalization_path.remove(1); - - // justification is not valid - assert_eq!(do_check_finality_proof::( - |_| Ok(Vec::::new().encode()), - header(1), - (2, header(2).hash()), - 1, - proof_of_2.encode(), - ).is_err(), true); - } - - #[test] - fn finality_proof_check_works() { - let proof_of_2 = prove_finality(&test_blockchain(), |_, _, _| Ok(vec![vec![42]]), header(2).hash()) - .unwrap().unwrap(); - assert_eq!(do_check_finality_proof::( - |_| Ok(vec![ - (AuthorityId([1u8; 32]), 1u64), - (AuthorityId([2u8; 32]), 2u64), - (AuthorityId([3u8; 32]), 3u64), - ].encode()), - header(1), - (2, header(2).hash()), - 1, - proof_of_2, - ).unwrap(), vec![header(2), header(3)]); - } + use super::*; + use test_client::client::backend::NewBlockState; + use test_client::client::in_mem::Blockchain as InMemoryBlockchain; + use test_client::runtime::{Block, Header}; + + type FinalityProof = super::FinalityProof>; + + #[derive(Encode, Decode)] + struct ValidFinalityProof(Vec); + + impl ProvableJustification
for ValidFinalityProof { + fn target_block(&self) -> (u64, H256) { + (3, header(3).hash()) + } + + fn verify(&self, set_id: u64, authorities: &VoterSet) -> ClientResult<()> { + assert_eq!(set_id, 1); + assert_eq!( + authorities, + &vec![ + (AuthorityId([1u8; 32]), 1), + (AuthorityId([2u8; 32]), 2), + (AuthorityId([3u8; 32]), 3), + ] + .into_iter() + .collect() + ); + Ok(()) + } + } + + fn header(number: u64) -> Header { + let parent_hash = match number { + 0 => Default::default(), + _ => header(number - 1).hash(), + }; + Header::new( + number, + H256::from_low_u64_be(0), + H256::from_low_u64_be(0), + parent_hash, + Default::default(), + ) + } + + fn side_header(number: u64) -> Header { + Header::new( + number, + H256::from_low_u64_be(0), + H256::from_low_u64_be(1), + header(number - 1).hash(), + Default::default(), + ) + } + + fn test_blockchain() -> InMemoryBlockchain { + let blockchain = InMemoryBlockchain::::new(); + blockchain + .insert( + header(0).hash(), + header(0), + Some(vec![0]), + None, + NewBlockState::Final, + ) + .unwrap(); + blockchain + .insert( + header(1).hash(), + header(1), + Some(vec![1]), + None, + NewBlockState::Final, + ) + .unwrap(); + blockchain + .insert(header(2).hash(), header(2), None, None, NewBlockState::Best) + .unwrap(); + blockchain + .insert( + header(3).hash(), + header(3), + Some(vec![3]), + None, + NewBlockState::Final, + ) + .unwrap(); + blockchain + } + + #[test] + fn finality_proof_is_not_generated_for_non_final_block() { + let blockchain = test_blockchain(); + blockchain + .insert(header(4).hash(), header(4), None, None, NewBlockState::Best) + .unwrap(); + + // when asking for finality of block 4, None is returned + let proof_of_4 = + prove_finality(&blockchain, |_, _, _| Ok(vec![vec![42]]), header(4).hash()).unwrap(); + assert_eq!(proof_of_4, None); + } + + #[test] + fn finality_proof_fails_for_non_canonical_block() { + let blockchain = test_blockchain(); + blockchain + .insert(header(4).hash(), header(4), None, None, NewBlockState::Best) + .unwrap(); + blockchain + .insert( + side_header(4).hash(), + side_header(4), + None, + None, + NewBlockState::Best, + ) + .unwrap(); + blockchain + .insert( + header(5).hash(), + header(5), + Some(vec![5]), + None, + NewBlockState::Final, + ) + .unwrap(); + + // when asking for finality of side-block 42, None is returned + let proof_of_side_4_fails = prove_finality( + &blockchain, + |_, _, _| Ok(vec![vec![42]]), + H256::from_low_u64_be(42), + ) + .is_err(); + assert_eq!(proof_of_side_4_fails, true); + } + + #[test] + fn finality_proof_fails_if_no_justification_known() { + let blockchain = test_blockchain(); + blockchain + .insert( + header(4).hash(), + header(4), + None, + None, + NewBlockState::Final, + ) + .unwrap(); + + // when asking for finality of block 4, search for justification failing + let proof_of_4_fails = prove_finality( + &blockchain, + |_, _, _| Ok(vec![vec![42]]), + H256::from_low_u64_be(42), + ) + .is_err(); + assert_eq!(proof_of_4_fails, true); + } + + #[test] + fn prove_finality_is_generated() { + let blockchain = test_blockchain(); + + // when asking for finality of block 2, justification of 3 is returned + let proof_of_2: FinalityProof = + prove_finality(&blockchain, |_, _, _| Ok(vec![vec![42]]), header(2).hash()) + .unwrap() + .and_then(|p| Decode::decode(&mut &p[..])) + .unwrap(); + assert_eq!( + proof_of_2, + FinalityProof { + finalization_path: vec![header(2), header(3)], + justification: vec![3], + authorities_proof: vec![vec![42]], + } + ); + + // when asking for finality of block 
3, justification of 3 is returned + let proof_of_3: FinalityProof = + prove_finality(&blockchain, |_, _, _| Ok(vec![vec![42]]), header(3).hash()) + .unwrap() + .and_then(|p| Decode::decode(&mut &p[..])) + .unwrap(); + assert_eq!( + proof_of_3, + FinalityProof { + finalization_path: vec![header(3)], + justification: vec![3], + authorities_proof: vec![vec![42]], + } + ); + } + + #[test] + fn finality_proof_check_fails_when_block_is_not_included() { + let mut proof_of_2: FinalityProof = prove_finality( + &test_blockchain(), + |_, _, _| Ok(vec![vec![42]]), + header(2).hash(), + ) + .unwrap() + .and_then(|p| Decode::decode(&mut &p[..])) + .unwrap(); + proof_of_2.finalization_path.remove(0); + + // block for which we're trying to request finality proof is missing from finalization_path + assert_eq!( + do_check_finality_proof::( + |_| Ok(Vec::::new().encode()), + header(1), + (2, header(2).hash()), + 1, + proof_of_2.encode(), + ) + .is_err(), + true + ); + } + + #[test] + fn finality_proof_check_fails_when_justified_block_is_not_included() { + let mut proof_of_2: FinalityProof = prove_finality( + &test_blockchain(), + |_, _, _| Ok(vec![vec![42]]), + header(2).hash(), + ) + .unwrap() + .and_then(|p| Decode::decode(&mut &p[..])) + .unwrap(); + proof_of_2.finalization_path.remove(1); + + // justified block is missing from finalization_path + assert_eq!( + do_check_finality_proof::( + |_| Ok(Vec::::new().encode()), + header(1), + (2, header(2).hash()), + 1, + proof_of_2.encode(), + ) + .is_err(), + true + ); + } + + #[test] + fn finality_proof_check_fails_when_justification_verification_fails() { + #[derive(Encode, Decode)] + struct InvalidFinalityProof(Vec); + + impl ProvableJustification
for InvalidFinalityProof { + fn target_block(&self) -> (u64, H256) { + (3, header(3).hash()) + } + + fn verify( + &self, + _set_id: u64, + _authorities: &VoterSet, + ) -> ClientResult<()> { + Err(ClientErrorKind::Backend("test error".into()).into()) + } + } + + let mut proof_of_2: FinalityProof = prove_finality( + &test_blockchain(), + |_, _, _| Ok(vec![vec![42]]), + header(2).hash(), + ) + .unwrap() + .and_then(|p| Decode::decode(&mut &p[..])) + .unwrap(); + proof_of_2.finalization_path.remove(1); + + // justification is not valid + assert_eq!( + do_check_finality_proof::( + |_| Ok(Vec::::new().encode()), + header(1), + (2, header(2).hash()), + 1, + proof_of_2.encode(), + ) + .is_err(), + true + ); + } + + #[test] + fn finality_proof_check_works() { + let proof_of_2 = prove_finality( + &test_blockchain(), + |_, _, _| Ok(vec![vec![42]]), + header(2).hash(), + ) + .unwrap() + .unwrap(); + assert_eq!( + do_check_finality_proof::( + |_| Ok(vec![ + (AuthorityId([1u8; 32]), 1u64), + (AuthorityId([2u8; 32]), 2u64), + (AuthorityId([3u8; 32]), 3u64), + ] + .encode()), + header(1), + (2, header(2).hash()), + 1, + proof_of_2, + ) + .unwrap(), + vec![header(2), header(3)] + ); + } } diff --git a/core/finality-grandpa/src/import.rs b/core/finality-grandpa/src/import.rs index 5fa20fc2f6..37e25c8e62 100644 --- a/core/finality-grandpa/src/import.rs +++ b/core/finality-grandpa/src/import.rs @@ -14,35 +14,35 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use std::{sync::Arc, collections::HashMap}; +use std::{collections::HashMap, sync::Arc}; -use log::{debug, trace, info}; -use parity_codec::Encode; use futures::sync::mpsc; +use log::{debug, info, trace}; +use parity_codec::Encode; use parking_lot::RwLockWriteGuard; -use client::{blockchain, CallExecutor, Client}; -use client::blockchain::HeaderBackend; use client::backend::Backend; +use client::blockchain::HeaderBackend; use client::runtime_api::ApiExt; +use client::{blockchain, CallExecutor, Client}; use consensus_common::{ - BlockImport, Error as ConsensusError, ErrorKind as ConsensusErrorKind, - ImportBlock, ImportResult, JustificationImport, well_known_cache_keys, + well_known_cache_keys, BlockImport, Error as ConsensusError, ErrorKind as ConsensusErrorKind, + ImportBlock, ImportResult, JustificationImport, }; use fg_primitives::GrandpaApi; -use runtime_primitives::Justification; use runtime_primitives::generic::BlockId; use runtime_primitives::traits::{ - Block as BlockT, DigestFor, DigestItemFor, DigestItem, - Header as HeaderT, NumberFor, ProvideRuntimeApi, + Block as BlockT, DigestFor, DigestItem, DigestItemFor, Header as HeaderT, NumberFor, + ProvideRuntimeApi, }; -use substrate_primitives::{H256, ed25519, Blake2Hasher}; +use runtime_primitives::Justification; +use substrate_primitives::{ed25519, Blake2Hasher, H256}; -use crate::{Error, CommandOrError, NewAuthoritySet, VoterCommand}; -use crate::authorities::{AuthoritySet, SharedAuthoritySet, DelayKind, PendingChange}; +use crate::authorities::{AuthoritySet, DelayKind, PendingChange, SharedAuthoritySet}; use crate::consensus_changes::SharedConsensusChanges; use crate::environment::{finalize_block, is_descendent_of}; use crate::justification::GrandpaJustification; +use crate::{CommandOrError, Error, NewAuthoritySet, VoterCommand}; use ed25519::Public as AuthorityId; @@ -55,542 +55,578 @@ use ed25519::Public as AuthorityId; /// /// When using GRANDPA, the block import worker should be using this block import /// object. 
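// A minimal, self-contained sketch of the wrapper pattern used by
// `GrandpaBlockImport` below: hold an inner import target, perform
// authority-set bookkeeping around the call, and otherwise delegate.
// The trait and types here are simplified, illustrative stand-ins,
// not the real `consensus_common` API.
trait SimpleBlockImport {
    fn import_block(&self, block_hash: u64) -> Result<(), String>;
}

struct InnerImport;

impl SimpleBlockImport for InnerImport {
    fn import_block(&self, _block_hash: u64) -> Result<(), String> {
        Ok(())
    }
}

struct WrappingImport<I: SimpleBlockImport> {
    inner: I,
}

impl<I: SimpleBlockImport> SimpleBlockImport for WrappingImport<I> {
    fn import_block(&self, block_hash: u64) -> Result<(), String> {
        // pre-import bookkeeping would go here (e.g. registering a pending
        // authority-set change, to be reverted if the inner import fails) ...
        let result = self.inner.import_block(block_hash);
        // ... and post-import work here (e.g. emitting voter commands).
        result
    }
}

fn main() {
    let import = WrappingImport { inner: InnerImport };
    assert!(import.import_block(42).is_ok());
}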
-pub struct GrandpaBlockImport, RA, PRA> { - inner: Arc>, - authority_set: SharedAuthoritySet>, - send_voter_commands: mpsc::UnboundedSender>>, - consensus_changes: SharedConsensusChanges>, - api: Arc, +pub struct GrandpaBlockImport, RA, PRA> { + inner: Arc>, + authority_set: SharedAuthoritySet>, + send_voter_commands: mpsc::UnboundedSender>>, + consensus_changes: SharedConsensusChanges>, + api: Arc, } -impl, RA, PRA> JustificationImport - for GrandpaBlockImport where - NumberFor: grandpa::BlockNumberOps, - B: Backend + 'static, - E: CallExecutor + 'static + Clone + Send + Sync, - DigestFor: Encode, - DigestItemFor: DigestItem, - RA: Send + Sync, - PRA: ProvideRuntimeApi, - PRA::Api: GrandpaApi, +impl, RA, PRA> JustificationImport + for GrandpaBlockImport +where + NumberFor: grandpa::BlockNumberOps, + B: Backend + 'static, + E: CallExecutor + 'static + Clone + Send + Sync, + DigestFor: Encode, + DigestItemFor: DigestItem, + RA: Send + Sync, + PRA: ProvideRuntimeApi, + PRA::Api: GrandpaApi, { - type Error = ConsensusError; - - fn on_start(&self, link: &::consensus_common::import_queue::Link) { - let chain_info = match self.inner.info() { - Ok(info) => info.chain, - _ => return, - }; - - // request justifications for all pending changes for which change blocks have already been imported - let authorities = self.authority_set.inner().read(); - for pending_change in authorities.pending_changes() { - if pending_change.delay_kind == DelayKind::Finalized && - pending_change.effective_number() > chain_info.finalized_number && - pending_change.effective_number() <= chain_info.best_number - { - let effective_block_hash = self.inner.best_containing( - pending_change.canon_hash, - Some(pending_change.effective_number()), - ); - - if let Ok(Some(hash)) = effective_block_hash { - if let Ok(Some(header)) = self.inner.header(&BlockId::Hash(hash)) { - if *header.number() == pending_change.effective_number() { - link.request_justification(&header.hash(), *header.number()); - } - } - } - } - } - } - - fn import_justification( - &self, - hash: Block::Hash, - number: NumberFor, - justification: Justification, - ) -> Result<(), Self::Error> { - self.import_justification(hash, number, justification, false) - } + type Error = ConsensusError; + + fn on_start(&self, link: &::consensus_common::import_queue::Link) { + let chain_info = match self.inner.info() { + Ok(info) => info.chain, + _ => return, + }; + + // request justifications for all pending changes for which change blocks have already been imported + let authorities = self.authority_set.inner().read(); + for pending_change in authorities.pending_changes() { + if pending_change.delay_kind == DelayKind::Finalized + && pending_change.effective_number() > chain_info.finalized_number + && pending_change.effective_number() <= chain_info.best_number + { + let effective_block_hash = self.inner.best_containing( + pending_change.canon_hash, + Some(pending_change.effective_number()), + ); + + if let Ok(Some(hash)) = effective_block_hash { + if let Ok(Some(header)) = self.inner.header(&BlockId::Hash(hash)) { + if *header.number() == pending_change.effective_number() { + link.request_justification(&header.hash(), *header.number()); + } + } + } + } + } + } + + fn import_justification( + &self, + hash: Block::Hash, + number: NumberFor, + justification: Justification, + ) -> Result<(), Self::Error> { + self.import_justification(hash, number, justification, false) + } } enum AppliedChanges { - Standard(bool), // true if the change is ready to be applied (i.e. 
it's a root) - Forced(NewAuthoritySet), - None, + Standard(bool), // true if the change is ready to be applied (i.e. it's a root) + Forced(NewAuthoritySet), + None, } impl AppliedChanges { - fn needs_justification(&self) -> bool { - match *self { - AppliedChanges::Standard(_) => true, - AppliedChanges::Forced(_) | AppliedChanges::None => false, - } - } + fn needs_justification(&self) -> bool { + match *self { + AppliedChanges::Standard(_) => true, + AppliedChanges::Forced(_) | AppliedChanges::None => false, + } + } } struct PendingSetChanges<'a, Block: 'a + BlockT> { - just_in_case: Option<( - AuthoritySet>, - RwLockWriteGuard<'a, AuthoritySet>>, - )>, - applied_changes: AppliedChanges>, - do_pause: bool, + just_in_case: Option<( + AuthoritySet>, + RwLockWriteGuard<'a, AuthoritySet>>, + )>, + applied_changes: AppliedChanges>, + do_pause: bool, } impl<'a, Block: 'a + BlockT> PendingSetChanges<'a, Block> { - // revert the pending set change explicitly. - fn revert(self) { } - - fn defuse(mut self) -> (AppliedChanges>, bool) { - self.just_in_case = None; - let applied_changes = ::std::mem::replace(&mut self.applied_changes, AppliedChanges::None); - (applied_changes, self.do_pause) - } + // revert the pending set change explicitly. + fn revert(self) {} + + fn defuse(mut self) -> (AppliedChanges>, bool) { + self.just_in_case = None; + let applied_changes = ::std::mem::replace(&mut self.applied_changes, AppliedChanges::None); + (applied_changes, self.do_pause) + } } impl<'a, Block: 'a + BlockT> Drop for PendingSetChanges<'a, Block> { - fn drop(&mut self) { - if let Some((old_set, mut authorities)) = self.just_in_case.take() { - *authorities = old_set; - } - } + fn drop(&mut self) { + if let Some((old_set, mut authorities)) = self.just_in_case.take() { + *authorities = old_set; + } + } } -impl, RA, PRA> GrandpaBlockImport where - NumberFor: grandpa::BlockNumberOps, - B: Backend + 'static, - E: CallExecutor + 'static + Clone + Send + Sync, - DigestFor: Encode, - DigestItemFor: DigestItem, - RA: Send + Sync, - PRA: ProvideRuntimeApi, - PRA::Api: GrandpaApi, +impl, RA, PRA> GrandpaBlockImport +where + NumberFor: grandpa::BlockNumberOps, + B: Backend + 'static, + E: CallExecutor + 'static + Clone + Send + Sync, + DigestFor: Encode, + DigestItemFor: DigestItem, + RA: Send + Sync, + PRA: ProvideRuntimeApi, + PRA::Api: GrandpaApi, { - // check for a new authority set change. - fn check_new_change(&self, header: &Block::Header, hash: Block::Hash) - -> Result>>, ConsensusError> - { - let at = BlockId::hash(*header.parent_hash()); - let digest = header.digest(); - - let api = self.api.runtime_api(); - - // check for forced change. - { - let maybe_change = api.grandpa_forced_change( - &at, - digest, - ); - - match maybe_change { - Err(e) => match api.has_api_with::, _>(&at, |v| v >= 2) { - Err(e) => return Err(ConsensusErrorKind::ClientImport(e.to_string()).into()), - Ok(true) => { - // API version is high enough to support forced changes - // but got error, so it is legitimate. - return Err(ConsensusErrorKind::ClientImport(e.to_string()).into()) - }, - Ok(false) => { - // API version isn't high enough to support forced changes - }, - }, - Ok(None) => {}, - Ok(Some((median_last_finalized, change))) => return Ok(Some(PendingChange { - next_authorities: change.next_authorities, - delay: change.delay, - canon_height: *header.number(), - canon_hash: hash, - delay_kind: DelayKind::Best { median_last_finalized }, - })), - } - } - - // check normal scheduled change. 
- { - let maybe_change = api.grandpa_pending_change( - &at, - digest, - ); - - match maybe_change { - Err(e) => Err(ConsensusErrorKind::ClientImport(e.to_string()).into()), - Ok(Some(change)) => Ok(Some(PendingChange { - next_authorities: change.next_authorities, - delay: change.delay, - canon_height: *header.number(), - canon_hash: hash, - delay_kind: DelayKind::Finalized, - })), - Ok(None) => Ok(None), - } - } - } - - fn make_authorities_changes<'a>(&'a self, block: &mut ImportBlock, hash: Block::Hash) - -> Result, ConsensusError> - { - // when we update the authorities, we need to hold the lock - // until the block is written to prevent a race if we need to restore - // the old authority set on error or panic. - struct InnerGuard<'a, T: 'a> { - old: Option, - guard: Option>, - } - - impl<'a, T: 'a> InnerGuard<'a, T> { - fn as_mut(&mut self) -> &mut T { - &mut **self.guard.as_mut().expect("only taken on deconstruction; qed") - } - - fn set_old(&mut self, old: T) { - if self.old.is_none() { - // ignore "newer" old changes. - self.old = Some(old); - } - } - - fn consume(mut self) -> Option<(T, RwLockWriteGuard<'a, T>)> { - if let Some(old) = self.old.take() { - Some((old, self.guard.take().expect("only taken on deconstruction; qed"))) - } else { - None - } - } - } - - impl<'a, T: 'a> Drop for InnerGuard<'a, T> { - fn drop(&mut self) { - if let (Some(mut guard), Some(old)) = (self.guard.take(), self.old.take()) { - *guard = old; - } - } - } - - let number = block.header.number().clone(); - let maybe_change = self.check_new_change( - &block.header, - hash, - )?; - - // returns a function for checking whether a block is a descendent of another - // consistent with querying client directly after importing the block. - let parent_hash = *block.header.parent_hash(); - let is_descendent_of = is_descendent_of(&self.inner, Some((&hash, &parent_hash))); - - let mut guard = InnerGuard { - guard: Some(self.authority_set.inner().write()), - old: None, - }; - - // whether to pause the old authority set -- happens after import - // of a forced change block. - let mut do_pause = false; - - // add any pending changes. - if let Some(change) = maybe_change { - let old = guard.as_mut().clone(); - guard.set_old(old); - - if let DelayKind::Best { .. } = change.delay_kind { - do_pause = true; - } - - guard.as_mut().add_pending_change( - change, - &is_descendent_of, - ).map_err(|e| ConsensusError::from(ConsensusErrorKind::ClientImport(e.to_string())))?; - } - - let applied_changes = { - let forced_change_set = guard.as_mut().apply_forced_changes(hash, number, &is_descendent_of) - .map_err(|e| ConsensusErrorKind::ClientImport(e.to_string())) - .map_err(ConsensusError::from)?; - - if let Some((median_last_finalized_number, new_set)) = forced_change_set { - let new_authorities = { - let (set_id, new_authorities) = new_set.current(); - - // we will use the median last finalized number as a hint - // for the canon block the new authority set should start - // with. we use the minimum between the median and the local - // best finalized block. - let best_finalized_number = self.inner.backend().blockchain().info() - .map_err(|e| ConsensusErrorKind::ClientImport(e.to_string()))? - .finalized_number; - - let canon_number = best_finalized_number.min(median_last_finalized_number); - - let canon_hash = + // check for a new authority set change. 
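// Note on the function below: it probes the runtime in two steps. It first
// asks for a *forced* change, guarding against older runtimes with a runtime
// API version check (`has_api_with(.., |v| v >= 2)`), since those do not
// expose `grandpa_forced_change`. Only if no forced change is found does it
// fall back to the normal *scheduled* change. Forced changes carry a
// `median_last_finalized` hint and use `DelayKind::Best`; scheduled changes
// use `DelayKind::Finalized`.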
+ fn check_new_change( + &self, + header: &Block::Header, + hash: Block::Hash, + ) -> Result>>, ConsensusError> { + let at = BlockId::hash(*header.parent_hash()); + let digest = header.digest(); + + let api = self.api.runtime_api(); + + // check for forced change. + { + let maybe_change = api.grandpa_forced_change(&at, digest); + + match maybe_change { + Err(e) => match api.has_api_with::, _>(&at, |v| v >= 2) { + Err(e) => return Err(ConsensusErrorKind::ClientImport(e.to_string()).into()), + Ok(true) => { + // API version is high enough to support forced changes + // but got error, so it is legitimate. + return Err(ConsensusErrorKind::ClientImport(e.to_string()).into()); + } + Ok(false) => { + // API version isn't high enough to support forced changes + } + }, + Ok(None) => {} + Ok(Some((median_last_finalized, change))) => { + return Ok(Some(PendingChange { + next_authorities: change.next_authorities, + delay: change.delay, + canon_height: *header.number(), + canon_hash: hash, + delay_kind: DelayKind::Best { + median_last_finalized, + }, + })); + } + } + } + + // check normal scheduled change. + { + let maybe_change = api.grandpa_pending_change(&at, digest); + + match maybe_change { + Err(e) => Err(ConsensusErrorKind::ClientImport(e.to_string()).into()), + Ok(Some(change)) => Ok(Some(PendingChange { + next_authorities: change.next_authorities, + delay: change.delay, + canon_height: *header.number(), + canon_hash: hash, + delay_kind: DelayKind::Finalized, + })), + Ok(None) => Ok(None), + } + } + } + + fn make_authorities_changes<'a>( + &'a self, + block: &mut ImportBlock, + hash: Block::Hash, + ) -> Result, ConsensusError> { + // when we update the authorities, we need to hold the lock + // until the block is written to prevent a race if we need to restore + // the old authority set on error or panic. + struct InnerGuard<'a, T: 'a> { + old: Option, + guard: Option>, + } + + impl<'a, T: 'a> InnerGuard<'a, T> { + fn as_mut(&mut self) -> &mut T { + &mut **self + .guard + .as_mut() + .expect("only taken on deconstruction; qed") + } + + fn set_old(&mut self, old: T) { + if self.old.is_none() { + // ignore "newer" old changes. + self.old = Some(old); + } + } + + fn consume(mut self) -> Option<(T, RwLockWriteGuard<'a, T>)> { + if let Some(old) = self.old.take() { + Some(( + old, + self.guard + .take() + .expect("only taken on deconstruction; qed"), + )) + } else { + None + } + } + } + + impl<'a, T: 'a> Drop for InnerGuard<'a, T> { + fn drop(&mut self) { + if let (Some(mut guard), Some(old)) = (self.guard.take(), self.old.take()) { + *guard = old; + } + } + } + + let number = block.header.number().clone(); + let maybe_change = self.check_new_change(&block.header, hash)?; + + // returns a function for checking whether a block is a descendent of another + // consistent with querying client directly after importing the block. + let parent_hash = *block.header.parent_hash(); + let is_descendent_of = is_descendent_of(&self.inner, Some((&hash, &parent_hash))); + + let mut guard = InnerGuard { + guard: Some(self.authority_set.inner().write()), + old: None, + }; + + // whether to pause the old authority set -- happens after import + // of a forced change block. + let mut do_pause = false; + + // add any pending changes. + if let Some(change) = maybe_change { + let old = guard.as_mut().clone(); + guard.set_old(old); + + if let DelayKind::Best { .. 
} = change.delay_kind { + do_pause = true; + } + + guard + .as_mut() + .add_pending_change(change, &is_descendent_of) + .map_err(|e| { + ConsensusError::from(ConsensusErrorKind::ClientImport(e.to_string())) + })?; + } + + let applied_changes = { + let forced_change_set = guard + .as_mut() + .apply_forced_changes(hash, number, &is_descendent_of) + .map_err(|e| ConsensusErrorKind::ClientImport(e.to_string())) + .map_err(ConsensusError::from)?; + + if let Some((median_last_finalized_number, new_set)) = forced_change_set { + let new_authorities = { + let (set_id, new_authorities) = new_set.current(); + + // we will use the median last finalized number as a hint + // for the canon block the new authority set should start + // with. we use the minimum between the median and the local + // best finalized block. + let best_finalized_number = self + .inner + .backend() + .blockchain() + .info() + .map_err(|e| ConsensusErrorKind::ClientImport(e.to_string()))? + .finalized_number; + + let canon_number = best_finalized_number.min(median_last_finalized_number); + + let canon_hash = self.inner.backend().blockchain().header(BlockId::Number(canon_number)) .map_err(|e| ConsensusErrorKind::ClientImport(e.to_string()))? .expect("the given block number is less or equal than the current best finalized number; \ current best finalized number must exist in chain; qed.") .hash(); - NewAuthoritySet { - canon_number, - canon_hash, - set_id, - authorities: new_authorities.to_vec(), - } - }; - let old = ::std::mem::replace(guard.as_mut(), new_set); - guard.set_old(old); - - AppliedChanges::Forced(new_authorities) - } else { - let did_standard = guard.as_mut().enacts_standard_change(hash, number, &is_descendent_of) - .map_err(|e| ConsensusErrorKind::ClientImport(e.to_string())) - .map_err(ConsensusError::from)?; - - if let Some(root) = did_standard { - AppliedChanges::Standard(root) - } else { - AppliedChanges::None - } - } - }; - - // consume the guard safely and write necessary changes. - let just_in_case = guard.consume(); - if let Some((_, ref authorities)) = just_in_case { - let authorities_change = match applied_changes { - AppliedChanges::Forced(ref new) => Some(new), - AppliedChanges::Standard(_) => None, // the change isn't actually applied yet. - AppliedChanges::None => None, - }; - - crate::aux_schema::update_authority_set( - authorities, - authorities_change, - |insert| block.auxiliary.extend( - insert.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec()))) - ) - ); - } - - Ok(PendingSetChanges { just_in_case, applied_changes, do_pause }) - } + NewAuthoritySet { + canon_number, + canon_hash, + set_id, + authorities: new_authorities.to_vec(), + } + }; + let old = ::std::mem::replace(guard.as_mut(), new_set); + guard.set_old(old); + + AppliedChanges::Forced(new_authorities) + } else { + let did_standard = guard + .as_mut() + .enacts_standard_change(hash, number, &is_descendent_of) + .map_err(|e| ConsensusErrorKind::ClientImport(e.to_string())) + .map_err(ConsensusError::from)?; + + if let Some(root) = did_standard { + AppliedChanges::Standard(root) + } else { + AppliedChanges::None + } + } + }; + + // consume the guard safely and write necessary changes. + let just_in_case = guard.consume(); + if let Some((_, ref authorities)) = just_in_case { + let authorities_change = match applied_changes { + AppliedChanges::Forced(ref new) => Some(new), + AppliedChanges::Standard(_) => None, // the change isn't actually applied yet. 
+ AppliedChanges::None => None, + }; + + crate::aux_schema::update_authority_set(authorities, authorities_change, |insert| { + block + .auxiliary + .extend(insert.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec())))) + }); + } + + Ok(PendingSetChanges { + just_in_case, + applied_changes, + do_pause, + }) + } } -impl, RA, PRA> BlockImport - for GrandpaBlockImport where - NumberFor: grandpa::BlockNumberOps, - B: Backend + 'static, - E: CallExecutor + 'static + Clone + Send + Sync, - DigestFor: Encode, - DigestItemFor: DigestItem, - RA: Send + Sync, - PRA: ProvideRuntimeApi, - PRA::Api: GrandpaApi, +impl, RA, PRA> BlockImport + for GrandpaBlockImport +where + NumberFor: grandpa::BlockNumberOps, + B: Backend + 'static, + E: CallExecutor + 'static + Clone + Send + Sync, + DigestFor: Encode, + DigestItemFor: DigestItem, + RA: Send + Sync, + PRA: ProvideRuntimeApi, + PRA::Api: GrandpaApi, { - type Error = ConsensusError; - - fn import_block(&self, mut block: ImportBlock, new_cache: HashMap>) - -> Result - { - let hash = block.post_header().hash(); - let number = block.header.number().clone(); - - // early exit if block already in chain, otherwise the check for - // authority changes will error when trying to re-import a change block - match self.inner.backend().blockchain().status(BlockId::Hash(hash)) { - Ok(blockchain::BlockStatus::InChain) => return Ok(ImportResult::AlreadyInChain), - Ok(blockchain::BlockStatus::Unknown) => {}, - Err(e) => return Err(ConsensusErrorKind::ClientImport(e.to_string()).into()), - } - - let pending_changes = self.make_authorities_changes(&mut block, hash)?; - - // we don't want to finalize on `inner.import_block` - let mut justification = block.justification.take(); - let enacts_consensus_change = !new_cache.is_empty(); - let import_result = self.inner.import_block(block, new_cache); - - let mut imported_aux = { - match import_result { - Ok(ImportResult::Imported(aux)) => aux, - Ok(r) => { - debug!(target: "afg", "Restoring old authority set after block import result: {:?}", r); - pending_changes.revert(); - return Ok(r); - }, - Err(e) => { - debug!(target: "afg", "Restoring old authority set after block import error: {:?}", e); - pending_changes.revert(); - return Err(ConsensusErrorKind::ClientImport(e.to_string()).into()); - }, - } - }; - - let (applied_changes, do_pause) = pending_changes.defuse(); - - // Send the pause signal after import but BEFORE sending a `ChangeAuthorities` message. - if do_pause { - let _ = self.send_voter_commands.unbounded_send( - VoterCommand::Pause(format!("Forced change scheduled after inactivity")) - ); - } - - let needs_justification = applied_changes.needs_justification(); - - match applied_changes { - AppliedChanges::Forced(new) => { - // NOTE: when we do a force change we are "discrediting" the old set so we - // ignore any justifications from them. this block may contain a justification - // which should be checked and imported below against the new authority - // triggered by this forced change. 
the new grandpa voter will start at the - // last median finalized block (which is before the block that enacts the - // change), full nodes syncing the chain will not be able to successfully - // import justifications for those blocks since their local authority set view - // is still of the set before the forced change was enacted, still after #1867 - // they should import the block and discard the justification, and they will - // then request a justification from sync if it's necessary (which they should - // then be able to successfully validate). - let _ = self.send_voter_commands.unbounded_send(VoterCommand::ChangeAuthorities(new)); - - // we must clear all pending justifications requests, presumably they won't be - // finalized hence why this forced changes was triggered - imported_aux.clear_justification_requests = true; - }, - AppliedChanges::Standard(false) => { - // we can't apply this change yet since there are other dependent changes that we - // need to apply first, drop any justification that might have been provided with - // the block to make sure we request them from `sync` which will ensure they'll be - // applied in-order. - justification.take(); - }, - _ => {}, - } - - if !needs_justification && !enacts_consensus_change { - return Ok(ImportResult::Imported(imported_aux)); - } - - match justification { - Some(justification) => { - self.import_justification(hash, number, justification, needs_justification).unwrap_or_else(|err| { + type Error = ConsensusError; + + fn import_block( + &self, + mut block: ImportBlock, + new_cache: HashMap>, + ) -> Result { + let hash = block.post_header().hash(); + let number = block.header.number().clone(); + + // early exit if block already in chain, otherwise the check for + // authority changes will error when trying to re-import a change block + match self + .inner + .backend() + .blockchain() + .status(BlockId::Hash(hash)) + { + Ok(blockchain::BlockStatus::InChain) => return Ok(ImportResult::AlreadyInChain), + Ok(blockchain::BlockStatus::Unknown) => {} + Err(e) => return Err(ConsensusErrorKind::ClientImport(e.to_string()).into()), + } + + let pending_changes = self.make_authorities_changes(&mut block, hash)?; + + // we don't want to finalize on `inner.import_block` + let mut justification = block.justification.take(); + let enacts_consensus_change = !new_cache.is_empty(); + let import_result = self.inner.import_block(block, new_cache); + + let mut imported_aux = { + match import_result { + Ok(ImportResult::Imported(aux)) => aux, + Ok(r) => { + debug!(target: "afg", "Restoring old authority set after block import result: {:?}", r); + pending_changes.revert(); + return Ok(r); + } + Err(e) => { + debug!(target: "afg", "Restoring old authority set after block import error: {:?}", e); + pending_changes.revert(); + return Err(ConsensusErrorKind::ClientImport(e.to_string()).into()); + } + } + }; + + let (applied_changes, do_pause) = pending_changes.defuse(); + + // Send the pause signal after import but BEFORE sending a `ChangeAuthorities` message. + if do_pause { + let _ = self + .send_voter_commands + .unbounded_send(VoterCommand::Pause(format!( + "Forced change scheduled after inactivity" + ))); + } + + let needs_justification = applied_changes.needs_justification(); + + match applied_changes { + AppliedChanges::Forced(new) => { + // NOTE: when we do a force change we are "discrediting" the old set so we + // ignore any justifications from them. 
this block may contain a justification + // which should be checked and imported below against the new authority + // triggered by this forced change. the new grandpa voter will start at the + // last median finalized block (which is before the block that enacts the + // change), full nodes syncing the chain will not be able to successfully + // import justifications for those blocks since their local authority set view + // is still of the set before the forced change was enacted, still after #1867 + // they should import the block and discard the justification, and they will + // then request a justification from sync if it's necessary (which they should + // then be able to successfully validate). + let _ = self + .send_voter_commands + .unbounded_send(VoterCommand::ChangeAuthorities(new)); + + // we must clear all pending justifications requests, presumably they won't be + // finalized hence why this forced changes was triggered + imported_aux.clear_justification_requests = true; + } + AppliedChanges::Standard(false) => { + // we can't apply this change yet since there are other dependent changes that we + // need to apply first, drop any justification that might have been provided with + // the block to make sure we request them from `sync` which will ensure they'll be + // applied in-order. + justification.take(); + } + _ => {} + } + + if !needs_justification && !enacts_consensus_change { + return Ok(ImportResult::Imported(imported_aux)); + } + + match justification { + Some(justification) => { + self.import_justification(hash, number, justification, needs_justification).unwrap_or_else(|err| { debug!(target: "finality", "Imported block #{} that enacts authority set change with \ invalid justification: {:?}, requesting justification from peers.", number, err); imported_aux.bad_justification = true; imported_aux.needs_justification = true; }); - }, - None => { - if needs_justification { - trace!( - target: "finality", - "Imported unjustified block #{} that enacts authority set change, waiting for finality for enactment.", - number, - ); - } - - // we have imported block with consensus data changes, but without justification - // => remember to create justification when next block will be finalized - if enacts_consensus_change { - self.consensus_changes.lock().note_change((number, hash)); - } - - imported_aux.needs_justification = true; - } - } - - Ok(ImportResult::Imported(imported_aux)) - } - - fn check_block( - &self, - hash: Block::Hash, - parent_hash: Block::Hash, - ) -> Result { - self.inner.check_block(hash, parent_hash) - } + } + None => { + if needs_justification { + trace!( + target: "finality", + "Imported unjustified block #{} that enacts authority set change, waiting for finality for enactment.", + number, + ); + } + + // we have imported block with consensus data changes, but without justification + // => remember to create justification when next block will be finalized + if enacts_consensus_change { + self.consensus_changes.lock().note_change((number, hash)); + } + + imported_aux.needs_justification = true; + } + } + + Ok(ImportResult::Imported(imported_aux)) + } + + fn check_block( + &self, + hash: Block::Hash, + parent_hash: Block::Hash, + ) -> Result { + self.inner.check_block(hash, parent_hash) + } } -impl, RA, PRA> GrandpaBlockImport { - pub(crate) fn new( - inner: Arc>, - authority_set: SharedAuthoritySet>, - send_voter_commands: mpsc::UnboundedSender>>, - consensus_changes: SharedConsensusChanges>, - api: Arc, - ) -> GrandpaBlockImport { - GrandpaBlockImport { - 
inner, - authority_set, - send_voter_commands, - consensus_changes, - api, - } - } +impl, RA, PRA> GrandpaBlockImport { + pub(crate) fn new( + inner: Arc>, + authority_set: SharedAuthoritySet>, + send_voter_commands: mpsc::UnboundedSender>>, + consensus_changes: SharedConsensusChanges>, + api: Arc, + ) -> GrandpaBlockImport { + GrandpaBlockImport { + inner, + authority_set, + send_voter_commands, + consensus_changes, + api, + } + } } -impl, RA, PRA> GrandpaBlockImport - where - NumberFor: grandpa::BlockNumberOps, - B: Backend + 'static, - E: CallExecutor + 'static + Clone + Send + Sync, - RA: Send + Sync, +impl, RA, PRA> GrandpaBlockImport +where + NumberFor: grandpa::BlockNumberOps, + B: Backend + 'static, + E: CallExecutor + 'static + Clone + Send + Sync, + RA: Send + Sync, { - - /// Import a block justification and finalize the block. - /// - /// If `enacts_change` is set to true, then finalizing this block *must* - /// enact an authority set change, the function will panic otherwise. - fn import_justification( - &self, - hash: Block::Hash, - number: NumberFor, - justification: Justification, - enacts_change: bool, - ) -> Result<(), ConsensusError> { - let justification = GrandpaJustification::decode_and_verify( - justification, - self.authority_set.set_id(), - &self.authority_set.current_authorities(), - ); - - let justification = match justification { - Err(e) => return Err(ConsensusErrorKind::ClientImport(e.to_string()).into()), - Ok(justification) => justification, - }; - - let result = finalize_block( - &*self.inner, - &self.authority_set, - &self.consensus_changes, - None, - hash, - number, - justification.into(), - ); - - match result { - Err(CommandOrError::VoterCommand(command)) => { - info!(target: "finality", "Imported justification for block #{} that triggers \ + /// Import a block justification and finalize the block. + /// + /// If `enacts_change` is set to true, then finalizing this block *must* + /// enact an authority set change, the function will panic otherwise. 
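// The `enacts_change` contract documented above is enforced at the bottom of
// the function below: if `finalize_block` succeeds without producing a voter
// command, the code asserts `!enacts_change`. In other words, finalizing a
// change-enacting block must always surface as a `VoterCommand` (signaling
// the voter) rather than a plain `Ok`.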
+ fn import_justification( + &self, + hash: Block::Hash, + number: NumberFor, + justification: Justification, + enacts_change: bool, + ) -> Result<(), ConsensusError> { + let justification = GrandpaJustification::decode_and_verify( + justification, + self.authority_set.set_id(), + &self.authority_set.current_authorities(), + ); + + let justification = match justification { + Err(e) => return Err(ConsensusErrorKind::ClientImport(e.to_string()).into()), + Ok(justification) => justification, + }; + + let result = finalize_block( + &*self.inner, + &self.authority_set, + &self.consensus_changes, + None, + hash, + number, + justification.into(), + ); + + match result { + Err(CommandOrError::VoterCommand(command)) => { + info!(target: "finality", "Imported justification for block #{} that triggers \ command {}, signaling voter.", number, command); - if let Err(e) = self.send_voter_commands.unbounded_send(command) { - return Err(ConsensusErrorKind::ClientImport(e.to_string()).into()); - } - }, - Err(CommandOrError::Error(e)) => { - return Err(match e { - Error::Grandpa(error) => ConsensusErrorKind::ClientImport(error.to_string()), - Error::Network(error) => ConsensusErrorKind::ClientImport(error), - Error::Blockchain(error) => ConsensusErrorKind::ClientImport(error), - Error::Client(error) => ConsensusErrorKind::ClientImport(error.to_string()), - Error::Safety(error) => ConsensusErrorKind::ClientImport(error), - Error::Timer(error) => ConsensusErrorKind::ClientImport(error.to_string()), - }.into()); - }, - Ok(_) => { - assert!(!enacts_change, "returns Ok when no authority set change should be enacted; qed;"); - }, - } - - Ok(()) - } + if let Err(e) = self.send_voter_commands.unbounded_send(command) { + return Err(ConsensusErrorKind::ClientImport(e.to_string()).into()); + } + } + Err(CommandOrError::Error(e)) => { + return Err(match e { + Error::Grandpa(error) => ConsensusErrorKind::ClientImport(error.to_string()), + Error::Network(error) => ConsensusErrorKind::ClientImport(error), + Error::Blockchain(error) => ConsensusErrorKind::ClientImport(error), + Error::Client(error) => ConsensusErrorKind::ClientImport(error.to_string()), + Error::Safety(error) => ConsensusErrorKind::ClientImport(error), + Error::Timer(error) => ConsensusErrorKind::ClientImport(error.to_string()), + } + .into()); + } + Ok(_) => { + assert!( + !enacts_change, + "returns Ok when no authority set change should be enacted; qed;" + ); + } + } + + Ok(()) + } } diff --git a/core/finality-grandpa/src/justification.rs b/core/finality-grandpa/src/justification.rs index d837e6a308..1e3ea58236 100644 --- a/core/finality-grandpa/src/justification.rs +++ b/core/finality-grandpa/src/justification.rs @@ -16,19 +16,19 @@ use std::collections::{HashMap, HashSet}; -use client::{CallExecutor, Client}; use client::backend::Backend; use client::blockchain::HeaderBackend; use client::error::{Error as ClientError, ErrorKind as ClientErrorKind}; -use parity_codec::{Encode, Decode}; +use client::{CallExecutor, Client}; +use grandpa::Error as GrandpaError; use grandpa::VoterSet; -use grandpa::{Error as GrandpaError}; +use parity_codec::{Decode, Encode}; use runtime_primitives::generic::BlockId; -use runtime_primitives::traits::{NumberFor, Block as BlockT, Header as HeaderT}; -use substrate_primitives::{H256, ed25519, Blake2Hasher}; +use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, NumberFor}; +use substrate_primitives::{ed25519, Blake2Hasher, H256}; -use crate::{Commit, Error}; use crate::communication; +use crate::{Commit, Error}; 
use ed25519::Public as AuthorityId; @@ -42,179 +42,211 @@ use ed25519::Public as AuthorityId; /// nodes, and are used by syncing nodes to prove authority set handoffs. #[derive(Encode, Decode)] pub(crate) struct GrandpaJustification { - round: u64, - pub(crate) commit: Commit, - votes_ancestries: Vec, + round: u64, + pub(crate) commit: Commit, + votes_ancestries: Vec, } -impl> GrandpaJustification { - /// Create a GRANDPA justification from the given commit. This method - /// assumes the commit is valid and well-formed. - pub(crate) fn from_commit( - client: &Client, - round: u64, - commit: Commit, - ) -> Result, Error> where - B: Backend, - E: CallExecutor + Send + Sync, - RA: Send + Sync, - { - let mut votes_ancestries_hashes = HashSet::new(); - let mut votes_ancestries = Vec::new(); - - let error = || { - let msg = "invalid precommits for target commit".to_string(); - Err(Error::Client(ClientErrorKind::BadJustification(msg).into())) - }; - - for signed in commit.precommits.iter() { - let mut current_hash = signed.precommit.target_hash.clone(); - loop { - if current_hash == commit.target_hash { break; } - - match client.backend().blockchain().header(BlockId::Hash(current_hash))? { - Some(current_header) => { - if *current_header.number() <= commit.target_number { - return error(); - } - - let parent_hash = current_header.parent_hash().clone(); - if votes_ancestries_hashes.insert(current_hash) { - votes_ancestries.push(current_header); - } - current_hash = parent_hash; - }, - _ => return error(), - } - } - } - - Ok(GrandpaJustification { round, commit, votes_ancestries }) - } - - /// Decode a GRANDPA justification and validate the commit and the votes' - /// ancestry proofs. - pub(crate) fn decode_and_verify( - encoded: Vec, - set_id: u64, - voters: &VoterSet, - ) -> Result, ClientError> where - NumberFor: grandpa::BlockNumberOps, - { - GrandpaJustification::::decode(&mut &*encoded).ok_or_else(|| { - let msg = "failed to decode grandpa justification".to_string(); - ClientErrorKind::BadJustification(msg).into() - }).and_then(|just| just.verify(set_id, voters).map(|_| just)) - } - - /// Validate the commit and the votes' ancestry proofs. 
- pub(crate) fn verify(&self, set_id: u64, voters: &VoterSet) -> Result<(), ClientError> - where - NumberFor: grandpa::BlockNumberOps, - { - use grandpa::Chain; - - let ancestry_chain = AncestryChain::::new(&self.votes_ancestries); - - match grandpa::validate_commit( - &self.commit, - voters, - &ancestry_chain, - ) { - Ok(Some(_)) => {}, - _ => { - let msg = "invalid commit in grandpa justification".to_string(); - return Err(ClientErrorKind::BadJustification(msg).into()); - } - } - - let mut visited_hashes = HashSet::new(); - for signed in self.commit.precommits.iter() { - if let Err(_) = communication::check_message_sig::( - &grandpa::Message::Precommit(signed.precommit.clone()), - &signed.id, - &signed.signature, - self.round, - set_id, - ) { - return Err(ClientErrorKind::BadJustification( - "invalid signature for precommit in grandpa justification".to_string()).into()); - } - - if self.commit.target_hash == signed.precommit.target_hash { - continue; - } - - match ancestry_chain.ancestry(self.commit.target_hash, signed.precommit.target_hash) { - Ok(route) => { - // ancestry starts from parent hash but the precommit target hash has been visited - visited_hashes.insert(signed.precommit.target_hash); - for hash in route { - visited_hashes.insert(hash); - } - }, - _ => { - return Err(ClientErrorKind::BadJustification( - "invalid precommit ancestry proof in grandpa justification".to_string()).into()); - }, - } - } - - let ancestry_hashes = self.votes_ancestries - .iter() - .map(|h: &Block::Header| h.hash()) - .collect(); - - if visited_hashes != ancestry_hashes { - return Err(ClientErrorKind::BadJustification( - "invalid precommit ancestries in grandpa justification with unused headers".to_string()).into()); - } - - Ok(()) - } +impl> GrandpaJustification { + /// Create a GRANDPA justification from the given commit. This method + /// assumes the commit is valid and well-formed. + pub(crate) fn from_commit( + client: &Client, + round: u64, + commit: Commit, + ) -> Result, Error> + where + B: Backend, + E: CallExecutor + Send + Sync, + RA: Send + Sync, + { + let mut votes_ancestries_hashes = HashSet::new(); + let mut votes_ancestries = Vec::new(); + + let error = || { + let msg = "invalid precommits for target commit".to_string(); + Err(Error::Client(ClientErrorKind::BadJustification(msg).into())) + }; + + for signed in commit.precommits.iter() { + let mut current_hash = signed.precommit.target_hash.clone(); + loop { + if current_hash == commit.target_hash { + break; + } + + match client + .backend() + .blockchain() + .header(BlockId::Hash(current_hash))? + { + Some(current_header) => { + if *current_header.number() <= commit.target_number { + return error(); + } + + let parent_hash = current_header.parent_hash().clone(); + if votes_ancestries_hashes.insert(current_hash) { + votes_ancestries.push(current_header); + } + current_hash = parent_hash; + } + _ => return error(), + } + } + } + + Ok(GrandpaJustification { + round, + commit, + votes_ancestries, + }) + } + + /// Decode a GRANDPA justification and validate the commit and the votes' + /// ancestry proofs. 
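// Note the two-stage construction in the function below: decoding can fail
// (this codec version returns `Option`, mapped to a `BadJustification` error
// via `ok_or_else`), and a successfully decoded justification is only
// returned after `verify` checks the commit and ancestry proofs against the
// expected set id and voter set (`and_then`).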
+ pub(crate) fn decode_and_verify( + encoded: Vec, + set_id: u64, + voters: &VoterSet, + ) -> Result, ClientError> + where + NumberFor: grandpa::BlockNumberOps, + { + GrandpaJustification::::decode(&mut &*encoded) + .ok_or_else(|| { + let msg = "failed to decode grandpa justification".to_string(); + ClientErrorKind::BadJustification(msg).into() + }) + .and_then(|just| just.verify(set_id, voters).map(|_| just)) + } + + /// Validate the commit and the votes' ancestry proofs. + pub(crate) fn verify( + &self, + set_id: u64, + voters: &VoterSet, + ) -> Result<(), ClientError> + where + NumberFor: grandpa::BlockNumberOps, + { + use grandpa::Chain; + + let ancestry_chain = AncestryChain::::new(&self.votes_ancestries); + + match grandpa::validate_commit(&self.commit, voters, &ancestry_chain) { + Ok(Some(_)) => {} + _ => { + let msg = "invalid commit in grandpa justification".to_string(); + return Err(ClientErrorKind::BadJustification(msg).into()); + } + } + + let mut visited_hashes = HashSet::new(); + for signed in self.commit.precommits.iter() { + if let Err(_) = communication::check_message_sig::( + &grandpa::Message::Precommit(signed.precommit.clone()), + &signed.id, + &signed.signature, + self.round, + set_id, + ) { + return Err(ClientErrorKind::BadJustification( + "invalid signature for precommit in grandpa justification".to_string(), + ) + .into()); + } + + if self.commit.target_hash == signed.precommit.target_hash { + continue; + } + + match ancestry_chain.ancestry(self.commit.target_hash, signed.precommit.target_hash) { + Ok(route) => { + // ancestry starts from parent hash but the precommit target hash has been visited + visited_hashes.insert(signed.precommit.target_hash); + for hash in route { + visited_hashes.insert(hash); + } + } + _ => { + return Err(ClientErrorKind::BadJustification( + "invalid precommit ancestry proof in grandpa justification".to_string(), + ) + .into()); + } + } + } + + let ancestry_hashes = self + .votes_ancestries + .iter() + .map(|h: &Block::Header| h.hash()) + .collect(); + + if visited_hashes != ancestry_hashes { + return Err(ClientErrorKind::BadJustification( + "invalid precommit ancestries in grandpa justification with unused headers" + .to_string(), + ) + .into()); + } + + Ok(()) + } } /// A utility trait implementing `grandpa::Chain` using a given set of headers. /// This is useful when validating commits, using the given set of headers to /// verify a valid ancestry route to the target commit block. 
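// A minimal sketch of the ancestry walk that `AncestryChain` implements
// below, using plain u64 stand-ins for hashes. Starting from `block`, we
// repeatedly look up the current hash in the supplied parent map and step to
// its parent until we reach `base`; any miss means there is no proven route,
// mirroring `GrandpaError::NotDescendent` in the real code.
use std::collections::HashMap;

fn ancestry_route(
    parents: &HashMap<u64, u64>, // child hash -> parent hash (from headers)
    base: u64,
    block: u64,
) -> Option<Vec<u64>> {
    let mut route = Vec::new();
    let mut current = block;
    while current != base {
        // unknown hash: no proven route to `base`.
        let parent = *parents.get(&current)?;
        route.push(parent);
        current = parent;
    }
    route.pop(); // drop `base` itself, as the real implementation does.
    Some(route)
}

fn main() {
    // chain: 1 <- 2 <- 3; the route from base = 1 to block = 3 is [2].
    let parents: HashMap<u64, u64> = vec![(2, 1), (3, 2)].into_iter().collect();
    assert_eq!(ancestry_route(&parents, 1, 3), Some(vec![2]));
    // a hash outside the supplied headers yields no route.
    assert_eq!(ancestry_route(&parents, 1, 7), None);
}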
struct AncestryChain { - ancestry: HashMap, + ancestry: HashMap, } impl AncestryChain { - fn new(ancestry: &[Block::Header]) -> AncestryChain { - let ancestry: HashMap<_, _> = ancestry - .iter() - .cloned() - .map(|h: Block::Header| (h.hash(), h)) - .collect(); - - AncestryChain { ancestry } - } + fn new(ancestry: &[Block::Header]) -> AncestryChain { + let ancestry: HashMap<_, _> = ancestry + .iter() + .cloned() + .map(|h: Block::Header| (h.hash(), h)) + .collect(); + + AncestryChain { ancestry } + } } -impl grandpa::Chain> for AncestryChain where - NumberFor: grandpa::BlockNumberOps +impl grandpa::Chain> for AncestryChain +where + NumberFor: grandpa::BlockNumberOps, { - fn ancestry(&self, base: Block::Hash, block: Block::Hash) -> Result, GrandpaError> { - let mut route = Vec::new(); - let mut current_hash = block; - loop { - if current_hash == base { break; } - match self.ancestry.get(¤t_hash) { - Some(current_header) => { - current_hash = *current_header.parent_hash(); - route.push(current_hash); - }, - _ => return Err(GrandpaError::NotDescendent), - } - } - route.pop(); // remove the base - - Ok(route) - } - - fn best_chain_containing(&self, _block: Block::Hash) -> Option<(Block::Hash, NumberFor)> { - None - } + fn ancestry( + &self, + base: Block::Hash, + block: Block::Hash, + ) -> Result, GrandpaError> { + let mut route = Vec::new(); + let mut current_hash = block; + loop { + if current_hash == base { + break; + } + match self.ancestry.get(¤t_hash) { + Some(current_header) => { + current_hash = *current_header.parent_hash(); + route.push(current_hash); + } + _ => return Err(GrandpaError::NotDescendent), + } + } + route.pop(); // remove the base + + Ok(route) + } + + fn best_chain_containing( + &self, + _block: Block::Hash, + ) -> Option<(Block::Hash, NumberFor)> { + None + } } diff --git a/core/finality-grandpa/src/lib.rs b/core/finality-grandpa/src/lib.rs index daf37357a0..a22983839a 100644 --- a/core/finality-grandpa/src/lib.rs +++ b/core/finality-grandpa/src/lib.rs @@ -52,32 +52,33 @@ //! or prune any signaled changes based on whether the signaling block is //! included in the newly-finalized chain. 
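The `AncestryChain::ancestry` walk in justification.rs above is easier to follow on a toy chain: headers are keyed by hash, and a precommit target counts as a descendent of the commit target only if repeatedly following parent links reaches it. A minimal sketch using plain std types, with hypothetical u64 values standing in for `Block::Hash` (this is an illustration, not the Substrate API):

use std::collections::HashMap;

/// Walk parent links from `block` back to `base`, mirroring the shape of
/// `AncestryChain::ancestry`: collect the route, then pop the base.
fn ancestry(parent_of: &HashMap<u64, u64>, base: u64, block: u64) -> Result<Vec<u64>, ()> {
    let mut route = Vec::new();
    let mut current = block;
    loop {
        if current == base {
            break;
        }
        match parent_of.get(&current) {
            Some(&parent) => {
                // step to the parent first, then record it on the route
                current = parent;
                route.push(current);
            }
            None => return Err(()), // no known header: cannot prove descent
        }
    }
    route.pop(); // remove the base itself
    Ok(route)
}

fn main() {
    // toy chain: 1 <- 2 <- 3 <- 4 (hypothetical u64 "hashes")
    let parent_of: HashMap<u64, u64> = [(2, 1), (3, 2), (4, 3)].iter().cloned().collect();

    // the route from 4 down to base 1 passes through 3 and 2 (base popped).
    assert_eq!(ancestry(&parent_of, 1, 4), Ok(vec![3, 2]));
    // 9 has no known header, so it cannot be proven a descendent of 1.
    assert_eq!(ancestry(&parent_of, 1, 9), Err(()));
}

The `route.pop()` matches the real code: because the parent is pushed after each step, the last pushed entry is the base, which is not part of the reported route.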
-use futures::prelude::*; -use log::{debug, info, warn, trace}; -use futures::sync::{self, mpsc, oneshot}; -use client::{ - BlockchainEvents, CallExecutor, Client, backend::Backend, - error::Error as ClientError, -}; use client::blockchain::HeaderBackend; -use parity_codec::{Encode, Decode}; -use runtime_primitives::traits::{ - NumberFor, Block as BlockT, Header as HeaderT, DigestFor, ProvideRuntimeApi, Hash as HashT, - DigestItemFor, DigestItem, +use client::{ + backend::Backend, error::Error as ClientError, BlockchainEvents, CallExecutor, Client, }; use fg_primitives::GrandpaApi; +use futures::prelude::*; +use futures::sync::{self, mpsc, oneshot}; use inherents::InherentDataProviders; +use log::{debug, info, trace, warn}; +use parity_codec::{Decode, Encode}; use runtime_primitives::generic::BlockId; -use substrate_primitives::{ed25519, H256, Blake2Hasher, Pair}; -use substrate_telemetry::{telemetry, CONSENSUS_TRACE, CONSENSUS_DEBUG, CONSENSUS_WARN, CONSENSUS_INFO}; +use runtime_primitives::traits::{ + Block as BlockT, DigestFor, DigestItem, DigestItemFor, Hash as HashT, Header as HeaderT, + NumberFor, ProvideRuntimeApi, +}; +use substrate_primitives::{ed25519, Blake2Hasher, Pair, H256}; +use substrate_telemetry::{ + telemetry, CONSENSUS_DEBUG, CONSENSUS_INFO, CONSENSUS_TRACE, CONSENSUS_WARN, +}; use srml_finality_tracker; use grandpa::Error as GrandpaError; -use grandpa::{voter, round::State as RoundState, BlockNumberOps, VoterSet}; +use grandpa::{round::State as RoundState, voter, BlockNumberOps, VoterSet}; -use network::Service as NetworkService; use network::consensus_gossip as network_gossip; +use network::Service as NetworkService; use std::fmt; use std::sync::Arc; @@ -95,14 +96,14 @@ mod import; mod justification; mod until_imported; -#[cfg(feature="service-integration")] +#[cfg(feature = "service-integration")] mod service_integration; -#[cfg(feature="service-integration")] -pub use service_integration::{LinkHalfForService, BlockImportForService}; +#[cfg(feature = "service-integration")] +pub use service_integration::{BlockImportForService, LinkHalfForService}; use aux_schema::{PersistentData, VoterSetState}; use environment::Environment; -pub use finality_proof::{prove_finality, check_finality_proof}; +pub use finality_proof::{check_finality_proof, prove_finality}; use import::GrandpaBlockImport; use until_imported::UntilCommitBlocksImported; @@ -118,28 +119,28 @@ const MESSAGE_ROUND_TOLERANCE: u64 = 2; pub type Message = grandpa::Message<::Hash, NumberFor>; /// A signed message. pub type SignedMessage = grandpa::SignedMessage< - ::Hash, - NumberFor, - AuthoritySignature, - AuthorityId, + ::Hash, + NumberFor, + AuthoritySignature, + AuthorityId, >; /// Grandpa gossip message type. /// This is the root type that gets encoded and sent on the network. #[derive(Debug, Encode, Decode)] pub enum GossipMessage { - /// Grandpa message with round and set info. - VoteOrPrecommit(VoteOrPrecommitMessage), - /// Grandpa commit message with round and set info. - Commit(FullCommitMessage), + /// Grandpa message with round and set info. + VoteOrPrecommit(VoteOrPrecommitMessage), + /// Grandpa commit message with round and set info. + Commit(FullCommitMessage), } /// Network level message with topic information. #[derive(Debug, Encode, Decode)] pub struct VoteOrPrecommitMessage { - pub round: u64, - pub set_id: u64, - pub message: SignedMessage, + pub round: u64, + pub set_id: u64, + pub message: SignedMessage, } /// A prevote message for this chain's block type. 
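Everything in `GossipMessage` travels SCALE-encoded; the gossip validator later calls `GossipMessage::decode(&mut data)` and treats `None` as an invalid message. A rough round-trip sketch, assuming this era's `parity_codec` derive macros and its `Option`-returning `decode` (the enum here is a stand-in, not the real type):

use parity_codec::{Decode, Encode};

#[derive(Debug, PartialEq, Encode, Decode)]
enum ToyGossip {
    // (round, set_id)
    Vote(u64, u64),
    // set_id
    Commit(u64),
}

fn main() {
    let msg = ToyGossip::Vote(7, 1);
    let encoded: Vec<u8> = msg.encode();

    // mirrors the validator's decode path: a failed decode yields `None`,
    // which is rejected as an invalid message.
    let decoded = ToyGossip::decode(&mut &encoded[..]);
    assert_eq!(decoded, Some(msg));
}

The `Option` return is also why `decode_and_verify` above pairs `decode` with `ok_or_else` rather than `map_err`.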
@@ -147,289 +148,296 @@ pub type Prevote = grandpa::Prevote<::Hash, NumberFor = grandpa::Precommit<::Hash, NumberFor>; /// A commit message for this chain's block type. -pub type Commit = grandpa::Commit< - ::Hash, - NumberFor, - AuthoritySignature, - AuthorityId ->; +pub type Commit = + grandpa::Commit<::Hash, NumberFor, AuthoritySignature, AuthorityId>; /// A compact commit message for this chain's block type. pub type CompactCommit = grandpa::CompactCommit< - ::Hash, - NumberFor, - AuthoritySignature, - AuthorityId + ::Hash, + NumberFor, + AuthoritySignature, + AuthorityId, >; /// Network level commit message with topic information. #[derive(Debug, Encode, Decode)] pub struct FullCommitMessage { - pub round: u64, - pub set_id: u64, - pub message: CompactCommit, + pub round: u64, + pub set_id: u64, + pub message: CompactCommit, } /// Configuration for the GRANDPA service. #[derive(Clone)] pub struct Config { - /// The expected duration for a message to be gossiped across the network. - pub gossip_duration: Duration, - /// Justification generation period (in blocks). GRANDPA will try to generate justifications - /// at least every justification_period blocks. There are some other events which might cause - /// justification generation. - pub justification_period: u64, - /// The local signing key. - pub local_key: Option>, - /// Some local identifier of the voter. - pub name: Option, + /// The expected duration for a message to be gossiped across the network. + pub gossip_duration: Duration, + /// Justification generation period (in blocks). GRANDPA will try to generate justifications + /// at least every justification_period blocks. There are some other events which might cause + /// justification generation. + pub justification_period: u64, + /// The local signing key. + pub local_key: Option>, + /// Some local identifier of the voter. + pub name: Option, } impl Config { - fn name(&self) -> &str { - self.name.as_ref().map(|s| s.as_str()).unwrap_or("") - } + fn name(&self) -> &str { + self.name + .as_ref() + .map(|s| s.as_str()) + .unwrap_or("") + } } /// Errors that can occur while voting in GRANDPA. #[derive(Debug)] pub enum Error { - /// An error within grandpa. - Grandpa(GrandpaError), - /// A network error. - Network(String), - /// A blockchain error. - Blockchain(String), - /// Could not complete a round on disk. - Client(ClientError), - /// An invariant has been violated (e.g. not finalizing pending change blocks in-order) - Safety(String), - /// A timer failed to fire. - Timer(::tokio::timer::Error), + /// An error within grandpa. + Grandpa(GrandpaError), + /// A network error. + Network(String), + /// A blockchain error. + Blockchain(String), + /// Could not complete a round on disk. + Client(ClientError), + /// An invariant has been violated (e.g. not finalizing pending change blocks in-order) + Safety(String), + /// A timer failed to fire. + Timer(::tokio::timer::Error), } impl From for Error { - fn from(e: GrandpaError) -> Self { - Error::Grandpa(e) - } + fn from(e: GrandpaError) -> Self { + Error::Grandpa(e) + } } impl From for Error { - fn from(e: ClientError) -> Self { - Error::Client(e) - } + fn from(e: ClientError) -> Self { + Error::Client(e) + } } /// A stream used by NetworkBridge in its implementation of Network. 
pub struct NetworkStream { - inner: Option>>, - outer: oneshot::Receiver>> + inner: Option>>, + outer: oneshot::Receiver>>, } impl Stream for NetworkStream { - type Item = Vec; - type Error = (); - - fn poll(&mut self) -> Poll, Self::Error> { - if let Some(ref mut inner) = self.inner { - return inner.poll(); - } - match self.outer.poll() { - Ok(futures::Async::Ready(mut inner)) => { - let poll_result = inner.poll(); - self.inner = Some(inner); - poll_result - }, - Ok(futures::Async::NotReady) => Ok(futures::Async::NotReady), - Err(_) => Err(()) - } - } + type Item = Vec; + type Error = (); + + fn poll(&mut self) -> Poll, Self::Error> { + if let Some(ref mut inner) = self.inner { + return inner.poll(); + } + match self.outer.poll() { + Ok(futures::Async::Ready(mut inner)) => { + let poll_result = inner.poll(); + self.inner = Some(inner); + poll_result + } + Ok(futures::Async::NotReady) => Ok(futures::Async::NotReady), + Err(_) => Err(()), + } + } } struct TopicTracker { - min_live_round: u64, - max_round: u64, - set_id: u64, + min_live_round: u64, + max_round: u64, + set_id: u64, } impl TopicTracker { - fn is_expired(&self, round: u64, set_id: u64) -> bool { - if set_id < self.set_id { - trace!(target: "afg", "Expired: Message with expired set_id {} (ours {})", set_id, self.set_id); - telemetry!(CONSENSUS_TRACE; "afg.expired_set_id"; - "set_id" => ?set_id, "ours" => ?self.set_id - ); - return true; - } else if set_id == self.set_id + 1 { - // allow a few first rounds of future set. - if round > MESSAGE_ROUND_TOLERANCE { - trace!(target: "afg", "Expired: Message too far in the future set, round {} (ours set_id {})", round, self.set_id); - telemetry!(CONSENSUS_TRACE; "afg.expired_msg_too_far_in_future_set"; - "round" => ?round, "ours" => ?self.set_id - ); - return true; - } - } else if set_id == self.set_id { - if round < self.min_live_round.saturating_sub(MESSAGE_ROUND_TOLERANCE) { - trace!(target: "afg", "Expired: Message round is out of bounds {} (ours {}-{})", round, self.min_live_round, self.max_round); - telemetry!(CONSENSUS_TRACE; "afg.msg_round_oob"; - "round" => ?round, "our_min_live_round" => ?self.min_live_round, "our_max_round" => ?self.max_round - ); - return true; - } - } else { - trace!(target: "afg", "Expired: Message in invalid future set {} (ours {})", set_id, self.set_id); - telemetry!(CONSENSUS_TRACE; "afg.expired_msg_in_invalid_future_set"; - "set_id" => ?set_id, "ours" => ?self.set_id - ); - return true; - } - - false - } + fn is_expired(&self, round: u64, set_id: u64) -> bool { + if set_id < self.set_id { + trace!(target: "afg", "Expired: Message with expired set_id {} (ours {})", set_id, self.set_id); + telemetry!(CONSENSUS_TRACE; "afg.expired_set_id"; + "set_id" => ?set_id, "ours" => ?self.set_id + ); + return true; + } else if set_id == self.set_id + 1 { + // allow a few first rounds of future set. 
+ if round > MESSAGE_ROUND_TOLERANCE { + trace!(target: "afg", "Expired: Message too far in the future set, round {} (ours set_id {})", round, self.set_id); + telemetry!(CONSENSUS_TRACE; "afg.expired_msg_too_far_in_future_set"; + "round" => ?round, "ours" => ?self.set_id + ); + return true; + } + } else if set_id == self.set_id { + if round < self.min_live_round.saturating_sub(MESSAGE_ROUND_TOLERANCE) { + trace!(target: "afg", "Expired: Message round is out of bounds {} (ours {}-{})", round, self.min_live_round, self.max_round); + telemetry!(CONSENSUS_TRACE; "afg.msg_round_oob"; + "round" => ?round, "our_min_live_round" => ?self.min_live_round, "our_max_round" => ?self.max_round + ); + return true; + } + } else { + trace!(target: "afg", "Expired: Message in invalid future set {} (ours {})", set_id, self.set_id); + telemetry!(CONSENSUS_TRACE; "afg.expired_msg_in_invalid_future_set"; + "set_id" => ?set_id, "ours" => ?self.set_id + ); + return true; + } + + false + } } struct GossipValidator { - rounds: parking_lot::RwLock, - _marker: ::std::marker::PhantomData, + rounds: parking_lot::RwLock, + _marker: ::std::marker::PhantomData, } impl GossipValidator { - fn new() -> GossipValidator { - GossipValidator { - rounds: parking_lot::RwLock::new(TopicTracker { - min_live_round: 0, - max_round: 0, - set_id: 0, - }), - _marker: Default::default(), - } - } - - fn note_round(&self, round: u64, set_id: u64) { - let mut rounds = self.rounds.write(); - if set_id > rounds.set_id { - rounds.set_id = set_id; - rounds.max_round = 0; - rounds.min_live_round = 0; - } - rounds.max_round = rounds.max_round.max(round); - } - - fn note_set(&self, _set_id: u64) { - } - - fn drop_round(&self, round: u64, set_id: u64) { - let mut rounds = self.rounds.write(); - if set_id == rounds.set_id && round >= rounds.min_live_round { - rounds.min_live_round = round + 1; - } - } - - fn drop_set(&self, _set_id: u64) { - } - - fn is_expired(&self, round: u64, set_id: u64) -> bool { - self.rounds.read().is_expired(round, set_id) - } - - fn validate_round_message(&self, full: VoteOrPrecommitMessage) - -> network_gossip::ValidationResult - { - if self.is_expired(full.round, full.set_id) { - return network_gossip::ValidationResult::Expired; - } - - if let Err(()) = communication::check_message_sig::( - &full.message.message, - &full.message.id, - &full.message.signature, - full.round, - full.set_id - ) { - debug!(target: "afg", "Bad message signature {}", full.message.id); - telemetry!(CONSENSUS_DEBUG; "afg.bad_msg_signature"; "signature" => ?full.message.id); - return network_gossip::ValidationResult::Invalid; - } - - let topic = message_topic::(full.round, full.set_id); - network_gossip::ValidationResult::Valid(topic) - } - - fn validate_commit_message(&self, full: FullCommitMessage) - -> network_gossip::ValidationResult - { - use grandpa::Message as GrandpaMessage; - - if self.is_expired(full.round, full.set_id) { - return network_gossip::ValidationResult::Expired; - } - - if full.message.precommits.len() != full.message.auth_data.len() || full.message.precommits.is_empty() { - debug!(target: "afg", "Malformed compact commit"); - telemetry!(CONSENSUS_DEBUG; "afg.malformed_compact_commit"; - "precommits_len" => ?full.message.precommits.len(), - "auth_data_len" => ?full.message.auth_data.len(), - "precommits_is_empty" => ?full.message.precommits.is_empty(), - ); - return network_gossip::ValidationResult::Invalid; - } - - // check signatures on all contained precommits. 
- for (precommit, &(ref sig, ref id)) in full.message.precommits.iter().zip(&full.message.auth_data) { - if let Err(()) = communication::check_message_sig::( - &GrandpaMessage::Precommit(precommit.clone()), - id, - sig, - full.round, - full.set_id, - ) { - debug!(target: "afg", "Bad commit message signature {}", id); - telemetry!(CONSENSUS_DEBUG; "afg.bad_commit_msg_signature"; "id" => ?id); - return network_gossip::ValidationResult::Invalid; - } - } - - let topic = commit_topic::(full.set_id); - - let precommits_signed_by: Vec = full.message.auth_data.iter().map(move |(_, a)| { - format!("{}", a) - }).collect(); - - telemetry!(CONSENSUS_INFO; "afg.received_commit_msg"; - "contains_precommits_signed_by" => ?precommits_signed_by, - "round" => ?full.round, - "set_id" => ?full.set_id, - "topic" => ?topic, - "block_hash" => ?full.message, - ); - network_gossip::ValidationResult::Valid(topic) - } + fn new() -> GossipValidator { + GossipValidator { + rounds: parking_lot::RwLock::new(TopicTracker { + min_live_round: 0, + max_round: 0, + set_id: 0, + }), + _marker: Default::default(), + } + } + + fn note_round(&self, round: u64, set_id: u64) { + let mut rounds = self.rounds.write(); + if set_id > rounds.set_id { + rounds.set_id = set_id; + rounds.max_round = 0; + rounds.min_live_round = 0; + } + rounds.max_round = rounds.max_round.max(round); + } + + fn note_set(&self, _set_id: u64) {} + + fn drop_round(&self, round: u64, set_id: u64) { + let mut rounds = self.rounds.write(); + if set_id == rounds.set_id && round >= rounds.min_live_round { + rounds.min_live_round = round + 1; + } + } + + fn drop_set(&self, _set_id: u64) {} + + fn is_expired(&self, round: u64, set_id: u64) -> bool { + self.rounds.read().is_expired(round, set_id) + } + + fn validate_round_message( + &self, + full: VoteOrPrecommitMessage, + ) -> network_gossip::ValidationResult { + if self.is_expired(full.round, full.set_id) { + return network_gossip::ValidationResult::Expired; + } + + if let Err(()) = communication::check_message_sig::( + &full.message.message, + &full.message.id, + &full.message.signature, + full.round, + full.set_id, + ) { + debug!(target: "afg", "Bad message signature {}", full.message.id); + telemetry!(CONSENSUS_DEBUG; "afg.bad_msg_signature"; "signature" => ?full.message.id); + return network_gossip::ValidationResult::Invalid; + } + + let topic = message_topic::(full.round, full.set_id); + network_gossip::ValidationResult::Valid(topic) + } + + fn validate_commit_message( + &self, + full: FullCommitMessage, + ) -> network_gossip::ValidationResult { + use grandpa::Message as GrandpaMessage; + + if self.is_expired(full.round, full.set_id) { + return network_gossip::ValidationResult::Expired; + } + + if full.message.precommits.len() != full.message.auth_data.len() + || full.message.precommits.is_empty() + { + debug!(target: "afg", "Malformed compact commit"); + telemetry!(CONSENSUS_DEBUG; "afg.malformed_compact_commit"; + "precommits_len" => ?full.message.precommits.len(), + "auth_data_len" => ?full.message.auth_data.len(), + "precommits_is_empty" => ?full.message.precommits.is_empty(), + ); + return network_gossip::ValidationResult::Invalid; + } + + // check signatures on all contained precommits. 
+ for (precommit, &(ref sig, ref id)) in + full.message.precommits.iter().zip(&full.message.auth_data) + { + if let Err(()) = communication::check_message_sig::( + &GrandpaMessage::Precommit(precommit.clone()), + id, + sig, + full.round, + full.set_id, + ) { + debug!(target: "afg", "Bad commit message signature {}", id); + telemetry!(CONSENSUS_DEBUG; "afg.bad_commit_msg_signature"; "id" => ?id); + return network_gossip::ValidationResult::Invalid; + } + } + + let topic = commit_topic::(full.set_id); + + let precommits_signed_by: Vec = full + .message + .auth_data + .iter() + .map(move |(_, a)| format!("{}", a)) + .collect(); + + telemetry!(CONSENSUS_INFO; "afg.received_commit_msg"; + "contains_precommits_signed_by" => ?precommits_signed_by, + "round" => ?full.round, + "set_id" => ?full.set_id, + "topic" => ?topic, + "block_hash" => ?full.message, + ); + network_gossip::ValidationResult::Valid(topic) + } } impl network_gossip::Validator for GossipValidator { - fn validate(&self, mut data: &[u8]) -> network_gossip::ValidationResult { - match GossipMessage::::decode(&mut data) { - Some(GossipMessage::VoteOrPrecommit(message)) => self.validate_round_message(message), - Some(GossipMessage::Commit(message)) => self.validate_commit_message(message), - None => { - debug!(target: "afg", "Error decoding message"); - telemetry!(CONSENSUS_DEBUG; "afg.err_decoding_msg"; "" => ""); - network_gossip::ValidationResult::Invalid - } - } - } - - fn message_expired<'a>(&'a self) -> Box bool + 'a> { - let rounds = self.rounds.read(); - Box::new(move |_topic, mut data| { - match GossipMessage::::decode(&mut data) { - None => true, - Some(GossipMessage::Commit(full)) => rounds.is_expired(full.round, full.set_id), - Some(GossipMessage::VoteOrPrecommit(full)) => - rounds.is_expired(full.round, full.set_id), - } - }) - } + fn validate(&self, mut data: &[u8]) -> network_gossip::ValidationResult { + match GossipMessage::::decode(&mut data) { + Some(GossipMessage::VoteOrPrecommit(message)) => self.validate_round_message(message), + Some(GossipMessage::Commit(message)) => self.validate_commit_message(message), + None => { + debug!(target: "afg", "Error decoding message"); + telemetry!(CONSENSUS_DEBUG; "afg.err_decoding_msg"; "" => ""); + network_gossip::ValidationResult::Invalid + } + } + } + + fn message_expired<'a>(&'a self) -> Box bool + 'a> { + let rounds = self.rounds.read(); + Box::new( + move |_topic, mut data| match GossipMessage::::decode(&mut data) { + None => true, + Some(GossipMessage::Commit(full)) => rounds.is_expired(full.round, full.set_id), + Some(GossipMessage::VoteOrPrecommit(full)) => { + rounds.is_expired(full.round, full.set_id) + } + }, + ) + } } /// A handle to the network. This is generally implemented by providing some @@ -437,537 +445,583 @@ impl network_gossip::Validator for GossipValidator: Clone { - /// A stream of input messages for a topic. - type In: Stream,Error=()>; + /// A stream of input messages for a topic. + type In: Stream, Error = ()>; - /// Get a stream of messages for a specific round. This stream should - /// never logically conclude. - fn messages_for(&self, round: u64, set_id: u64) -> Self::In; + /// Get a stream of messages for a specific round. This stream should + /// never logically conclude. + fn messages_for(&self, round: u64, set_id: u64) -> Self::In; - /// Send a message at a specific round out. - fn send_message(&self, round: u64, set_id: u64, message: Vec, force: bool); + /// Send a message at a specific round out. 
+ fn send_message(&self, round: u64, set_id: u64, message: Vec, force: bool); - /// Clean up messages for a round. - fn drop_round_messages(&self, round: u64, set_id: u64); + /// Clean up messages for a round. + fn drop_round_messages(&self, round: u64, set_id: u64); - /// Clean up messages for a given authority set id (e.g. commit messages). - fn drop_set_messages(&self, set_id: u64); + /// Clean up messages for a given authority set id (e.g. commit messages). + fn drop_set_messages(&self, set_id: u64); - /// Get a stream of commit messages for a specific set-id. This stream - /// should never logically conclude. - fn commit_messages(&self, set_id: u64) -> Self::In; + /// Get a stream of commit messages for a specific set-id. This stream + /// should never logically conclude. + fn commit_messages(&self, set_id: u64) -> Self::In; - /// Send message over the commit channel. - fn send_commit(&self, round: u64, set_id: u64, message: Vec, force: bool); + /// Send message over the commit channel. + fn send_commit(&self, round: u64, set_id: u64, message: Vec, force: bool); - /// Inform peers that a block with given hash should be downloaded. - fn announce(&self, round: u64, set_id: u64, block: Block::Hash); + /// Inform peers that a block with given hash should be downloaded. + fn announce(&self, round: u64, set_id: u64, block: Block::Hash); } /// Bridge between NetworkService, gossiping consensus messages and Grandpa pub struct NetworkBridge> { - service: Arc>, - validator: Arc>, + service: Arc>, + validator: Arc>, } impl> NetworkBridge { - /// Create a new NetworkBridge to the given NetworkService - pub fn new(service: Arc>) -> Self { - let validator = Arc::new(GossipValidator::new()); - let v = validator.clone(); - service.with_gossip(move |gossip, _| { - gossip.register_validator(GRANDPA_ENGINE_ID, v); - }); - NetworkBridge { service, validator: validator } - } -} - -impl,> Clone for NetworkBridge { - fn clone(&self) -> Self { - NetworkBridge { - service: Arc::clone(&self.service), - validator: Arc::clone(&self.validator), - } - } + /// Create a new NetworkBridge to the given NetworkService + pub fn new(service: Arc>) -> Self { + let validator = Arc::new(GossipValidator::new()); + let v = validator.clone(); + service.with_gossip(move |gossip, _| { + gossip.register_validator(GRANDPA_ENGINE_ID, v); + }); + NetworkBridge { + service, + validator: validator, + } + } +} + +impl> Clone + for NetworkBridge +{ + fn clone(&self) -> Self { + NetworkBridge { + service: Arc::clone(&self.service), + validator: Arc::clone(&self.validator), + } + } } fn message_topic(round: u64, set_id: u64) -> B::Hash { - <::Hashing as HashT>::hash(format!("{}-{}", set_id, round).as_bytes()) + <::Hashing as HashT>::hash(format!("{}-{}", set_id, round).as_bytes()) } fn commit_topic(set_id: u64) -> B::Hash { - <::Hashing as HashT>::hash(format!("{}-COMMITS", set_id).as_bytes()) -} - -impl,> Network for NetworkBridge { - type In = NetworkStream; - fn messages_for(&self, round: u64, set_id: u64) -> Self::In { - self.validator.note_round(round, set_id); - let (tx, rx) = sync::oneshot::channel(); - self.service.with_gossip(move |gossip, _| { - let inner_rx = gossip.messages_for(GRANDPA_ENGINE_ID, message_topic::(round, set_id)); - let _ = tx.send(inner_rx); - }); - NetworkStream { outer: rx, inner: None } - } - - fn send_message(&self, round: u64, set_id: u64, message: Vec, force: bool) { - let topic = message_topic::(round, set_id); - self.service.gossip_consensus_message(topic, GRANDPA_ENGINE_ID, message, force); - } - - fn 
drop_round_messages(&self, round: u64, set_id: u64) { - self.validator.drop_round(round, set_id); - self.service.with_gossip(move |gossip, _| gossip.collect_garbage()); - } - - fn drop_set_messages(&self, set_id: u64) { - self.validator.drop_set(set_id); - self.service.with_gossip(move |gossip, _| gossip.collect_garbage()); - } - - fn commit_messages(&self, set_id: u64) -> Self::In { - self.validator.note_set(set_id); - let (tx, rx) = sync::oneshot::channel(); - self.service.with_gossip(move |gossip, _| { - let inner_rx = gossip.messages_for(GRANDPA_ENGINE_ID, commit_topic::(set_id)); - let _ = tx.send(inner_rx); - }); - NetworkStream { outer: rx, inner: None } - } - - fn send_commit(&self, _round: u64, set_id: u64, message: Vec, force: bool) { - let topic = commit_topic::(set_id); - self.service.gossip_consensus_message(topic, GRANDPA_ENGINE_ID, message, force); - } - - fn announce(&self, round: u64, _set_id: u64, block: B::Hash) { - debug!(target: "afg", "Announcing block {} to peers which we voted on in round {}", block, round); - telemetry!(CONSENSUS_DEBUG; "afg.announcing_blocks_to_voted_peers"; - "block" => ?block, "round" => ?round - ); - self.service.announce_block(block) - } + <::Hashing as HashT>::hash(format!("{}-COMMITS", set_id).as_bytes()) +} + +impl> Network + for NetworkBridge +{ + type In = NetworkStream; + fn messages_for(&self, round: u64, set_id: u64) -> Self::In { + self.validator.note_round(round, set_id); + let (tx, rx) = sync::oneshot::channel(); + self.service.with_gossip(move |gossip, _| { + let inner_rx = + gossip.messages_for(GRANDPA_ENGINE_ID, message_topic::(round, set_id)); + let _ = tx.send(inner_rx); + }); + NetworkStream { + outer: rx, + inner: None, + } + } + + fn send_message(&self, round: u64, set_id: u64, message: Vec, force: bool) { + let topic = message_topic::(round, set_id); + self.service + .gossip_consensus_message(topic, GRANDPA_ENGINE_ID, message, force); + } + + fn drop_round_messages(&self, round: u64, set_id: u64) { + self.validator.drop_round(round, set_id); + self.service + .with_gossip(move |gossip, _| gossip.collect_garbage()); + } + + fn drop_set_messages(&self, set_id: u64) { + self.validator.drop_set(set_id); + self.service + .with_gossip(move |gossip, _| gossip.collect_garbage()); + } + + fn commit_messages(&self, set_id: u64) -> Self::In { + self.validator.note_set(set_id); + let (tx, rx) = sync::oneshot::channel(); + self.service.with_gossip(move |gossip, _| { + let inner_rx = gossip.messages_for(GRANDPA_ENGINE_ID, commit_topic::(set_id)); + let _ = tx.send(inner_rx); + }); + NetworkStream { + outer: rx, + inner: None, + } + } + + fn send_commit(&self, _round: u64, set_id: u64, message: Vec, force: bool) { + let topic = commit_topic::(set_id); + self.service + .gossip_consensus_message(topic, GRANDPA_ENGINE_ID, message, force); + } + + fn announce(&self, round: u64, _set_id: u64, block: B::Hash) { + debug!(target: "afg", "Announcing block {} to peers which we voted on in round {}", block, round); + telemetry!(CONSENSUS_DEBUG; "afg.announcing_blocks_to_voted_peers"; + "block" => ?block, "round" => ?round + ); + self.service.announce_block(block) + } } /// Something which can determine if a block is known. pub trait BlockStatus { - /// Return `Ok(Some(number))` or `Ok(None)` depending on whether the block - /// is definitely known and has been imported. - /// If an unexpected error occurs, return that. 
- fn block_number(&self, hash: Block::Hash) -> Result>, Error>; + /// Return `Ok(Some(number))` or `Ok(None)` depending on whether the block + /// is definitely known and has been imported. + /// If an unexpected error occurs, return that. + fn block_number(&self, hash: Block::Hash) -> Result>, Error>; } -impl, RA> BlockStatus for Arc> where - B: Backend, - E: CallExecutor + Send + Sync, - RA: Send + Sync, - NumberFor: BlockNumberOps, +impl, RA> BlockStatus for Arc> +where + B: Backend, + E: CallExecutor + Send + Sync, + RA: Send + Sync, + NumberFor: BlockNumberOps, { - fn block_number(&self, hash: Block::Hash) -> Result>, Error> { - self.block_number_from_id(&BlockId::Hash(hash)) - .map_err(|e| Error::Blockchain(format!("{:?}", e))) - } + fn block_number(&self, hash: Block::Hash) -> Result>, Error> { + self.block_number_from_id(&BlockId::Hash(hash)) + .map_err(|e| Error::Blockchain(format!("{:?}", e))) + } } /// A new authority set along with the canonical block it changed at. #[derive(Debug)] pub(crate) struct NewAuthoritySet { - pub(crate) canon_number: N, - pub(crate) canon_hash: H, - pub(crate) set_id: u64, - pub(crate) authorities: Vec<(AuthorityId, u64)>, + pub(crate) canon_number: N, + pub(crate) canon_hash: H, + pub(crate) set_id: u64, + pub(crate) authorities: Vec<(AuthorityId, u64)>, } /// Commands issued to the voter. #[derive(Debug)] pub(crate) enum VoterCommand { - /// Pause the voter for given reason. - Pause(String), - /// New authorities. - ChangeAuthorities(NewAuthoritySet) + /// Pause the voter for given reason. + Pause(String), + /// New authorities. + ChangeAuthorities(NewAuthoritySet), } impl fmt::Display for VoterCommand { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - VoterCommand::Pause(ref reason) => write!(f, "Pausing voter: {}", reason), - VoterCommand::ChangeAuthorities(_) => write!(f, "Changing authorities"), - } - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + VoterCommand::Pause(ref reason) => write!(f, "Pausing voter: {}", reason), + VoterCommand::ChangeAuthorities(_) => write!(f, "Changing authorities"), + } + } } /// Signals either an early exit of a voter or an error. #[derive(Debug)] pub(crate) enum CommandOrError { - /// An error occurred. - Error(Error), - /// A command to the voter. - VoterCommand(VoterCommand), + /// An error occurred. + Error(Error), + /// A command to the voter. 
+ VoterCommand(VoterCommand), } impl From for CommandOrError { - fn from(e: Error) -> Self { - CommandOrError::Error(e) - } + fn from(e: Error) -> Self { + CommandOrError::Error(e) + } } impl From for CommandOrError { - fn from(e: ClientError) -> Self { - CommandOrError::Error(Error::Client(e)) - } + fn from(e: ClientError) -> Self { + CommandOrError::Error(Error::Client(e)) + } } impl From for CommandOrError { - fn from(e: grandpa::Error) -> Self { - CommandOrError::Error(Error::from(e)) - } + fn from(e: grandpa::Error) -> Self { + CommandOrError::Error(Error::from(e)) + } } impl From> for CommandOrError { - fn from(e: VoterCommand) -> Self { - CommandOrError::VoterCommand(e) - } + fn from(e: VoterCommand) -> Self { + CommandOrError::VoterCommand(e) + } } -impl ::std::error::Error for CommandOrError { } +impl ::std::error::Error for CommandOrError {} impl fmt::Display for CommandOrError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - CommandOrError::Error(ref e) => write!(f, "{:?}", e), - CommandOrError::VoterCommand(ref cmd) => write!(f, "{}", cmd), - } - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + CommandOrError::Error(ref e) => write!(f, "{:?}", e), + CommandOrError::VoterCommand(ref cmd) => write!(f, "{}", cmd), + } + } } -pub struct LinkHalf, RA> { - client: Arc>, - persistent_data: PersistentData>, - voter_commands_rx: mpsc::UnboundedReceiver>>, +pub struct LinkHalf, RA> { + client: Arc>, + persistent_data: PersistentData>, + voter_commands_rx: mpsc::UnboundedReceiver>>, } /// Make block importer and link half necessary to tie the background voter /// to it. -pub fn block_import, RA, PRA>( - client: Arc>, - api: Arc -) -> Result<(GrandpaBlockImport, LinkHalf), ClientError> - where - B: Backend + 'static, - E: CallExecutor + 'static + Clone + Send + Sync, - RA: Send + Sync, - PRA: ProvideRuntimeApi, - PRA::Api: GrandpaApi, +pub fn block_import, RA, PRA>( + client: Arc>, + api: Arc, +) -> Result< + ( + GrandpaBlockImport, + LinkHalf, + ), + ClientError, +> +where + B: Backend + 'static, + E: CallExecutor + 'static + Clone + Send + Sync, + RA: Send + Sync, + PRA: ProvideRuntimeApi, + PRA::Api: GrandpaApi, { - use runtime_primitives::traits::Zero; - - let chain_info = client.info()?; - let genesis_hash = chain_info.chain.genesis_hash; - - let persistent_data = aux_schema::load_persistent( - &**client.backend(), - genesis_hash, - >::zero(), - || { - let genesis_authorities = api.runtime_api() - .grandpa_authorities(&BlockId::number(Zero::zero()))?; - telemetry!(CONSENSUS_DEBUG; "afg.loading_authorities"; - "authorities_len" => ?genesis_authorities.len() - ); - Ok(genesis_authorities) - } - )?; - - let (voter_commands_tx, voter_commands_rx) = mpsc::unbounded(); - - Ok(( - GrandpaBlockImport::new( - client.clone(), - persistent_data.authority_set.clone(), - voter_commands_tx, - persistent_data.consensus_changes.clone(), - api, - ), - LinkHalf { - client, - persistent_data, - voter_commands_rx, - }, - )) -} - -fn committer_communication, B, E, N, RA>( - local_key: Option>, - set_id: u64, - voters: &Arc>, - client: &Arc>, - network: &N, + use runtime_primitives::traits::Zero; + + let chain_info = client.info()?; + let genesis_hash = chain_info.chain.genesis_hash; + + let persistent_data = aux_schema::load_persistent( + &**client.backend(), + genesis_hash, + >::zero(), + || { + let genesis_authorities = api + .runtime_api() + .grandpa_authorities(&BlockId::number(Zero::zero()))?; + telemetry!(CONSENSUS_DEBUG; 
"afg.loading_authorities"; + "authorities_len" => ?genesis_authorities.len() + ); + Ok(genesis_authorities) + }, + )?; + + let (voter_commands_tx, voter_commands_rx) = mpsc::unbounded(); + + Ok(( + GrandpaBlockImport::new( + client.clone(), + persistent_data.authority_set.clone(), + voter_commands_tx, + persistent_data.consensus_changes.clone(), + api, + ), + LinkHalf { + client, + persistent_data, + voter_commands_rx, + }, + )) +} + +fn committer_communication, B, E, N, RA>( + local_key: Option>, + set_id: u64, + voters: &Arc>, + client: &Arc>, + network: &N, ) -> ( - impl Stream< - Item = (u64, ::grandpa::CompactCommit, AuthoritySignature, AuthorityId>), - Error = CommandOrError>, - >, - impl Sink< - SinkItem = (u64, ::grandpa::Commit, AuthoritySignature, AuthorityId>), - SinkError = CommandOrError>, - >, -) where - B: Backend, - E: CallExecutor + Send + Sync, - N: Network, - RA: Send + Sync, - NumberFor: BlockNumberOps, - DigestItemFor: DigestItem, + impl Stream< + Item = ( + u64, + ::grandpa::CompactCommit, AuthoritySignature, AuthorityId>, + ), + Error = CommandOrError>, + >, + impl Sink< + SinkItem = ( + u64, + ::grandpa::Commit, AuthoritySignature, AuthorityId>, + ), + SinkError = CommandOrError>, + >, +) +where + B: Backend, + E: CallExecutor + Send + Sync, + N: Network, + RA: Send + Sync, + NumberFor: BlockNumberOps, + DigestItemFor: DigestItem, { - // verification stream - let commit_in = crate::communication::checked_commit_stream::( - network.commit_messages(set_id), - voters.clone(), - ); - - // block commit messages until relevant blocks are imported. - let commit_in = UntilCommitBlocksImported::new( - client.import_notification_stream(), - client.clone(), - commit_in, - ); - - let is_voter = local_key - .map(|pair| voters.contains_key(&pair.public().into())) - .unwrap_or(false); - - let commit_out = crate::communication::CommitsOut::::new( - network.clone(), - set_id, - is_voter, - ); - - let commit_in = commit_in.map_err(Into::into); - let commit_out = commit_out.sink_map_err(Into::into); - - (commit_in, commit_out) + // verification stream + let commit_in = crate::communication::checked_commit_stream::( + network.commit_messages(set_id), + voters.clone(), + ); + + // block commit messages until relevant blocks are imported. + let commit_in = UntilCommitBlocksImported::new( + client.import_notification_stream(), + client.clone(), + commit_in, + ); + + let is_voter = local_key + .map(|pair| voters.contains_key(&pair.public().into())) + .unwrap_or(false); + + let commit_out = + crate::communication::CommitsOut::::new(network.clone(), set_id, is_voter); + + let commit_in = commit_in.map_err(Into::into); + let commit_out = commit_out.sink_map_err(Into::into); + + (commit_in, commit_out) } /// Register the finality tracker inherent data provider (which is used by /// GRANDPA), if not registered already. 
-fn register_finality_tracker_inherent_data_provider, RA>( - client: Arc>, - inherent_data_providers: &InherentDataProviders, -) -> Result<(), consensus_common::Error> where - B: Backend + 'static, - E: CallExecutor + Send + Sync + 'static, - RA: Send + Sync + 'static, +fn register_finality_tracker_inherent_data_provider, RA>( + client: Arc>, + inherent_data_providers: &InherentDataProviders, +) -> Result<(), consensus_common::Error> +where + B: Backend + 'static, + E: CallExecutor + Send + Sync + 'static, + RA: Send + Sync + 'static, { - if !inherent_data_providers.has_provider(&srml_finality_tracker::INHERENT_IDENTIFIER) { - inherent_data_providers - .register_provider(srml_finality_tracker::InherentDataProvider::new(move || { - match client.backend().blockchain().info() { - Err(e) => Err(std::borrow::Cow::Owned(e.to_string())), - Ok(info) => { - telemetry!(CONSENSUS_INFO; "afg.finalized"; - "finalized_number" => ?info.finalized_number, - "finalized_hash" => ?info.finalized_hash, - ); - Ok(info.finalized_number) - }, - } - })) - .map_err(|err| consensus_common::ErrorKind::InherentData(err.into()).into()) - } else { - Ok(()) - } + if !inherent_data_providers.has_provider(&srml_finality_tracker::INHERENT_IDENTIFIER) { + inherent_data_providers + .register_provider(srml_finality_tracker::InherentDataProvider::new( + move || match client.backend().blockchain().info() { + Err(e) => Err(std::borrow::Cow::Owned(e.to_string())), + Ok(info) => { + telemetry!(CONSENSUS_INFO; "afg.finalized"; + "finalized_number" => ?info.finalized_number, + "finalized_hash" => ?info.finalized_hash, + ); + Ok(info.finalized_number) + } + }, + )) + .map_err(|err| consensus_common::ErrorKind::InherentData(err.into()).into()) + } else { + Ok(()) + } } /// Run a GRANDPA voter as a task. Provide configuration and a link to a /// block import worker that has already been instantiated with `block_import`. -pub fn run_grandpa, N, RA>( - config: Config, - link: LinkHalf, - network: N, - inherent_data_providers: InherentDataProviders, - on_exit: impl Future + Send + 'static, -) -> ::client::error::Result + Send + 'static> where - Block::Hash: Ord, - B: Backend + 'static, - E: CallExecutor + Send + Sync + 'static, - N: Network + Send + Sync + 'static, - N::In: Send + 'static, - NumberFor: BlockNumberOps, - DigestFor: Encode, - DigestItemFor: DigestItem, - RA: Send + Sync + 'static, +pub fn run_grandpa, N, RA>( + config: Config, + link: LinkHalf, + network: N, + inherent_data_providers: InherentDataProviders, + on_exit: impl Future + Send + 'static, +) -> ::client::error::Result + Send + 'static> +where + Block::Hash: Ord, + B: Backend + 'static, + E: CallExecutor + Send + Sync + 'static, + N: Network + Send + Sync + 'static, + N::In: Send + 'static, + NumberFor: BlockNumberOps, + DigestFor: Encode, + DigestItemFor: DigestItem, + RA: Send + Sync + 'static, { - use futures::future::{self, Loop as FutureLoop}; - - let LinkHalf { - client, - persistent_data, - voter_commands_rx, - } = link; - // we shadow network with the wrapping/rebroadcasting network to avoid - // accidental reuse. 
- let (broadcast_worker, network) = communication::rebroadcasting_network(network); - let PersistentData { authority_set, set_state, consensus_changes } = persistent_data; - - register_finality_tracker_inherent_data_provider(client.clone(), &inherent_data_providers)?; - - let voters = authority_set.current_authorities(); - - let initial_environment = Arc::new(Environment { - inner: client.clone(), - config: config.clone(), - voters: Arc::new(voters), - network: network.clone(), - set_id: authority_set.set_id(), - authority_set: authority_set.clone(), - consensus_changes: consensus_changes.clone(), - last_completed: environment::LastCompletedRound::new(set_state.round()), - }); - - let initial_state = (initial_environment, set_state, voter_commands_rx.into_future()); - let voter_work = future::loop_fn(initial_state, move |params| { - let (env, set_state, voter_commands_rx) = params; - debug!(target: "afg", "{}: Starting new voter with set ID {}", config.name(), env.set_id); - telemetry!(CONSENSUS_DEBUG; "afg.starting_new_voter"; - "name" => ?config.name(), "set_id" => ?env.set_id - ); - - let mut maybe_voter = match set_state.clone() { - VoterSetState::Live(last_round_number, last_round_state) => { - let chain_info = match client.info() { - Ok(i) => i, - Err(e) => return future::Either::B(future::err(Error::Client(e))), - }; - - let last_finalized = ( - chain_info.chain.finalized_hash, - chain_info.chain.finalized_number, - ); - - let committer_data = committer_communication( - config.local_key.clone(), - env.set_id, - &env.voters, - &client, - &network, - ); - - let voters = (*env.voters).clone(); - - Some(voter::Voter::new( - env.clone(), - voters, - committer_data, - last_round_number, - last_round_state, - last_finalized, - )) - } - VoterSetState::Paused(_, _) => None, - }; - - // needs to be combined with another future otherwise it can deadlock. - let poll_voter = future::poll_fn(move || match maybe_voter { - Some(ref mut voter) => voter.poll(), - None => Ok(Async::NotReady), - }); - - let client = client.clone(); - let config = config.clone(); - let network = network.clone(); - let authority_set = authority_set.clone(); - let consensus_changes = consensus_changes.clone(); - - let handle_voter_command = move |command: VoterCommand<_, _>, voter_commands_rx| { - match command { - VoterCommand::ChangeAuthorities(new) => { - let voters: Vec = new.authorities.iter().map(move |(a, _)| { - format!("{}", a) - }).collect(); - telemetry!(CONSENSUS_INFO; "afg.voter_command_change_authorities"; - "number" => ?new.canon_number, - "hash" => ?new.canon_hash, - "voters" => ?voters, - "set_id" => ?new.set_id, - ); - - // start the new authority set using the block where the - // set changed (not where the signal happened!) as the base. - let genesis_state = RoundState::genesis((new.canon_hash, new.canon_number)); - let env = Arc::new(Environment { - inner: client, - config, - voters: Arc::new(new.authorities.into_iter().collect()), - set_id: new.set_id, - network, - authority_set, - consensus_changes, - last_completed: environment::LastCompletedRound::new( - (0, genesis_state.clone()) - ), - }); - - - let set_state = VoterSetState::Live( - 0, // always start at round 0 when changing sets. - genesis_state, - ); - - Ok(FutureLoop::Continue((env, set_state, voter_commands_rx))) - } - VoterCommand::Pause(reason) => { - info!(target: "afg", "Pausing old validator set: {}", reason); - - // not racing because old voter is shut down. 
- let (last_round_number, last_round_state) = env.last_completed.read(); - let set_state = VoterSetState::Paused( - last_round_number, - last_round_state, - ); - - aux_schema::write_voter_set_state(&**client.backend(), &set_state)?; - - Ok(FutureLoop::Continue((env, set_state, voter_commands_rx))) - }, - } - }; - - future::Either::A(poll_voter.select2(voter_commands_rx).then(move |res| match res { - Ok(future::Either::A(((), _))) => { - // voters don't conclude naturally; this could reasonably be an error. - Ok(FutureLoop::Break(())) - }, - Err(future::Either::B(_)) => { - // the `voter_commands_rx` stream should not fail. - Ok(FutureLoop::Break(())) - }, - Ok(future::Either::B(((None, _), _))) => { - // the `voter_commands_rx` stream should never conclude since it's never closed. - Ok(FutureLoop::Break(())) - }, - Err(future::Either::A((CommandOrError::Error(e), _))) => { - // return inner voter error - Err(e) - } - Ok(future::Either::B(((Some(command), voter_commands_rx), _))) => { - // some command issued externally. - handle_voter_command(command, voter_commands_rx.into_future()) - } - Err(future::Either::A((CommandOrError::VoterCommand(command), voter_commands_rx))) => { - // some command issued internally. - handle_voter_command(command, voter_commands_rx) - }, - })) - }); - - let voter_work = voter_work - .join(broadcast_worker) - .map(|((), ())| ()) - .map_err(|e| { - warn!("GRANDPA Voter failed: {:?}", e); - telemetry!(CONSENSUS_WARN; "afg.voter_failed"; "e" => ?e); - }); - - Ok(voter_work.select(on_exit).then(|_| Ok(()))) + use futures::future::{self, Loop as FutureLoop}; + + let LinkHalf { + client, + persistent_data, + voter_commands_rx, + } = link; + // we shadow network with the wrapping/rebroadcasting network to avoid + // accidental reuse. 
+ let (broadcast_worker, network) = communication::rebroadcasting_network(network); + let PersistentData { + authority_set, + set_state, + consensus_changes, + } = persistent_data; + + register_finality_tracker_inherent_data_provider(client.clone(), &inherent_data_providers)?; + + let voters = authority_set.current_authorities(); + + let initial_environment = Arc::new(Environment { + inner: client.clone(), + config: config.clone(), + voters: Arc::new(voters), + network: network.clone(), + set_id: authority_set.set_id(), + authority_set: authority_set.clone(), + consensus_changes: consensus_changes.clone(), + last_completed: environment::LastCompletedRound::new(set_state.round()), + }); + + let initial_state = ( + initial_environment, + set_state, + voter_commands_rx.into_future(), + ); + let voter_work = future::loop_fn(initial_state, move |params| { + let (env, set_state, voter_commands_rx) = params; + debug!(target: "afg", "{}: Starting new voter with set ID {}", config.name(), env.set_id); + telemetry!(CONSENSUS_DEBUG; "afg.starting_new_voter"; + "name" => ?config.name(), "set_id" => ?env.set_id + ); + + let mut maybe_voter = match set_state.clone() { + VoterSetState::Live(last_round_number, last_round_state) => { + let chain_info = match client.info() { + Ok(i) => i, + Err(e) => return future::Either::B(future::err(Error::Client(e))), + }; + + let last_finalized = ( + chain_info.chain.finalized_hash, + chain_info.chain.finalized_number, + ); + + let committer_data = committer_communication( + config.local_key.clone(), + env.set_id, + &env.voters, + &client, + &network, + ); + + let voters = (*env.voters).clone(); + + Some(voter::Voter::new( + env.clone(), + voters, + committer_data, + last_round_number, + last_round_state, + last_finalized, + )) + } + VoterSetState::Paused(_, _) => None, + }; + + // needs to be combined with another future otherwise it can deadlock. + let poll_voter = future::poll_fn(move || match maybe_voter { + Some(ref mut voter) => voter.poll(), + None => Ok(Async::NotReady), + }); + + let client = client.clone(); + let config = config.clone(); + let network = network.clone(); + let authority_set = authority_set.clone(); + let consensus_changes = consensus_changes.clone(); + + let handle_voter_command = move |command: VoterCommand<_, _>, voter_commands_rx| { + match command { + VoterCommand::ChangeAuthorities(new) => { + let voters: Vec = new + .authorities + .iter() + .map(move |(a, _)| format!("{}", a)) + .collect(); + telemetry!(CONSENSUS_INFO; "afg.voter_command_change_authorities"; + "number" => ?new.canon_number, + "hash" => ?new.canon_hash, + "voters" => ?voters, + "set_id" => ?new.set_id, + ); + + // start the new authority set using the block where the + // set changed (not where the signal happened!) as the base. + let genesis_state = RoundState::genesis((new.canon_hash, new.canon_number)); + let env = Arc::new(Environment { + inner: client, + config, + voters: Arc::new(new.authorities.into_iter().collect()), + set_id: new.set_id, + network, + authority_set, + consensus_changes, + last_completed: environment::LastCompletedRound::new(( + 0, + genesis_state.clone(), + )), + }); + + let set_state = VoterSetState::Live( + 0, // always start at round 0 when changing sets. + genesis_state, + ); + + Ok(FutureLoop::Continue((env, set_state, voter_commands_rx))) + } + VoterCommand::Pause(reason) => { + info!(target: "afg", "Pausing old validator set: {}", reason); + + // not racing because old voter is shut down. 
+ let (last_round_number, last_round_state) = env.last_completed.read(); + let set_state = VoterSetState::Paused(last_round_number, last_round_state); + + aux_schema::write_voter_set_state(&**client.backend(), &set_state)?; + + Ok(FutureLoop::Continue((env, set_state, voter_commands_rx))) + } + } + }; + + future::Either::A( + poll_voter + .select2(voter_commands_rx) + .then(move |res| match res { + Ok(future::Either::A(((), _))) => { + // voters don't conclude naturally; this could reasonably be an error. + Ok(FutureLoop::Break(())) + } + Err(future::Either::B(_)) => { + // the `voter_commands_rx` stream should not fail. + Ok(FutureLoop::Break(())) + } + Ok(future::Either::B(((None, _), _))) => { + // the `voter_commands_rx` stream should never conclude since it's never closed. + Ok(FutureLoop::Break(())) + } + Err(future::Either::A((CommandOrError::Error(e), _))) => { + // return inner voter error + Err(e) + } + Ok(future::Either::B(((Some(command), voter_commands_rx), _))) => { + // some command issued externally. + handle_voter_command(command, voter_commands_rx.into_future()) + } + Err(future::Either::A(( + CommandOrError::VoterCommand(command), + voter_commands_rx, + ))) => { + // some command issued internally. + handle_voter_command(command, voter_commands_rx) + } + }), + ) + }); + + let voter_work = voter_work + .join(broadcast_worker) + .map(|((), ())| ()) + .map_err(|e| { + warn!("GRANDPA Voter failed: {:?}", e); + telemetry!(CONSENSUS_WARN; "afg.voter_failed"; "e" => ?e); + }); + + Ok(voter_work.select(on_exit).then(|_| Ok(()))) } diff --git a/core/finality-grandpa/src/service_integration.rs b/core/finality-grandpa/src/service_integration.rs index 3eee1dd940..406802f8b4 100644 --- a/core/finality-grandpa/src/service_integration.rs +++ b/core/finality-grandpa/src/service_integration.rs @@ -15,26 +15,25 @@ // along with Substrate. If not, see . /// Integrate grandpa finality with substrate service - use client; use service::{FullBackend, FullExecutor, ServiceFactory}; pub type BlockImportForService = crate::GrandpaBlockImport< - FullBackend, - FullExecutor, - ::Block, - ::RuntimeApi, - client::Client< + FullBackend, + FullExecutor, + ::Block, + ::RuntimeApi, + client::Client< FullBackend, FullExecutor, ::Block, - ::RuntimeApi + ::RuntimeApi, >, >; pub type LinkHalfForService = crate::LinkHalf< - FullBackend, - FullExecutor, - ::Block, - ::RuntimeApi + FullBackend, + FullExecutor, + ::Block, + ::RuntimeApi, >; diff --git a/core/finality-grandpa/src/tests.rs b/core/finality-grandpa/src/tests.rs index f0d5894bb6..f520649c1b 100644 --- a/core/finality-grandpa/src/tests.rs +++ b/core/finality-grandpa/src/tests.rs @@ -17,1020 +17,1250 @@ //! Tests and test helpers for GRANDPA. 
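The test helpers below (`make_topic`, `make_commit_topic`) reuse lib.rs's topic scheme: one topic per (set_id, round) for votes and one per set for commits, each just a hash of a short format string. A toy sketch of that convention, with std's `DefaultHasher` standing in for the header's `Hashing` type (the real code hashes with the block's `HashT` implementation):

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// stand-in for the runtime's hash function; returns a u64 instead of B::Hash
fn toy_hash(data: &[u8]) -> u64 {
    let mut hasher = DefaultHasher::new();
    data.hash(&mut hasher);
    hasher.finish()
}

/// Per-round vote topic, mirroring `message_topic`: hash of "{set_id}-{round}".
fn message_topic(round: u64, set_id: u64) -> u64 {
    toy_hash(format!("{}-{}", set_id, round).as_bytes())
}

/// Per-set commit topic, mirroring `commit_topic`: hash of "{set_id}-COMMITS".
fn commit_topic(set_id: u64) -> u64 {
    toy_hash(format!("{}-COMMITS", set_id).as_bytes())
}

fn main() {
    // distinct rounds within a set gossip on distinct topics...
    assert_ne!(message_topic(1, 0), message_topic(2, 0));
    // ...and commits live on their own per-set topic.
    assert_ne!(commit_topic(0), message_topic(1, 0));
}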
use super::*; -use network::test::{Block, DummySpecialization, Hash, TestNetFactory, Peer, PeersClient}; -use network::test::{PassThroughVerifier}; -use network::config::{ProtocolConfig, Roles}; -use parking_lot::Mutex; -use tokio::runtime::current_thread; -use keyring::AuthorityKeyring; use client::{ - BlockchainEvents, error::Result, - blockchain::Backend as BlockchainBackend, - runtime_api::{Core, RuntimeVersion, ApiExt}, + blockchain::Backend as BlockchainBackend, + error::Result, + runtime_api::{ApiExt, Core, RuntimeVersion}, + BlockchainEvents, }; -use test_client::{self, runtime::BlockNumber}; -use consensus_common::{BlockOrigin, ForkChoiceStrategy, ImportedAux, ImportBlock, ImportResult}; use consensus_common::import_queue::{SharedBlockImport, SharedJustificationImport}; +use consensus_common::{BlockOrigin, ForkChoiceStrategy, ImportBlock, ImportResult, ImportedAux}; +use keyring::AuthorityKeyring; +use network::config::{ProtocolConfig, Roles}; +use network::test::PassThroughVerifier; +use network::test::{Block, DummySpecialization, Hash, Peer, PeersClient, TestNetFactory}; +use parking_lot::Mutex; +use runtime_primitives::generic::BlockId; +use runtime_primitives::traits::{ApiRef, ProvideRuntimeApi}; use std::collections::{HashMap, HashSet}; use std::result; -use runtime_primitives::traits::{ApiRef, ProvideRuntimeApi}; -use runtime_primitives::generic::BlockId; -use substrate_primitives::{NativeOrEncoded, ExecutionContext}; +use substrate_primitives::{ExecutionContext, NativeOrEncoded}; +use test_client::{self, runtime::BlockNumber}; +use tokio::runtime::current_thread; use authorities::AuthoritySet; use consensus_changes::ConsensusChanges; -type PeerData = - Mutex< - Option< - LinkHalf< - test_client::Backend, - test_client::Executor, - Block, - test_client::runtime::RuntimeApi, - > - > - >; +type PeerData = Mutex< + Option< + LinkHalf< + test_client::Backend, + test_client::Executor, + Block, + test_client::runtime::RuntimeApi, + >, + >, +>; type GrandpaPeer = Peer; struct GrandpaTestNet { - peers: Vec>, - test_config: TestApi, - started: bool, + peers: Vec>, + test_config: TestApi, + started: bool, } impl GrandpaTestNet { - fn new(test_config: TestApi, n_peers: usize) -> Self { - let mut net = GrandpaTestNet { - peers: Vec::with_capacity(n_peers), - started: false, - test_config, - }; - let config = Self::default_config(); - for _ in 0..n_peers { - net.add_peer(&config); - } - net - } + fn new(test_config: TestApi, n_peers: usize) -> Self { + let mut net = GrandpaTestNet { + peers: Vec::with_capacity(n_peers), + started: false, + test_config, + }; + let config = Self::default_config(); + for _ in 0..n_peers { + net.add_peer(&config); + } + net + } } impl TestNetFactory for GrandpaTestNet { - type Specialization = DummySpecialization; - type Verifier = PassThroughVerifier; - type PeerData = PeerData; - - /// Create new test network with peers and given config. - fn from_config(_config: &ProtocolConfig) -> Self { - GrandpaTestNet { - peers: Vec::new(), - test_config: Default::default(), - started: false, - } - } - - fn default_config() -> ProtocolConfig { - // the authority role ensures gossip hits all nodes here. - ProtocolConfig { - roles: Roles::AUTHORITY, - } - } - - fn make_verifier(&self, _client: Arc, _cfg: &ProtocolConfig) - -> Arc - { - Arc::new(PassThroughVerifier(false)) // use non-instant finality. 
- } - - fn make_block_import(&self, client: Arc) - -> (SharedBlockImport, Option>, PeerData) - { - let (import, link) = block_import( - client, - Arc::new(self.test_config.clone()) - ).expect("Could not create block import for fresh peer."); - let shared_import = Arc::new(import); - (shared_import.clone(), Some(shared_import), Mutex::new(Some(link))) - } - - fn peer(&self, i: usize) -> &GrandpaPeer { - &self.peers[i] - } - - fn peers(&self) -> &Vec> { - &self.peers - } - - fn mut_peers>)>(&mut self, closure: F) { - closure(&mut self.peers); - } - - fn started(&self) -> bool { - self.started - } - - fn set_started(&mut self, new: bool) { - self.started = new; - } + type Specialization = DummySpecialization; + type Verifier = PassThroughVerifier; + type PeerData = PeerData; + + /// Create new test network with peers and given config. + fn from_config(_config: &ProtocolConfig) -> Self { + GrandpaTestNet { + peers: Vec::new(), + test_config: Default::default(), + started: false, + } + } + + fn default_config() -> ProtocolConfig { + // the authority role ensures gossip hits all nodes here. + ProtocolConfig { + roles: Roles::AUTHORITY, + } + } + + fn make_verifier( + &self, + _client: Arc, + _cfg: &ProtocolConfig, + ) -> Arc { + Arc::new(PassThroughVerifier(false)) // use non-instant finality. + } + + fn make_block_import( + &self, + client: Arc, + ) -> ( + SharedBlockImport, + Option>, + PeerData, + ) { + let (import, link) = block_import(client, Arc::new(self.test_config.clone())) + .expect("Could not create block import for fresh peer."); + let shared_import = Arc::new(import); + ( + shared_import.clone(), + Some(shared_import), + Mutex::new(Some(link)), + ) + } + + fn peer(&self, i: usize) -> &GrandpaPeer { + &self.peers[i] + } + + fn peers(&self) -> &Vec> { + &self.peers + } + + fn mut_peers>)>(&mut self, closure: F) { + closure(&mut self.peers); + } + + fn started(&self) -> bool { + self.started + } + + fn set_started(&mut self, new: bool) { + self.started = new; + } } #[derive(Clone)] struct MessageRouting { - inner: Arc>, - peer_id: usize, - validator: Arc>, + inner: Arc>, + peer_id: usize, + validator: Arc>, } impl MessageRouting { - fn new(inner: Arc>, peer_id: usize,) -> Self { - let validator = Arc::new(GossipValidator::new()); - let v = validator.clone(); - { - let inner = inner.lock(); - let peer = inner.peer(peer_id); - peer.with_gossip(move |gossip, _| { - gossip.register_validator(GRANDPA_ENGINE_ID, v); - }); - } - MessageRouting { - inner, - peer_id, - validator, - } - } - - fn drop_messages(&self, topic: Hash) { - let inner = self.inner.lock(); - let peer = inner.peer(self.peer_id); - peer.consensus_gossip_collect_garbage_for_topic(topic); - } + fn new(inner: Arc>, peer_id: usize) -> Self { + let validator = Arc::new(GossipValidator::new()); + let v = validator.clone(); + { + let inner = inner.lock(); + let peer = inner.peer(peer_id); + peer.with_gossip(move |gossip, _| { + gossip.register_validator(GRANDPA_ENGINE_ID, v); + }); + } + MessageRouting { + inner, + peer_id, + validator, + } + } + + fn drop_messages(&self, topic: Hash) { + let inner = self.inner.lock(); + let peer = inner.peer(self.peer_id); + peer.consensus_gossip_collect_garbage_for_topic(topic); + } } fn make_topic(round: u64, set_id: u64) -> Hash { - message_topic::(round, set_id) + message_topic::(round, set_id) } fn make_commit_topic(set_id: u64) -> Hash { - commit_topic::(set_id) + commit_topic::(set_id) } impl Network for MessageRouting { - type In = Box,Error=()> + Send>; - - fn messages_for(&self, round: 
u64, set_id: u64) -> Self::In { - self.validator.note_round(round, set_id); - let inner = self.inner.lock(); - let peer = inner.peer(self.peer_id); - let messages = peer.consensus_gossip_messages_for( - GRANDPA_ENGINE_ID, - make_topic(round, set_id), - ); - - let messages = messages.map_err( - move |_| panic!("Messages for round {} dropped too early", round) - ); - - Box::new(messages) - } - - fn send_message(&self, round: u64, set_id: u64, message: Vec, force: bool) { - let inner = self.inner.lock(); - inner.peer(self.peer_id) - .gossip_message(make_topic(round, set_id), GRANDPA_ENGINE_ID, message, force); - } - - fn drop_round_messages(&self, round: u64, set_id: u64) { - self.validator.drop_round(round, set_id); - let topic = make_topic(round, set_id); - self.drop_messages(topic); - } - - fn drop_set_messages(&self, set_id: u64) { - self.validator.drop_set(set_id); - let topic = make_commit_topic(set_id); - self.drop_messages(topic); - } - - fn commit_messages(&self, set_id: u64) -> Self::In { - self.validator.note_set(set_id); - let inner = self.inner.lock(); - let peer = inner.peer(self.peer_id); - let messages = peer.consensus_gossip_messages_for( - GRANDPA_ENGINE_ID, - make_commit_topic(set_id), - ); - - let messages = messages.map_err( - move |_| panic!("Commit messages for set {} dropped too early", set_id) - ); - - Box::new(messages) - } - - fn send_commit(&self, _round: u64, set_id: u64, message: Vec, force: bool) { - let inner = self.inner.lock(); - inner.peer(self.peer_id) - .gossip_message(make_commit_topic(set_id), GRANDPA_ENGINE_ID, message, force); - } - - fn announce(&self, _round: u64, _set_id: u64, _block: H256) { - - } + type In = Box, Error = ()> + Send>; + + fn messages_for(&self, round: u64, set_id: u64) -> Self::In { + self.validator.note_round(round, set_id); + let inner = self.inner.lock(); + let peer = inner.peer(self.peer_id); + let messages = + peer.consensus_gossip_messages_for(GRANDPA_ENGINE_ID, make_topic(round, set_id)); + + let messages = + messages.map_err(move |_| panic!("Messages for round {} dropped too early", round)); + + Box::new(messages) + } + + fn send_message(&self, round: u64, set_id: u64, message: Vec, force: bool) { + let inner = self.inner.lock(); + inner.peer(self.peer_id).gossip_message( + make_topic(round, set_id), + GRANDPA_ENGINE_ID, + message, + force, + ); + } + + fn drop_round_messages(&self, round: u64, set_id: u64) { + self.validator.drop_round(round, set_id); + let topic = make_topic(round, set_id); + self.drop_messages(topic); + } + + fn drop_set_messages(&self, set_id: u64) { + self.validator.drop_set(set_id); + let topic = make_commit_topic(set_id); + self.drop_messages(topic); + } + + fn commit_messages(&self, set_id: u64) -> Self::In { + self.validator.note_set(set_id); + let inner = self.inner.lock(); + let peer = inner.peer(self.peer_id); + let messages = + peer.consensus_gossip_messages_for(GRANDPA_ENGINE_ID, make_commit_topic(set_id)); + + let messages = messages + .map_err(move |_| panic!("Commit messages for set {} dropped too early", set_id)); + + Box::new(messages) + } + + fn send_commit(&self, _round: u64, set_id: u64, message: Vec, force: bool) { + let inner = self.inner.lock(); + inner.peer(self.peer_id).gossip_message( + make_commit_topic(set_id), + GRANDPA_ENGINE_ID, + message, + force, + ); + } + + fn announce(&self, _round: u64, _set_id: u64, _block: H256) {} } #[derive(Default, Clone)] struct TestApi { - genesis_authorities: Vec<(AuthorityId, u64)>, - scheduled_changes: Arc>>>, - forced_changes: Arc)>>>, + 
genesis_authorities: Vec<(AuthorityId, u64)>, + scheduled_changes: Arc>>>, + forced_changes: Arc)>>>, } impl TestApi { - fn new(genesis_authorities: Vec<(AuthorityId, u64)>) -> Self { - TestApi { - genesis_authorities, - scheduled_changes: Arc::new(Mutex::new(HashMap::new())), - forced_changes: Arc::new(Mutex::new(HashMap::new())), - } - } + fn new(genesis_authorities: Vec<(AuthorityId, u64)>) -> Self { + TestApi { + genesis_authorities, + scheduled_changes: Arc::new(Mutex::new(HashMap::new())), + forced_changes: Arc::new(Mutex::new(HashMap::new())), + } + } } struct RuntimeApi { - inner: TestApi, + inner: TestApi, } impl ProvideRuntimeApi for TestApi { - type Api = RuntimeApi; - - fn runtime_api<'a>(&'a self) -> ApiRef<'a, Self::Api> { - RuntimeApi { inner: self.clone() }.into() - } + type Api = RuntimeApi; + + fn runtime_api<'a>(&'a self) -> ApiRef<'a, Self::Api> { + RuntimeApi { + inner: self.clone(), + } + .into() + } } impl Core for RuntimeApi { - fn version_runtime_api_impl( - &self, - _: &BlockId, - _: ExecutionContext, - _: Option<()>, - _: Vec, - ) -> Result> { - unimplemented!("Not required for testing!") - } - - fn execute_block_runtime_api_impl( - &self, - _: &BlockId, - _: ExecutionContext, - _: Option<(Block)>, - _: Vec, - ) -> Result> { - unimplemented!("Not required for testing!") - } - - fn initialize_block_runtime_api_impl( - &self, - _: &BlockId, - _: ExecutionContext, - _: Option<&::Header>, - _: Vec, - ) -> Result> { - unimplemented!("Not required for testing!") - } + fn version_runtime_api_impl( + &self, + _: &BlockId, + _: ExecutionContext, + _: Option<()>, + _: Vec, + ) -> Result> { + unimplemented!("Not required for testing!") + } + + fn execute_block_runtime_api_impl( + &self, + _: &BlockId, + _: ExecutionContext, + _: Option<(Block)>, + _: Vec, + ) -> Result> { + unimplemented!("Not required for testing!") + } + + fn initialize_block_runtime_api_impl( + &self, + _: &BlockId, + _: ExecutionContext, + _: Option<&::Header>, + _: Vec, + ) -> Result> { + unimplemented!("Not required for testing!") + } } impl ApiExt for RuntimeApi { - fn map_api_result result::Result, R, E>( - &self, - _: F - ) -> result::Result { - unimplemented!("Not required for testing!") - } - - fn runtime_version_at(&self, _: &BlockId) -> Result { - unimplemented!("Not required for testing!") - } + fn map_api_result result::Result, R, E>( + &self, + _: F, + ) -> result::Result { + unimplemented!("Not required for testing!") + } + + fn runtime_version_at(&self, _: &BlockId) -> Result { + unimplemented!("Not required for testing!") + } } impl GrandpaApi for RuntimeApi { - fn grandpa_authorities_runtime_api_impl( - &self, - at: &BlockId, - _: ExecutionContext, - _: Option<()>, - _: Vec, - ) -> Result>> { - if at == &BlockId::Number(0) { - Ok(self.inner.genesis_authorities.clone()).map(NativeOrEncoded::Native) - } else { - panic!("should generally only request genesis authorities") - } - } - - fn grandpa_pending_change_runtime_api_impl( - &self, - at: &BlockId, - _: ExecutionContext, - _: Option<(&DigestFor)>, - _: Vec, - ) -> Result>>>> { - let parent_hash = match at { - &BlockId::Hash(at) => at, - _ => panic!("not requested by block hash!!"), - }; - - // we take only scheduled changes at given block number where there are no - // extrinsics. 
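The mock runtime API here answers pending-change queries out of a shared map keyed by parent hash: the expression that follows simply looks up and clones whatever change a test has injected for that parent. Reduced to its essentials, the pattern looks like the sketch below; `MockChanges`, `schedule`, and `pending_at` are illustrative names, not identifiers from this patch.

use parking_lot::Mutex;
use std::{collections::HashMap, hash::Hash, sync::Arc};

// Illustrative reduction of the TestApi pattern: the test side injects an
// authority-set change for a given parent hash, and the mocked runtime-api
// side answers lookups from the same shared map.
struct MockChanges<H: Hash + Eq, C: Clone> {
    by_parent: Arc<Mutex<HashMap<H, C>>>,
}

impl<H: Hash + Eq, C: Clone> MockChanges<H, C> {
    fn new() -> Self {
        MockChanges { by_parent: Arc::new(Mutex::new(HashMap::new())) }
    }

    // Called from the test to schedule a change at `parent`.
    fn schedule(&self, parent: H, change: C) {
        self.by_parent.lock().insert(parent, change);
    }

    // Called from the runtime-api side when queried at `parent`.
    fn pending_at(&self, parent: &H) -> Option<C> {
        self.by_parent.lock().get(parent).cloned()
    }
}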
- Ok(self.inner.scheduled_changes.lock().get(&parent_hash).map(|c| c.clone())).map(NativeOrEncoded::Native) - } - - fn grandpa_forced_change_runtime_api_impl( - &self, - at: &BlockId, - _: ExecutionContext, - _: Option<(&DigestFor)>, - _: Vec, - ) - -> Result, ScheduledChange>)>>> { - let parent_hash = match at { - &BlockId::Hash(at) => at, - _ => panic!("not requested by block hash!!"), - }; - - // we take only scheduled changes at given block number where there are no - // extrinsics. - Ok(self.inner.forced_changes.lock().get(&parent_hash).map(|c| c.clone())).map(NativeOrEncoded::Native) - } + fn grandpa_authorities_runtime_api_impl( + &self, + at: &BlockId, + _: ExecutionContext, + _: Option<()>, + _: Vec, + ) -> Result>> { + if at == &BlockId::Number(0) { + Ok(self.inner.genesis_authorities.clone()).map(NativeOrEncoded::Native) + } else { + panic!("should generally only request genesis authorities") + } + } + + fn grandpa_pending_change_runtime_api_impl( + &self, + at: &BlockId, + _: ExecutionContext, + _: Option<(&DigestFor)>, + _: Vec, + ) -> Result>>>> { + let parent_hash = match at { + &BlockId::Hash(at) => at, + _ => panic!("not requested by block hash!!"), + }; + + // we take only scheduled changes at given block number where there are no + // extrinsics. + Ok(self + .inner + .scheduled_changes + .lock() + .get(&parent_hash) + .map(|c| c.clone())) + .map(NativeOrEncoded::Native) + } + + fn grandpa_forced_change_runtime_api_impl( + &self, + at: &BlockId, + _: ExecutionContext, + _: Option<(&DigestFor)>, + _: Vec, + ) -> Result, ScheduledChange>)>>> + { + let parent_hash = match at { + &BlockId::Hash(at) => at, + _ => panic!("not requested by block hash!!"), + }; + + // we take only scheduled changes at given block number where there are no + // extrinsics. + Ok(self + .inner + .forced_changes + .lock() + .get(&parent_hash) + .map(|c| c.clone())) + .map(NativeOrEncoded::Native) + } } const TEST_GOSSIP_DURATION: Duration = Duration::from_millis(500); const TEST_ROUTING_INTERVAL: Duration = Duration::from_millis(50); fn make_ids(keys: &[AuthorityKeyring]) -> Vec<(AuthorityId, u64)> { - keys.iter() - .map(|key| AuthorityId(key.to_raw_public())) - .map(|id| (id, 1)) - .collect() + keys.iter() + .map(|key| AuthorityId(key.to_raw_public())) + .map(|id| (id, 1)) + .collect() } // run the voters to completion. provide a closure to be invoked after // the voters are spawned but before blocking on them. 
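The helper defined next spawns one GRANDPA voter per peer, collects a finality-notification future for each, then blocks on all of them while a timer interval keeps pumping the test network. Stripped of the test specifics, it is the futures 0.1 join_all/select idiom; the sketch below shows only that idiom, with `wait_or_drive` as a placeholder name that does not appear in the patch.

use futures::Future;

// Whichever side finishes first resolves the combined future: either every
// awaited notification stream completed, or the driver loop was cut short.
fn wait_or_drive<A, B>(wait_for: A, drive: B) -> impl Future<Item = (), Error = ()>
where
    A: Future<Item = (), Error = ()>,
    B: Future<Item = (), Error = ()>,
{
    wait_for.select(drive).map(|_| ()).map_err(|_| ())
}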
fn run_to_completion_with( - blocks: u64, - net: Arc>, - peers: &[AuthorityKeyring], - before_waiting: F, + blocks: u64, + net: Arc>, + peers: &[AuthorityKeyring], + before_waiting: F, ) -> u64 { - use parking_lot::RwLock; - - let mut finality_notifications = Vec::new(); - let mut runtime = current_thread::Runtime::new().unwrap(); - - let highest_finalized = Arc::new(RwLock::new(0)); - - for (peer_id, key) in peers.iter().enumerate() { - let highest_finalized = highest_finalized.clone(); - let (client, link) = { - let net = net.lock(); - // temporary needed for some reason - let link = net.peers[peer_id].data.lock().take().expect("link initialized at startup; qed"); - ( - net.peers[peer_id].client().clone(), - link, - ) - }; - finality_notifications.push( - client.finality_notification_stream() - .take_while(move |n| { - let mut highest_finalized = highest_finalized.write(); - if *n.header.number() > *highest_finalized { - *highest_finalized = *n.header.number(); - } - Ok(n.header.number() < &blocks) - }) - .for_each(|_| Ok(())) - ); - fn assert_send(_: &T) { } - - let voter = run_grandpa( - Config { - gossip_duration: TEST_GOSSIP_DURATION, - justification_period: 32, - local_key: Some(Arc::new(key.clone().into())), - name: Some(format!("peer#{}", peer_id)), - }, - link, - MessageRouting::new(net.clone(), peer_id), - InherentDataProviders::new(), - futures::empty(), - ).expect("all in order with client and network"); - - assert_send(&voter); - - runtime.spawn(voter); - } - - // wait for all finalized on each. - let wait_for = ::futures::future::join_all(finality_notifications) - .map(|_| ()) - .map_err(|_| ()); - - let drive_to_completion = ::tokio::timer::Interval::new_interval(TEST_ROUTING_INTERVAL) - .for_each(move |_| { - net.lock().send_import_notifications(); - net.lock().send_finality_notifications(); - net.lock().route_fast(); - Ok(()) - }) - .map(|_| ()) - .map_err(|_| ()); - - (before_waiting)(); - - runtime.block_on(wait_for.select(drive_to_completion).map_err(|_| ())).unwrap(); - - let highest_finalized = *highest_finalized.read(); - - highest_finalized + use parking_lot::RwLock; + + let mut finality_notifications = Vec::new(); + let mut runtime = current_thread::Runtime::new().unwrap(); + + let highest_finalized = Arc::new(RwLock::new(0)); + + for (peer_id, key) in peers.iter().enumerate() { + let highest_finalized = highest_finalized.clone(); + let (client, link) = { + let net = net.lock(); + // temporary needed for some reason + let link = net.peers[peer_id] + .data + .lock() + .take() + .expect("link initialized at startup; qed"); + (net.peers[peer_id].client().clone(), link) + }; + finality_notifications.push( + client + .finality_notification_stream() + .take_while(move |n| { + let mut highest_finalized = highest_finalized.write(); + if *n.header.number() > *highest_finalized { + *highest_finalized = *n.header.number(); + } + Ok(n.header.number() < &blocks) + }) + .for_each(|_| Ok(())), + ); + fn assert_send(_: &T) {} + + let voter = run_grandpa( + Config { + gossip_duration: TEST_GOSSIP_DURATION, + justification_period: 32, + local_key: Some(Arc::new(key.clone().into())), + name: Some(format!("peer#{}", peer_id)), + }, + link, + MessageRouting::new(net.clone(), peer_id), + InherentDataProviders::new(), + futures::empty(), + ) + .expect("all in order with client and network"); + + assert_send(&voter); + + runtime.spawn(voter); + } + + // wait for all finalized on each. 
+ let wait_for = ::futures::future::join_all(finality_notifications) + .map(|_| ()) + .map_err(|_| ()); + + let drive_to_completion = ::tokio::timer::Interval::new_interval(TEST_ROUTING_INTERVAL) + .for_each(move |_| { + net.lock().send_import_notifications(); + net.lock().send_finality_notifications(); + net.lock().route_fast(); + Ok(()) + }) + .map(|_| ()) + .map_err(|_| ()); + + (before_waiting)(); + + runtime + .block_on(wait_for.select(drive_to_completion).map_err(|_| ())) + .unwrap(); + + let highest_finalized = *highest_finalized.read(); + + highest_finalized } -fn run_to_completion(blocks: u64, net: Arc>, peers: &[AuthorityKeyring]) -> u64 { - run_to_completion_with(blocks, net, peers, || {}) +fn run_to_completion( + blocks: u64, + net: Arc>, + peers: &[AuthorityKeyring], +) -> u64 { + run_to_completion_with(blocks, net, peers, || {}) } #[test] fn finalize_3_voters_no_observers() { - let _ = env_logger::try_init(); - let peers = &[AuthorityKeyring::Alice, AuthorityKeyring::Bob, AuthorityKeyring::Charlie]; - let voters = make_ids(peers); - - let mut net = GrandpaTestNet::new(TestApi::new(voters), 3); - net.peer(0).push_blocks(20, false); - net.sync(); - - for i in 0..3 { - assert_eq!(net.peer(i).client().info().unwrap().chain.best_number, 20, - "Peer #{} failed to sync", i); - } - - let net = Arc::new(Mutex::new(net)); - run_to_completion(20, net.clone(), peers); - - // normally there's no justification for finalized blocks - assert!(net.lock().peer(0).client().backend().blockchain().justification(BlockId::Number(20)).unwrap().is_none(), - "Extra justification for block#1"); + let _ = env_logger::try_init(); + let peers = &[ + AuthorityKeyring::Alice, + AuthorityKeyring::Bob, + AuthorityKeyring::Charlie, + ]; + let voters = make_ids(peers); + + let mut net = GrandpaTestNet::new(TestApi::new(voters), 3); + net.peer(0).push_blocks(20, false); + net.sync(); + + for i in 0..3 { + assert_eq!( + net.peer(i).client().info().unwrap().chain.best_number, + 20, + "Peer #{} failed to sync", + i + ); + } + + let net = Arc::new(Mutex::new(net)); + run_to_completion(20, net.clone(), peers); + + // normally there's no justification for finalized blocks + assert!( + net.lock() + .peer(0) + .client() + .backend() + .blockchain() + .justification(BlockId::Number(20)) + .unwrap() + .is_none(), + "Extra justification for block#1" + ); } #[test] fn finalize_3_voters_1_observer() { - let peers = &[AuthorityKeyring::Alice, AuthorityKeyring::Bob, AuthorityKeyring::Charlie]; - let voters = make_ids(peers); - - let mut net = GrandpaTestNet::new(TestApi::new(voters), 4); - net.peer(0).push_blocks(20, false); - net.sync(); - - let net = Arc::new(Mutex::new(net)); - let mut finality_notifications = Vec::new(); - - let mut runtime = current_thread::Runtime::new().unwrap(); - let all_peers = peers.iter() - .cloned() - .map(|key| Some(Arc::new(key.into()))) - .chain(::std::iter::once(None)); - - for (peer_id, local_key) in all_peers.enumerate() { - let (client, link) = { - let net = net.lock(); - let link = net.peers[peer_id].data.lock().take().expect("link initialized at startup; qed"); - ( - net.peers[peer_id].client().clone(), - link, - ) - }; - finality_notifications.push( - client.finality_notification_stream() - .take_while(|n| Ok(n.header.number() < &20)) - .for_each(move |_| Ok(())) - ); - let voter = run_grandpa( - Config { - gossip_duration: TEST_GOSSIP_DURATION, - justification_period: 32, - local_key, - name: Some(format!("peer#{}", peer_id)), - }, - link, - MessageRouting::new(net.clone(), peer_id), - 
InherentDataProviders::new(), - futures::empty(), - ).expect("all in order with client and network"); - - runtime.spawn(voter); - } - - // wait for all finalized on each. - let wait_for = ::futures::future::join_all(finality_notifications) - .map(|_| ()) - .map_err(|_| ()); - - let drive_to_completion = ::tokio::timer::Interval::new_interval(TEST_ROUTING_INTERVAL) - .for_each(move |_| { net.lock().route_fast(); Ok(()) }) - .map(|_| ()) - .map_err(|_| ()); - - runtime.block_on(wait_for.select(drive_to_completion).map_err(|_| ())).unwrap(); + let peers = &[ + AuthorityKeyring::Alice, + AuthorityKeyring::Bob, + AuthorityKeyring::Charlie, + ]; + let voters = make_ids(peers); + + let mut net = GrandpaTestNet::new(TestApi::new(voters), 4); + net.peer(0).push_blocks(20, false); + net.sync(); + + let net = Arc::new(Mutex::new(net)); + let mut finality_notifications = Vec::new(); + + let mut runtime = current_thread::Runtime::new().unwrap(); + let all_peers = peers + .iter() + .cloned() + .map(|key| Some(Arc::new(key.into()))) + .chain(::std::iter::once(None)); + + for (peer_id, local_key) in all_peers.enumerate() { + let (client, link) = { + let net = net.lock(); + let link = net.peers[peer_id] + .data + .lock() + .take() + .expect("link initialized at startup; qed"); + (net.peers[peer_id].client().clone(), link) + }; + finality_notifications.push( + client + .finality_notification_stream() + .take_while(|n| Ok(n.header.number() < &20)) + .for_each(move |_| Ok(())), + ); + let voter = run_grandpa( + Config { + gossip_duration: TEST_GOSSIP_DURATION, + justification_period: 32, + local_key, + name: Some(format!("peer#{}", peer_id)), + }, + link, + MessageRouting::new(net.clone(), peer_id), + InherentDataProviders::new(), + futures::empty(), + ) + .expect("all in order with client and network"); + + runtime.spawn(voter); + } + + // wait for all finalized on each. 
+ let wait_for = ::futures::future::join_all(finality_notifications) + .map(|_| ()) + .map_err(|_| ()); + + let drive_to_completion = ::tokio::timer::Interval::new_interval(TEST_ROUTING_INTERVAL) + .for_each(move |_| { + net.lock().route_fast(); + Ok(()) + }) + .map(|_| ()) + .map_err(|_| ()); + + runtime + .block_on(wait_for.select(drive_to_completion).map_err(|_| ())) + .unwrap(); } #[test] fn transition_3_voters_twice_1_observer() { - let _ = env_logger::try_init(); - let peers_a = &[ - AuthorityKeyring::Alice, - AuthorityKeyring::Bob, - AuthorityKeyring::Charlie, - ]; - - let peers_b = &[ - AuthorityKeyring::Dave, - AuthorityKeyring::Eve, - AuthorityKeyring::Ferdie, - ]; - - let peers_c = &[ - AuthorityKeyring::Alice, - AuthorityKeyring::Eve, - AuthorityKeyring::Two, - ]; - - let observer = &[AuthorityKeyring::One]; - - let genesis_voters = make_ids(peers_a); - - let api = TestApi::new(genesis_voters); - let transitions = api.scheduled_changes.clone(); - let net = Arc::new(Mutex::new(GrandpaTestNet::new(api, 8))); - - let mut runtime = current_thread::Runtime::new().unwrap(); - - net.lock().peer(0).push_blocks(1, false); - net.lock().sync(); - - for (i, peer) in net.lock().peers().iter().enumerate() { - assert_eq!(peer.client().info().unwrap().chain.best_number, 1, - "Peer #{} failed to sync", i); - - let set: AuthoritySet = crate::aux_schema::load_authorities( - &**peer.client().backend() - ).unwrap(); - - assert_eq!(set.current(), (0, make_ids(peers_a).as_slice())); - assert_eq!(set.pending_changes().count(), 0); - } - - { - let net = net.clone(); - let client = net.lock().peers[0].client().clone(); - let transitions = transitions.clone(); - let add_transition = move |parent_hash, change| { - transitions.lock().insert(parent_hash, change); - }; - let peers_c = peers_c.clone(); - - // wait for blocks to be finalized before generating new ones - let block_production = client.finality_notification_stream() - .take_while(|n| Ok(n.header.number() < &30)) - .for_each(move |n| { - match n.header.number() { - 1 => { - // first 14 blocks. - net.lock().peer(0).push_blocks(13, false); - }, - 14 => { - // generate transition at block 15, applied at 20. - net.lock().peer(0).generate_blocks(1, BlockOrigin::File, |builder| { - let block = builder.bake().unwrap(); - add_transition(*block.header.parent_hash(), ScheduledChange { - next_authorities: make_ids(peers_b), - delay: 4, - }); - - block - }); - net.lock().peer(0).push_blocks(5, false); - }, - 20 => { - // at block 21 we do another transition, but this time instant. - // add more until we have 30. 
- net.lock().peer(0).generate_blocks(1, BlockOrigin::File, |builder| { - let block = builder.bake().unwrap(); - add_transition(*block.header.parent_hash(), ScheduledChange { - next_authorities: make_ids(&peers_c), - delay: 0, - }); - - block - }); - net.lock().peer(0).push_blocks(9, false); - }, - _ => {}, - } - - Ok(()) - }); - - runtime.spawn(block_production); - } - - let mut finality_notifications = Vec::new(); - let all_peers = peers_a.iter() - .chain(peers_b) - .chain(peers_c) - .chain(observer) - .cloned() - .collect::>() // deduplicate - .into_iter() - .map(|key| Some(Arc::new(key.into()))) - .enumerate(); - - for (peer_id, local_key) in all_peers { - let (client, link) = { - let net = net.lock(); - let link = net.peers[peer_id].data.lock().take().expect("link initialized at startup; qed"); - ( - net.peers[peer_id].client().clone(), - link, - ) - }; - finality_notifications.push( - client.finality_notification_stream() - .take_while(|n| Ok(n.header.number() < &30)) - .for_each(move |_| Ok(())) - .map(move |()| { - let set: AuthoritySet = crate::aux_schema::load_authorities( - &**client.backend() - ).unwrap(); - - assert_eq!(set.current(), (2, make_ids(peers_c).as_slice())); - assert_eq!(set.pending_changes().count(), 0); - }) - ); - let voter = run_grandpa( - Config { - gossip_duration: TEST_GOSSIP_DURATION, - justification_period: 32, - local_key, - name: Some(format!("peer#{}", peer_id)), - }, - link, - MessageRouting::new(net.clone(), peer_id), - InherentDataProviders::new(), - futures::empty(), - ).expect("all in order with client and network"); - - runtime.spawn(voter); - } - - // wait for all finalized on each. - let wait_for = ::futures::future::join_all(finality_notifications) - .map(|_| ()) - .map_err(|_| ()); - - let drive_to_completion = ::tokio::timer::Interval::new_interval(TEST_ROUTING_INTERVAL) - .for_each(move |_| { - net.lock().send_import_notifications(); - net.lock().send_finality_notifications(); - net.lock().route_fast(); - Ok(()) - }) - .map(|_| ()) - .map_err(|_| ()); - - runtime.block_on(wait_for.select(drive_to_completion).map_err(|_| ())).unwrap(); + let _ = env_logger::try_init(); + let peers_a = &[ + AuthorityKeyring::Alice, + AuthorityKeyring::Bob, + AuthorityKeyring::Charlie, + ]; + + let peers_b = &[ + AuthorityKeyring::Dave, + AuthorityKeyring::Eve, + AuthorityKeyring::Ferdie, + ]; + + let peers_c = &[ + AuthorityKeyring::Alice, + AuthorityKeyring::Eve, + AuthorityKeyring::Two, + ]; + + let observer = &[AuthorityKeyring::One]; + + let genesis_voters = make_ids(peers_a); + + let api = TestApi::new(genesis_voters); + let transitions = api.scheduled_changes.clone(); + let net = Arc::new(Mutex::new(GrandpaTestNet::new(api, 8))); + + let mut runtime = current_thread::Runtime::new().unwrap(); + + net.lock().peer(0).push_blocks(1, false); + net.lock().sync(); + + for (i, peer) in net.lock().peers().iter().enumerate() { + assert_eq!( + peer.client().info().unwrap().chain.best_number, + 1, + "Peer #{} failed to sync", + i + ); + + let set: AuthoritySet = + crate::aux_schema::load_authorities(&**peer.client().backend()).unwrap(); + + assert_eq!(set.current(), (0, make_ids(peers_a).as_slice())); + assert_eq!(set.pending_changes().count(), 0); + } + + { + let net = net.clone(); + let client = net.lock().peers[0].client().clone(); + let transitions = transitions.clone(); + let add_transition = move |parent_hash, change| { + transitions.lock().insert(parent_hash, change); + }; + let peers_c = peers_c.clone(); + + // wait for blocks to be finalized before 
generating new ones + let block_production = client + .finality_notification_stream() + .take_while(|n| Ok(n.header.number() < &30)) + .for_each(move |n| { + match n.header.number() { + 1 => { + // first 14 blocks. + net.lock().peer(0).push_blocks(13, false); + } + 14 => { + // generate transition at block 15, applied at 20. + net.lock() + .peer(0) + .generate_blocks(1, BlockOrigin::File, |builder| { + let block = builder.bake().unwrap(); + add_transition( + *block.header.parent_hash(), + ScheduledChange { + next_authorities: make_ids(peers_b), + delay: 4, + }, + ); + + block + }); + net.lock().peer(0).push_blocks(5, false); + } + 20 => { + // at block 21 we do another transition, but this time instant. + // add more until we have 30. + net.lock() + .peer(0) + .generate_blocks(1, BlockOrigin::File, |builder| { + let block = builder.bake().unwrap(); + add_transition( + *block.header.parent_hash(), + ScheduledChange { + next_authorities: make_ids(&peers_c), + delay: 0, + }, + ); + + block + }); + net.lock().peer(0).push_blocks(9, false); + } + _ => {} + } + + Ok(()) + }); + + runtime.spawn(block_production); + } + + let mut finality_notifications = Vec::new(); + let all_peers = peers_a + .iter() + .chain(peers_b) + .chain(peers_c) + .chain(observer) + .cloned() + .collect::>() // deduplicate + .into_iter() + .map(|key| Some(Arc::new(key.into()))) + .enumerate(); + + for (peer_id, local_key) in all_peers { + let (client, link) = { + let net = net.lock(); + let link = net.peers[peer_id] + .data + .lock() + .take() + .expect("link initialized at startup; qed"); + (net.peers[peer_id].client().clone(), link) + }; + finality_notifications.push( + client + .finality_notification_stream() + .take_while(|n| Ok(n.header.number() < &30)) + .for_each(move |_| Ok(())) + .map(move |()| { + let set: AuthoritySet = + crate::aux_schema::load_authorities(&**client.backend()).unwrap(); + + assert_eq!(set.current(), (2, make_ids(peers_c).as_slice())); + assert_eq!(set.pending_changes().count(), 0); + }), + ); + let voter = run_grandpa( + Config { + gossip_duration: TEST_GOSSIP_DURATION, + justification_period: 32, + local_key, + name: Some(format!("peer#{}", peer_id)), + }, + link, + MessageRouting::new(net.clone(), peer_id), + InherentDataProviders::new(), + futures::empty(), + ) + .expect("all in order with client and network"); + + runtime.spawn(voter); + } + + // wait for all finalized on each. + let wait_for = ::futures::future::join_all(finality_notifications) + .map(|_| ()) + .map_err(|_| ()); + + let drive_to_completion = ::tokio::timer::Interval::new_interval(TEST_ROUTING_INTERVAL) + .for_each(move |_| { + net.lock().send_import_notifications(); + net.lock().send_finality_notifications(); + net.lock().route_fast(); + Ok(()) + }) + .map(|_| ()) + .map_err(|_| ()); + + runtime + .block_on(wait_for.select(drive_to_completion).map_err(|_| ())) + .unwrap(); } #[test] fn justification_is_emitted_when_consensus_data_changes() { - let peers = &[AuthorityKeyring::Alice, AuthorityKeyring::Bob, AuthorityKeyring::Charlie]; - let mut net = GrandpaTestNet::new(TestApi::new(make_ids(peers)), 3); - - // import block#1 WITH consensus data change - let new_authorities = vec![AuthorityId::from_raw([42; 32])]; - net.peer(0).push_authorities_change_block(new_authorities); - net.sync(); - let net = Arc::new(Mutex::new(net)); - run_to_completion(1, net.clone(), peers); - - // ... 
and check that there's no justification for block#1 - assert!(net.lock().peer(0).client().backend().blockchain().justification(BlockId::Number(1)).unwrap().is_some(), - "Missing justification for block#1"); + let peers = &[ + AuthorityKeyring::Alice, + AuthorityKeyring::Bob, + AuthorityKeyring::Charlie, + ]; + let mut net = GrandpaTestNet::new(TestApi::new(make_ids(peers)), 3); + + // import block#1 WITH consensus data change + let new_authorities = vec![AuthorityId::from_raw([42; 32])]; + net.peer(0).push_authorities_change_block(new_authorities); + net.sync(); + let net = Arc::new(Mutex::new(net)); + run_to_completion(1, net.clone(), peers); + + // ... and check that there's no justification for block#1 + assert!( + net.lock() + .peer(0) + .client() + .backend() + .blockchain() + .justification(BlockId::Number(1)) + .unwrap() + .is_some(), + "Missing justification for block#1" + ); } #[test] fn justification_is_generated_periodically() { - let peers = &[AuthorityKeyring::Alice, AuthorityKeyring::Bob, AuthorityKeyring::Charlie]; - let voters = make_ids(peers); - - let mut net = GrandpaTestNet::new(TestApi::new(voters), 3); - net.peer(0).push_blocks(32, false); - net.sync(); - - let net = Arc::new(Mutex::new(net)); - run_to_completion(32, net.clone(), peers); - - // when block#32 (justification_period) is finalized, justification - // is required => generated - for i in 0..3 { - assert!(net.lock().peer(i).client().backend().blockchain() - .justification(BlockId::Number(32)).unwrap().is_some()); - } + let peers = &[ + AuthorityKeyring::Alice, + AuthorityKeyring::Bob, + AuthorityKeyring::Charlie, + ]; + let voters = make_ids(peers); + + let mut net = GrandpaTestNet::new(TestApi::new(voters), 3); + net.peer(0).push_blocks(32, false); + net.sync(); + + let net = Arc::new(Mutex::new(net)); + run_to_completion(32, net.clone(), peers); + + // when block#32 (justification_period) is finalized, justification + // is required => generated + for i in 0..3 { + assert!(net + .lock() + .peer(i) + .client() + .backend() + .blockchain() + .justification(BlockId::Number(32)) + .unwrap() + .is_some()); + } } #[test] fn consensus_changes_works() { - let mut changes = ConsensusChanges::::empty(); - - // pending changes are not finalized - changes.note_change((10, H256::from_low_u64_be(1))); - assert_eq!(changes.finalize((5, H256::from_low_u64_be(5)), |_| Ok(None)).unwrap(), (false, false)); - - // no change is selected from competing pending changes - changes.note_change((1, H256::from_low_u64_be(1))); - changes.note_change((1, H256::from_low_u64_be(101))); - assert_eq!(changes.finalize((10, H256::from_low_u64_be(10)), |_| Ok(Some(H256::from_low_u64_be(1001)))).unwrap(), (true, false)); - - // change is selected from competing pending changes - changes.note_change((1, H256::from_low_u64_be(1))); - changes.note_change((1, H256::from_low_u64_be(101))); - assert_eq!(changes.finalize((10, H256::from_low_u64_be(10)), |_| Ok(Some(H256::from_low_u64_be(1)))).unwrap(), (true, true)); + let mut changes = ConsensusChanges::::empty(); + + // pending changes are not finalized + changes.note_change((10, H256::from_low_u64_be(1))); + assert_eq!( + changes + .finalize((5, H256::from_low_u64_be(5)), |_| Ok(None)) + .unwrap(), + (false, false) + ); + + // no change is selected from competing pending changes + changes.note_change((1, H256::from_low_u64_be(1))); + changes.note_change((1, H256::from_low_u64_be(101))); + assert_eq!( + changes + .finalize((10, H256::from_low_u64_be(10)), |_| Ok(Some( + H256::from_low_u64_be(1001) 
+ ))) + .unwrap(), + (true, false) + ); + + // change is selected from competing pending changes + changes.note_change((1, H256::from_low_u64_be(1))); + changes.note_change((1, H256::from_low_u64_be(101))); + assert_eq!( + changes + .finalize((10, H256::from_low_u64_be(10)), |_| Ok(Some( + H256::from_low_u64_be(1) + ))) + .unwrap(), + (true, true) + ); } #[test] fn sync_justifications_on_change_blocks() { - let peers_a = &[AuthorityKeyring::Alice, AuthorityKeyring::Bob, AuthorityKeyring::Charlie]; - let peers_b = &[AuthorityKeyring::Alice, AuthorityKeyring::Bob]; - let voters = make_ids(peers_b); - - // 4 peers, 3 of them are authorities and participate in grandpa - let api = TestApi::new(voters); - let transitions = api.scheduled_changes.clone(); - let mut net = GrandpaTestNet::new(api, 4); - - // add 20 blocks - net.peer(0).push_blocks(20, false); - - // at block 21 we do add a transition which is instant - net.peer(0).generate_blocks(1, BlockOrigin::File, |builder| { - let block = builder.bake().unwrap(); - transitions.lock().insert(*block.header.parent_hash(), ScheduledChange { - next_authorities: make_ids(peers_b), - delay: 0, - }); - block - }); - - // add more blocks on top of it (until we have 25) - net.peer(0).push_blocks(4, false); - net.sync(); - - for i in 0..4 { - assert_eq!(net.peer(i).client().info().unwrap().chain.best_number, 25, - "Peer #{} failed to sync", i); - } - - let net = Arc::new(Mutex::new(net)); - run_to_completion(25, net.clone(), peers_a); - - // the first 3 peers are grandpa voters and therefore have already finalized - // block 21 and stored a justification - for i in 0..3 { - assert!(net.lock().peer(i).client().justification(&BlockId::Number(21)).unwrap().is_some()); - } - - // the last peer should get the justification by syncing from other peers - while net.lock().peer(3).client().justification(&BlockId::Number(21)).unwrap().is_none() { - net.lock().route_fast(); - } + let peers_a = &[ + AuthorityKeyring::Alice, + AuthorityKeyring::Bob, + AuthorityKeyring::Charlie, + ]; + let peers_b = &[AuthorityKeyring::Alice, AuthorityKeyring::Bob]; + let voters = make_ids(peers_b); + + // 4 peers, 3 of them are authorities and participate in grandpa + let api = TestApi::new(voters); + let transitions = api.scheduled_changes.clone(); + let mut net = GrandpaTestNet::new(api, 4); + + // add 20 blocks + net.peer(0).push_blocks(20, false); + + // at block 21 we do add a transition which is instant + net.peer(0) + .generate_blocks(1, BlockOrigin::File, |builder| { + let block = builder.bake().unwrap(); + transitions.lock().insert( + *block.header.parent_hash(), + ScheduledChange { + next_authorities: make_ids(peers_b), + delay: 0, + }, + ); + block + }); + + // add more blocks on top of it (until we have 25) + net.peer(0).push_blocks(4, false); + net.sync(); + + for i in 0..4 { + assert_eq!( + net.peer(i).client().info().unwrap().chain.best_number, + 25, + "Peer #{} failed to sync", + i + ); + } + + let net = Arc::new(Mutex::new(net)); + run_to_completion(25, net.clone(), peers_a); + + // the first 3 peers are grandpa voters and therefore have already finalized + // block 21 and stored a justification + for i in 0..3 { + assert!(net + .lock() + .peer(i) + .client() + .justification(&BlockId::Number(21)) + .unwrap() + .is_some()); + } + + // the last peer should get the justification by syncing from other peers + while net + .lock() + .peer(3) + .client() + .justification(&BlockId::Number(21)) + .unwrap() + .is_none() + { + net.lock().route_fast(); + } } #[test] fn 
finalizes_multiple_pending_changes_in_order() { - let _ = env_logger::try_init(); - - let peers_a = &[AuthorityKeyring::Alice, AuthorityKeyring::Bob, AuthorityKeyring::Charlie]; - let peers_b = &[AuthorityKeyring::Dave, AuthorityKeyring::Eve, AuthorityKeyring::Ferdie]; - let peers_c = &[AuthorityKeyring::Dave, AuthorityKeyring::Alice, AuthorityKeyring::Bob]; - - let all_peers = &[ - AuthorityKeyring::Alice, AuthorityKeyring::Bob, AuthorityKeyring::Charlie, - AuthorityKeyring::Dave, AuthorityKeyring::Eve, AuthorityKeyring::Ferdie, - ]; - let genesis_voters = make_ids(peers_a); - - // 6 peers, 3 of them are authorities and participate in grandpa from genesis - let api = TestApi::new(genesis_voters); - let transitions = api.scheduled_changes.clone(); - let mut net = GrandpaTestNet::new(api, 6); - - // add 20 blocks - net.peer(0).push_blocks(20, false); - - // at block 21 we do add a transition which is instant - net.peer(0).generate_blocks(1, BlockOrigin::File, |builder| { - let block = builder.bake().unwrap(); - transitions.lock().insert(*block.header.parent_hash(), ScheduledChange { - next_authorities: make_ids(peers_b), - delay: 0, - }); - block - }); - - // add more blocks on top of it (until we have 25) - net.peer(0).push_blocks(4, false); - - // at block 26 we add another which is enacted at block 30 - net.peer(0).generate_blocks(1, BlockOrigin::File, |builder| { - let block = builder.bake().unwrap(); - transitions.lock().insert(*block.header.parent_hash(), ScheduledChange { - next_authorities: make_ids(peers_c), - delay: 4, - }); - block - }); - - // add more blocks on top of it (until we have 30) - net.peer(0).push_blocks(4, false); - - net.sync(); - - // all peers imported both change blocks - for i in 0..6 { - assert_eq!(net.peer(i).client().info().unwrap().chain.best_number, 30, - "Peer #{} failed to sync", i); - } - - let net = Arc::new(Mutex::new(net)); - run_to_completion(30, net.clone(), all_peers); + let _ = env_logger::try_init(); + + let peers_a = &[ + AuthorityKeyring::Alice, + AuthorityKeyring::Bob, + AuthorityKeyring::Charlie, + ]; + let peers_b = &[ + AuthorityKeyring::Dave, + AuthorityKeyring::Eve, + AuthorityKeyring::Ferdie, + ]; + let peers_c = &[ + AuthorityKeyring::Dave, + AuthorityKeyring::Alice, + AuthorityKeyring::Bob, + ]; + + let all_peers = &[ + AuthorityKeyring::Alice, + AuthorityKeyring::Bob, + AuthorityKeyring::Charlie, + AuthorityKeyring::Dave, + AuthorityKeyring::Eve, + AuthorityKeyring::Ferdie, + ]; + let genesis_voters = make_ids(peers_a); + + // 6 peers, 3 of them are authorities and participate in grandpa from genesis + let api = TestApi::new(genesis_voters); + let transitions = api.scheduled_changes.clone(); + let mut net = GrandpaTestNet::new(api, 6); + + // add 20 blocks + net.peer(0).push_blocks(20, false); + + // at block 21 we do add a transition which is instant + net.peer(0) + .generate_blocks(1, BlockOrigin::File, |builder| { + let block = builder.bake().unwrap(); + transitions.lock().insert( + *block.header.parent_hash(), + ScheduledChange { + next_authorities: make_ids(peers_b), + delay: 0, + }, + ); + block + }); + + // add more blocks on top of it (until we have 25) + net.peer(0).push_blocks(4, false); + + // at block 26 we add another which is enacted at block 30 + net.peer(0) + .generate_blocks(1, BlockOrigin::File, |builder| { + let block = builder.bake().unwrap(); + transitions.lock().insert( + *block.header.parent_hash(), + ScheduledChange { + next_authorities: make_ids(peers_c), + delay: 4, + }, + ); + block + }); + + // add more 
blocks on top of it (until we have 30) + net.peer(0).push_blocks(4, false); + + net.sync(); + + // all peers imported both change blocks + for i in 0..6 { + assert_eq!( + net.peer(i).client().info().unwrap().chain.best_number, + 30, + "Peer #{} failed to sync", + i + ); + } + + let net = Arc::new(Mutex::new(net)); + run_to_completion(30, net.clone(), all_peers); } #[test] fn doesnt_vote_on_the_tip_of_the_chain() { - let peers_a = &[AuthorityKeyring::Alice, AuthorityKeyring::Bob, AuthorityKeyring::Charlie]; - let voters = make_ids(peers_a); - let api = TestApi::new(voters); - let mut net = GrandpaTestNet::new(api, 3); - - // add 100 blocks - net.peer(0).push_blocks(100, false); - net.sync(); - - for i in 0..3 { - assert_eq!(net.peer(i).client().info().unwrap().chain.best_number, 100, - "Peer #{} failed to sync", i); - } - - let net = Arc::new(Mutex::new(net)); - let highest = run_to_completion(75, net.clone(), peers_a); - - // the highest block to be finalized will be 3/4 deep in the unfinalized chain - assert_eq!(highest, 75); + let peers_a = &[ + AuthorityKeyring::Alice, + AuthorityKeyring::Bob, + AuthorityKeyring::Charlie, + ]; + let voters = make_ids(peers_a); + let api = TestApi::new(voters); + let mut net = GrandpaTestNet::new(api, 3); + + // add 100 blocks + net.peer(0).push_blocks(100, false); + net.sync(); + + for i in 0..3 { + assert_eq!( + net.peer(i).client().info().unwrap().chain.best_number, + 100, + "Peer #{} failed to sync", + i + ); + } + + let net = Arc::new(Mutex::new(net)); + let highest = run_to_completion(75, net.clone(), peers_a); + + // the highest block to be finalized will be 3/4 deep in the unfinalized chain + assert_eq!(highest, 75); } #[test] fn force_change_to_new_set() { - // two of these guys are offline. - let genesis_authorities = &[AuthorityKeyring::Alice, AuthorityKeyring::Bob, AuthorityKeyring::Charlie, AuthorityKeyring::One, AuthorityKeyring::Two]; - let peers_a = &[AuthorityKeyring::Alice, AuthorityKeyring::Bob, AuthorityKeyring::Charlie]; - let api = TestApi::new(make_ids(genesis_authorities)); - - let voters = make_ids(peers_a); - let normal_transitions = api.scheduled_changes.clone(); - let forced_transitions = api.forced_changes.clone(); - let net = GrandpaTestNet::new(api, 3); - let net = Arc::new(Mutex::new(net)); - - let runner_net = net.clone(); - let add_blocks = move || { - net.lock().peer(0).push_blocks(1, false); - - { - // add a forced transition at block 12. - let parent_hash = net.lock().peer(0).client().info().unwrap().chain.best_hash; - forced_transitions.lock().insert(parent_hash, (0, ScheduledChange { - next_authorities: voters.clone(), - delay: 10, - })); - - // add a normal transition too to ensure that forced changes take priority. - normal_transitions.lock().insert(parent_hash, ScheduledChange { - next_authorities: make_ids(genesis_authorities), - delay: 5, - }); - } - - net.lock().peer(0).push_blocks(25, false); - net.lock().sync(); - - for (i, peer) in net.lock().peers().iter().enumerate() { - assert_eq!(peer.client().info().unwrap().chain.best_number, 26, - "Peer #{} failed to sync", i); - - let set: AuthoritySet = crate::aux_schema::load_authorities( - &**peer.client().backend() - ).unwrap(); - - assert_eq!(set.current(), (1, voters.as_slice())); - assert_eq!(set.pending_changes().count(), 0); - } - }; - - // it will only finalize if the forced transition happens. 
- // we add_blocks after the voters are spawned because otherwise - // the link-halfs have the wrong AuthoritySet - run_to_completion_with(25, runner_net, peers_a, add_blocks); + // two of these guys are offline. + let genesis_authorities = &[ + AuthorityKeyring::Alice, + AuthorityKeyring::Bob, + AuthorityKeyring::Charlie, + AuthorityKeyring::One, + AuthorityKeyring::Two, + ]; + let peers_a = &[ + AuthorityKeyring::Alice, + AuthorityKeyring::Bob, + AuthorityKeyring::Charlie, + ]; + let api = TestApi::new(make_ids(genesis_authorities)); + + let voters = make_ids(peers_a); + let normal_transitions = api.scheduled_changes.clone(); + let forced_transitions = api.forced_changes.clone(); + let net = GrandpaTestNet::new(api, 3); + let net = Arc::new(Mutex::new(net)); + + let runner_net = net.clone(); + let add_blocks = move || { + net.lock().peer(0).push_blocks(1, false); + + { + // add a forced transition at block 12. + let parent_hash = net.lock().peer(0).client().info().unwrap().chain.best_hash; + forced_transitions.lock().insert( + parent_hash, + ( + 0, + ScheduledChange { + next_authorities: voters.clone(), + delay: 10, + }, + ), + ); + + // add a normal transition too to ensure that forced changes take priority. + normal_transitions.lock().insert( + parent_hash, + ScheduledChange { + next_authorities: make_ids(genesis_authorities), + delay: 5, + }, + ); + } + + net.lock().peer(0).push_blocks(25, false); + net.lock().sync(); + + for (i, peer) in net.lock().peers().iter().enumerate() { + assert_eq!( + peer.client().info().unwrap().chain.best_number, + 26, + "Peer #{} failed to sync", + i + ); + + let set: AuthoritySet = + crate::aux_schema::load_authorities(&**peer.client().backend()).unwrap(); + + assert_eq!(set.current(), (1, voters.as_slice())); + assert_eq!(set.pending_changes().count(), 0); + } + }; + + // it will only finalize if the forced transition happens. + // we add_blocks after the voters are spawned because otherwise + // the link-halfs have the wrong AuthoritySet + run_to_completion_with(25, runner_net, peers_a, add_blocks); } #[test] fn allows_reimporting_change_blocks() { - let peers_a = &[AuthorityKeyring::Alice, AuthorityKeyring::Bob, AuthorityKeyring::Charlie]; - let peers_b = &[AuthorityKeyring::Alice, AuthorityKeyring::Bob]; - let voters = make_ids(peers_a); - let api = TestApi::new(voters); - let net = GrandpaTestNet::new(api.clone(), 3); - - let client = net.peer(0).client().clone(); - let (block_import, ..) 
= net.make_block_import(client.clone()); - - let builder = client.new_block_at(&BlockId::Number(0)).unwrap(); - let block = builder.bake().unwrap(); - api.scheduled_changes.lock().insert(*block.header.parent_hash(), ScheduledChange { - next_authorities: make_ids(peers_b), - delay: 0, - }); - - let block = || { - let block = block.clone(); - ImportBlock { - origin: BlockOrigin::File, - header: block.header, - justification: None, - post_digests: Vec::new(), - body: Some(block.extrinsics), - finalized: false, - auxiliary: Vec::new(), - fork_choice: ForkChoiceStrategy::LongestChain, - } - }; - - assert_eq!( - block_import.import_block(block(), HashMap::new()).unwrap(), - ImportResult::Imported(ImportedAux { needs_justification: true, clear_justification_requests: false, bad_justification: false }), - ); - - assert_eq!( - block_import.import_block(block(), HashMap::new()).unwrap(), - ImportResult::AlreadyInChain - ); + let peers_a = &[ + AuthorityKeyring::Alice, + AuthorityKeyring::Bob, + AuthorityKeyring::Charlie, + ]; + let peers_b = &[AuthorityKeyring::Alice, AuthorityKeyring::Bob]; + let voters = make_ids(peers_a); + let api = TestApi::new(voters); + let net = GrandpaTestNet::new(api.clone(), 3); + + let client = net.peer(0).client().clone(); + let (block_import, ..) = net.make_block_import(client.clone()); + + let builder = client.new_block_at(&BlockId::Number(0)).unwrap(); + let block = builder.bake().unwrap(); + api.scheduled_changes.lock().insert( + *block.header.parent_hash(), + ScheduledChange { + next_authorities: make_ids(peers_b), + delay: 0, + }, + ); + + let block = || { + let block = block.clone(); + ImportBlock { + origin: BlockOrigin::File, + header: block.header, + justification: None, + post_digests: Vec::new(), + body: Some(block.extrinsics), + finalized: false, + auxiliary: Vec::new(), + fork_choice: ForkChoiceStrategy::LongestChain, + } + }; + + assert_eq!( + block_import.import_block(block(), HashMap::new()).unwrap(), + ImportResult::Imported(ImportedAux { + needs_justification: true, + clear_justification_requests: false, + bad_justification: false + }), + ); + + assert_eq!( + block_import.import_block(block(), HashMap::new()).unwrap(), + ImportResult::AlreadyInChain + ); } #[test] fn test_bad_justification() { - let peers_a = &[AuthorityKeyring::Alice, AuthorityKeyring::Bob, AuthorityKeyring::Charlie]; - let peers_b = &[AuthorityKeyring::Alice, AuthorityKeyring::Bob]; - let voters = make_ids(peers_a); - let api = TestApi::new(voters); - let net = GrandpaTestNet::new(api.clone(), 3); - - let client = net.peer(0).client().clone(); - let (block_import, ..) 
= net.make_block_import(client.clone()); - - let builder = client.new_block_at(&BlockId::Number(0)).unwrap(); - let block = builder.bake().unwrap(); - api.scheduled_changes.lock().insert(*block.header.parent_hash(), ScheduledChange { - next_authorities: make_ids(peers_b), - delay: 0, - }); - - let block = || { - let block = block.clone(); - ImportBlock { - origin: BlockOrigin::File, - header: block.header, - justification: Some(Vec::new()), - post_digests: Vec::new(), - body: Some(block.extrinsics), - finalized: false, - auxiliary: Vec::new(), - fork_choice: ForkChoiceStrategy::LongestChain, - } - }; - - assert_eq!( - block_import.import_block(block(), HashMap::new()).unwrap(), - ImportResult::Imported(ImportedAux { needs_justification: true, clear_justification_requests: false, bad_justification: true }), - ); - - assert_eq!( - block_import.import_block(block(), HashMap::new()).unwrap(), - ImportResult::AlreadyInChain - ); + let peers_a = &[ + AuthorityKeyring::Alice, + AuthorityKeyring::Bob, + AuthorityKeyring::Charlie, + ]; + let peers_b = &[AuthorityKeyring::Alice, AuthorityKeyring::Bob]; + let voters = make_ids(peers_a); + let api = TestApi::new(voters); + let net = GrandpaTestNet::new(api.clone(), 3); + + let client = net.peer(0).client().clone(); + let (block_import, ..) = net.make_block_import(client.clone()); + + let builder = client.new_block_at(&BlockId::Number(0)).unwrap(); + let block = builder.bake().unwrap(); + api.scheduled_changes.lock().insert( + *block.header.parent_hash(), + ScheduledChange { + next_authorities: make_ids(peers_b), + delay: 0, + }, + ); + + let block = || { + let block = block.clone(); + ImportBlock { + origin: BlockOrigin::File, + header: block.header, + justification: Some(Vec::new()), + post_digests: Vec::new(), + body: Some(block.extrinsics), + finalized: false, + auxiliary: Vec::new(), + fork_choice: ForkChoiceStrategy::LongestChain, + } + }; + + assert_eq!( + block_import.import_block(block(), HashMap::new()).unwrap(), + ImportResult::Imported(ImportedAux { + needs_justification: true, + clear_justification_requests: false, + bad_justification: true + }), + ); + + assert_eq!( + block_import.import_block(block(), HashMap::new()).unwrap(), + ImportResult::AlreadyInChain + ); } diff --git a/core/finality-grandpa/src/until_imported.rs b/core/finality-grandpa/src/until_imported.rs index 4b867c18c8..bf160c92ff 100644 --- a/core/finality-grandpa/src/until_imported.rs +++ b/core/finality-grandpa/src/until_imported.rs @@ -20,238 +20,250 @@ //! //! This is used for votes and commit messages currently. -use super::{BlockStatus, Error, SignedMessage, CompactCommit}; +use super::{BlockStatus, CompactCommit, Error, SignedMessage}; -use log::{debug, warn}; use client::ImportNotifications; use futures::prelude::*; use futures::stream::Fuse; +use log::{debug, warn}; use parking_lot::Mutex; use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, NumberFor}; use substrate_primitives::ed25519::Public as AuthorityId; use tokio::timer::Interval; use std::collections::{HashMap, VecDeque}; -use std::sync::{atomic::{AtomicUsize, Ordering}, Arc}; +use std::sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, +}; use std::time::{Duration, Instant}; const LOG_PENDING_INTERVAL: Duration = Duration::from_secs(15); // something which will block until imported. pub(crate) trait BlockUntilImported: Sized { - // the type that is blocked on. - type Blocked; - - /// new incoming item. For all internal items, - /// check if they require to be waited for. 
- /// if so, call the `Wait` closure. - /// if they are ready, call the `Ready` closure. - fn schedule_wait( - input: Self::Blocked, - status_check: &S, - wait: Wait, - ready: Ready, - ) -> Result<(), Error> where - S: BlockStatus, - Wait: FnMut(Block::Hash, Self), - Ready: FnMut(Self::Blocked); - - /// called when the wait has completed. The canonical number is passed through - /// for further checks. - fn wait_completed(self, canon_number: NumberFor) -> Option; + // the type that is blocked on. + type Blocked; + + /// new incoming item. For all internal items, + /// check if they require to be waited for. + /// if so, call the `Wait` closure. + /// if they are ready, call the `Ready` closure. + fn schedule_wait( + input: Self::Blocked, + status_check: &S, + wait: Wait, + ready: Ready, + ) -> Result<(), Error> + where + S: BlockStatus, + Wait: FnMut(Block::Hash, Self), + Ready: FnMut(Self::Blocked); + + /// called when the wait has completed. The canonical number is passed through + /// for further checks. + fn wait_completed(self, canon_number: NumberFor) -> Option; } /// Buffering imported messages until blocks with given hashes are imported. pub(crate) struct UntilImported> { - import_notifications: Fuse>, - status_check: Status, - inner: Fuse, - ready: VecDeque, - check_pending: Interval, - pending: HashMap)>, + import_notifications: Fuse>, + status_check: Status, + inner: Fuse, + ready: VecDeque, + check_pending: Interval, + pending: HashMap)>, } impl UntilImported - where Status: BlockStatus, M: BlockUntilImported +where + Status: BlockStatus, + M: BlockUntilImported, { - /// Create a new `UntilImported` wrapper. - pub(crate) fn new( - import_notifications: ImportNotifications, - status_check: Status, - stream: I, - ) -> Self { - // how often to check if pending messages that are waiting for blocks to be - // imported can be checked. - // - // the import notifications interval takes care of most of this; this is - // used in the event of missed import notifications - const CHECK_PENDING_INTERVAL: Duration = Duration::from_secs(5); - let now = Instant::now(); - - let check_pending = Interval::new(now + CHECK_PENDING_INTERVAL, CHECK_PENDING_INTERVAL); - UntilImported { - import_notifications: import_notifications.fuse(), - status_check, - inner: stream.fuse(), - ready: VecDeque::new(), - check_pending, - pending: HashMap::new(), - } - } + /// Create a new `UntilImported` wrapper. + pub(crate) fn new( + import_notifications: ImportNotifications, + status_check: Status, + stream: I, + ) -> Self { + // how often to check if pending messages that are waiting for blocks to be + // imported can be checked. + // + // the import notifications interval takes care of most of this; this is + // used in the event of missed import notifications + const CHECK_PENDING_INTERVAL: Duration = Duration::from_secs(5); + let now = Instant::now(); + + let check_pending = Interval::new(now + CHECK_PENDING_INTERVAL, CHECK_PENDING_INTERVAL); + UntilImported { + import_notifications: import_notifications.fuse(), + status_check, + inner: stream.fuse(), + ready: VecDeque::new(), + check_pending, + pending: HashMap::new(), + } + } } -impl Stream for UntilImported where - Status: BlockStatus, - I: Stream, - M: BlockUntilImported, +impl Stream for UntilImported +where + Status: BlockStatus, + I: Stream, + M: BlockUntilImported, { - type Item = M::Blocked; - type Error = Error; - - fn poll(&mut self) -> Poll, Error> { - loop { - match self.inner.poll()? 
{ - Async::Ready(None) => return Ok(Async::Ready(None)), - Async::Ready(Some(input)) => { - // new input: schedule wait of any parts which require - // blocks to be known. - let ready = &mut self.ready; - let pending = &mut self.pending; - M::schedule_wait( - input, - &self.status_check, - |target_hash, wait| pending - .entry(target_hash) - .or_insert_with(|| (Instant::now(), Vec::new())) - .1 - .push(wait), - |ready_item| ready.push_back(ready_item), - )?; - } - Async::NotReady => break, - } - } - - loop { - match self.import_notifications.poll() { - Err(_) => return Err(Error::Network(format!("Failed to get new message"))), - Ok(Async::Ready(None)) => return Ok(Async::Ready(None)), - Ok(Async::Ready(Some(notification))) => { - // new block imported. queue up all messages tied to that hash. - if let Some((_, messages)) = self.pending.remove(¬ification.hash) { - let canon_number = notification.header.number().clone(); - let ready_messages = messages.into_iter() - .filter_map(|m| m.wait_completed(canon_number)); - - self.ready.extend(ready_messages); - } - } - Ok(Async::NotReady) => break, - } - } - - let mut update_interval = false; - while let Async::Ready(Some(_)) = self.check_pending.poll().map_err(Error::Timer)? { - update_interval = true; - } - - if update_interval { - let mut known_keys = Vec::new(); - for (&block_hash, &mut (ref mut last_log, ref v)) in &mut self.pending { - if let Some(number) = self.status_check.block_number(block_hash)? { - known_keys.push((block_hash, number)); - } else { - let next_log = *last_log + LOG_PENDING_INTERVAL; - if Instant::now() <= next_log { - debug!( - target: "afg", - "Waiting to import block {} before {} votes can be imported. \ - Possible fork?", - block_hash, - v.len(), - ); - - *last_log = next_log; - } - } - } - - for (known_hash, canon_number) in known_keys { - if let Some((_, pending_messages)) = self.pending.remove(&known_hash) { - let ready_messages = pending_messages.into_iter() - .filter_map(|m| m.wait_completed(canon_number)); - - self.ready.extend(ready_messages); - } - } - } - - if let Some(ready) = self.ready.pop_front() { - return Ok(Async::Ready(Some(ready))) - } - - if self.import_notifications.is_done() && self.inner.is_done() { - Ok(Async::Ready(None)) - } else { - - Ok(Async::NotReady) - } - } + type Item = M::Blocked; + type Error = Error; + + fn poll(&mut self) -> Poll, Error> { + loop { + match self.inner.poll()? { + Async::Ready(None) => return Ok(Async::Ready(None)), + Async::Ready(Some(input)) => { + // new input: schedule wait of any parts which require + // blocks to be known. + let ready = &mut self.ready; + let pending = &mut self.pending; + M::schedule_wait( + input, + &self.status_check, + |target_hash, wait| { + pending + .entry(target_hash) + .or_insert_with(|| (Instant::now(), Vec::new())) + .1 + .push(wait) + }, + |ready_item| ready.push_back(ready_item), + )?; + } + Async::NotReady => break, + } + } + + loop { + match self.import_notifications.poll() { + Err(_) => return Err(Error::Network(format!("Failed to get new message"))), + Ok(Async::Ready(None)) => return Ok(Async::Ready(None)), + Ok(Async::Ready(Some(notification))) => { + // new block imported. queue up all messages tied to that hash. 
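The branch that follows is the heart of the buffering scheme: messages are parked in a map keyed by the block hash they depend on, and an import notification for that hash moves them into the ready queue. As a standalone sketch of just that step, under illustrative names (`release_pending` is not from the patch):

use std::collections::{HashMap, VecDeque};
use std::hash::Hash;

// Release every message that was parked on `imported` into the ready queue;
// messages waiting on other hashes stay in the map untouched.
fn release_pending<H: Hash + Eq, M>(
    pending: &mut HashMap<H, Vec<M>>,
    ready: &mut VecDeque<M>,
    imported: &H,
) {
    if let Some(messages) = pending.remove(imported) {
        ready.extend(messages);
    }
}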
+ if let Some((_, messages)) = self.pending.remove(¬ification.hash) { + let canon_number = notification.header.number().clone(); + let ready_messages = messages + .into_iter() + .filter_map(|m| m.wait_completed(canon_number)); + + self.ready.extend(ready_messages); + } + } + Ok(Async::NotReady) => break, + } + } + + let mut update_interval = false; + while let Async::Ready(Some(_)) = self.check_pending.poll().map_err(Error::Timer)? { + update_interval = true; + } + + if update_interval { + let mut known_keys = Vec::new(); + for (&block_hash, &mut (ref mut last_log, ref v)) in &mut self.pending { + if let Some(number) = self.status_check.block_number(block_hash)? { + known_keys.push((block_hash, number)); + } else { + let next_log = *last_log + LOG_PENDING_INTERVAL; + if Instant::now() <= next_log { + debug!( + target: "afg", + "Waiting to import block {} before {} votes can be imported. \ + Possible fork?", + block_hash, + v.len(), + ); + + *last_log = next_log; + } + } + } + + for (known_hash, canon_number) in known_keys { + if let Some((_, pending_messages)) = self.pending.remove(&known_hash) { + let ready_messages = pending_messages + .into_iter() + .filter_map(|m| m.wait_completed(canon_number)); + + self.ready.extend(ready_messages); + } + } + } + + if let Some(ready) = self.ready.pop_front() { + return Ok(Async::Ready(Some(ready))); + } + + if self.import_notifications.is_done() && self.inner.is_done() { + Ok(Async::Ready(None)) + } else { + Ok(Async::NotReady) + } + } } fn warn_authority_wrong_target(hash: H, id: AuthorityId) { - warn!( - target: "afg", - "Authority {:?} signed GRANDPA message with \ - wrong block number for hash {}", - id, - hash, - ); + warn!( + target: "afg", + "Authority {:?} signed GRANDPA message with \ + wrong block number for hash {}", + id, + hash, + ); } impl BlockUntilImported for SignedMessage { - type Blocked = Self; - - fn schedule_wait( - msg: Self::Blocked, - status_check: &S, - mut wait: Wait, - mut ready: Ready, - ) -> Result<(), Error> where - S: BlockStatus, - Wait: FnMut(Block::Hash, Self), - Ready: FnMut(Self::Blocked), - { - let (&target_hash, target_number) = msg.target(); - - if let Some(number) = status_check.block_number(target_hash)? { - if number != target_number { - warn_authority_wrong_target(target_hash, msg.id); - } else { - ready(msg); - } - } else { - wait(target_hash, msg) - } - - Ok(()) - } - - fn wait_completed(self, canon_number: NumberFor) -> Option { - let (&target_hash, target_number) = self.target(); - if canon_number != target_number { - warn_authority_wrong_target(target_hash, self.id); - - None - } else { - Some(self) - } - } + type Blocked = Self; + + fn schedule_wait( + msg: Self::Blocked, + status_check: &S, + mut wait: Wait, + mut ready: Ready, + ) -> Result<(), Error> + where + S: BlockStatus, + Wait: FnMut(Block::Hash, Self), + Ready: FnMut(Self::Blocked), + { + let (&target_hash, target_number) = msg.target(); + + if let Some(number) = status_check.block_number(target_hash)? { + if number != target_number { + warn_authority_wrong_target(target_hash, msg.id); + } else { + ready(msg); + } + } else { + wait(target_hash, msg) + } + + Ok(()) + } + + fn wait_completed(self, canon_number: NumberFor) -> Option { + let (&target_hash, target_number) = self.target(); + if canon_number != target_number { + warn_authority_wrong_target(target_hash, self.id); + + None + } else { + Some(self) + } + } } /// Helper type definition for the stream which waits until vote targets for /// signed messages are imported. 
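+/// # Example
+///
+/// A wiring sketch (illustrative; `notifications` stands for the client's import
+/// notification stream, `status` for a `BlockStatus` implementation and `incoming`
+/// for the raw signed-message stream; none of these names come from this module):
+///
+/// ```ignore
+/// let gated = UntilVoteTargetImported::new(notifications, status, incoming);
+/// // `gated` only yields a signed message once its target block has been imported.
+/// ```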
-pub(crate) type UntilVoteTargetImported = UntilImported>; +pub(crate) type UntilVoteTargetImported = + UntilImported>; /// This blocks a commit message's import until all blocks /// referenced in its votes are known. @@ -259,319 +271,330 @@ pub(crate) type UntilVoteTargetImported = UntilImported { - inner: Arc<(AtomicUsize, Mutex)>>)>, - target_number: NumberFor, + inner: Arc<(AtomicUsize, Mutex)>>)>, + target_number: NumberFor, } impl BlockUntilImported for BlockCommitMessage { - type Blocked = (u64, CompactCommit); - - fn schedule_wait( - input: Self::Blocked, - status_check: &S, - mut wait: Wait, - mut ready: Ready, - ) -> Result<(), Error> where - S: BlockStatus, - Wait: FnMut(Block::Hash, Self), - Ready: FnMut(Self::Blocked), - { - use std::collections::hash_map::Entry; - - enum KnownOrUnknown { - Known(N), - Unknown(N), - } - - impl KnownOrUnknown { - fn number(&self) -> &N { - match *self { - KnownOrUnknown::Known(ref n) => n, - KnownOrUnknown::Unknown(ref n) => n, - } - } - } - - let mut checked_hashes: HashMap<_, KnownOrUnknown>> = HashMap::new(); - let mut unknown_count = 0; - - { - // returns false when should early exit. - let mut query_known = |target_hash, perceived_number| -> Result { - // check integrity: all precommits for same hash have same number. - let canon_number = match checked_hashes.entry(target_hash) { - Entry::Occupied(entry) => entry.get().number().clone(), - Entry::Vacant(entry) => { - if let Some(number) = status_check.block_number(target_hash)? { - entry.insert(KnownOrUnknown::Known(number)); - number - - } else { - entry.insert(KnownOrUnknown::Unknown(perceived_number)); - unknown_count += 1; - perceived_number - } - } - }; - - if canon_number != perceived_number { - // invalid commit: messages targeting wrong number or - // at least different from other vote. in same commit. - return Ok(false); - } - - Ok(true) - }; - - let commit = &input.1; - - // add known hashes from the precommits. - for precommit in &commit.precommits { - let target_number = precommit.target_number; - let target_hash = precommit.target_hash; - - if !query_known(target_hash, target_number)? { - return Ok(()) - } - } - - // see if commit target hash is known. - if !query_known(commit.target_hash, commit.target_number)? { - return Ok(()) - } - } - - // none of the hashes in the commit message were unknown. - // we can just return the commit directly. - if unknown_count == 0 { - ready(input); - return Ok(()) - } - - let locked_commit = Arc::new((AtomicUsize::new(unknown_count), Mutex::new(Some(input)))); - - // schedule waits for all unknown messages. - // when the last one of these has `wait_completed` called on it, - // the commit will be returned. - // - // in the future, we may want to issue sync requests to the network - // if this is taking a long time. - for (hash, is_known) in checked_hashes { - if let KnownOrUnknown::Unknown(target_number) = is_known { - wait(hash, BlockCommitMessage { - inner: locked_commit.clone(), - target_number, - }) - } - } - - Ok(()) - } - - fn wait_completed(self, canon_number: NumberFor) -> Option { - if self.target_number != canon_number { - // if we return without deducting the counter, then none of the other - // handles can return the commit message. - return None; - } - - let mut last_count = self.inner.0.load(Ordering::Acquire); - - // CAS loop to ensure that we always have a last reader. - loop { - if last_count == 1 { // we are the last one left. 
- return self.inner.1.lock().take(); - } - - let prev_value = self.inner.0.compare_and_swap( - last_count, - last_count - 1, - Ordering::SeqCst, - ); - - if prev_value == last_count { - return None; - } else { - last_count = prev_value; - } - } - } + type Blocked = (u64, CompactCommit); + + fn schedule_wait( + input: Self::Blocked, + status_check: &S, + mut wait: Wait, + mut ready: Ready, + ) -> Result<(), Error> + where + S: BlockStatus, + Wait: FnMut(Block::Hash, Self), + Ready: FnMut(Self::Blocked), + { + use std::collections::hash_map::Entry; + + enum KnownOrUnknown { + Known(N), + Unknown(N), + } + + impl KnownOrUnknown { + fn number(&self) -> &N { + match *self { + KnownOrUnknown::Known(ref n) => n, + KnownOrUnknown::Unknown(ref n) => n, + } + } + } + + let mut checked_hashes: HashMap<_, KnownOrUnknown>> = HashMap::new(); + let mut unknown_count = 0; + + { + // returns false when should early exit. + let mut query_known = |target_hash, perceived_number| -> Result { + // check integrity: all precommits for same hash have same number. + let canon_number = match checked_hashes.entry(target_hash) { + Entry::Occupied(entry) => entry.get().number().clone(), + Entry::Vacant(entry) => { + if let Some(number) = status_check.block_number(target_hash)? { + entry.insert(KnownOrUnknown::Known(number)); + number + } else { + entry.insert(KnownOrUnknown::Unknown(perceived_number)); + unknown_count += 1; + perceived_number + } + } + }; + + if canon_number != perceived_number { + // invalid commit: messages targeting wrong number or + // at least different from other vote. in same commit. + return Ok(false); + } + + Ok(true) + }; + + let commit = &input.1; + + // add known hashes from the precommits. + for precommit in &commit.precommits { + let target_number = precommit.target_number; + let target_hash = precommit.target_hash; + + if !query_known(target_hash, target_number)? { + return Ok(()); + } + } + + // see if commit target hash is known. + if !query_known(commit.target_hash, commit.target_number)? { + return Ok(()); + } + } + + // none of the hashes in the commit message were unknown. + // we can just return the commit directly. + if unknown_count == 0 { + ready(input); + return Ok(()); + } + + let locked_commit = Arc::new((AtomicUsize::new(unknown_count), Mutex::new(Some(input)))); + + // schedule waits for all unknown messages. + // when the last one of these has `wait_completed` called on it, + // the commit will be returned. + // + // in the future, we may want to issue sync requests to the network + // if this is taking a long time. + for (hash, is_known) in checked_hashes { + if let KnownOrUnknown::Unknown(target_number) = is_known { + wait( + hash, + BlockCommitMessage { + inner: locked_commit.clone(), + target_number, + }, + ) + } + } + + Ok(()) + } + + fn wait_completed(self, canon_number: NumberFor) -> Option { + if self.target_number != canon_number { + // if we return without deducting the counter, then none of the other + // handles can return the commit message. + return None; + } + + let mut last_count = self.inner.0.load(Ordering::Acquire); + + // CAS loop to ensure that we always have a last reader. + loop { + if last_count == 1 { + // we are the last one left. 
+ return self.inner.1.lock().take(); + } + + let prev_value = + self.inner + .0 + .compare_and_swap(last_count, last_count - 1, Ordering::SeqCst); + + if prev_value == last_count { + return None; + } else { + last_count = prev_value; + } + } + } } /// A stream which gates off incoming commit messages until all referenced /// block hashes have been imported. -pub(crate) type UntilCommitBlocksImported = UntilImported< - Block, - Status, - I, - BlockCommitMessage, ->; +pub(crate) type UntilCommitBlocksImported = + UntilImported>; #[cfg(test)] mod tests { - use super::*; - use tokio::runtime::current_thread::Runtime; - use tokio::timer::Delay; - use test_client::runtime::{Block, Hash, Header}; - use consensus_common::BlockOrigin; - use client::BlockImportNotification; - use futures::future::Either; - use futures::sync::mpsc; - use grandpa::Precommit; - - #[derive(Clone)] - struct TestChainState { - sender: mpsc::UnboundedSender>, - known_blocks: Arc>>, - } - - impl TestChainState { - fn new() -> (Self, ImportNotifications) { - let (tx, rx) = mpsc::unbounded(); - let state = TestChainState { - sender: tx, - known_blocks: Arc::new(Mutex::new(HashMap::new())), - }; - - (state, rx) - } - - fn block_status(&self) -> TestBlockStatus { - TestBlockStatus { inner: self.known_blocks.clone() } - } - - fn import_header(&self, header: Header) { - let hash = header.hash(); - let number = header.number().clone(); - - self.known_blocks.lock().insert(hash, number); - self.sender.unbounded_send(BlockImportNotification { - hash, - origin: BlockOrigin::File, - header, - is_new_best: false, - }).unwrap(); - } - } - - struct TestBlockStatus { - inner: Arc>>, - } - - impl BlockStatus for TestBlockStatus { - fn block_number(&self, hash: Hash) -> Result, Error> { - Ok(self.inner.lock().get(&hash).map(|x| x.clone())) - } - } - - fn make_header(number: u64) -> Header { - Header::new( - number, - Default::default(), - Default::default(), - Default::default(), - Default::default(), - ) - } - - #[test] - fn blocking_commit_message() { - let h1 = make_header(5); - let h2 = make_header(6); - let h3 = make_header(7); - - let (chain_state, import_notifications) = TestChainState::new(); - let block_status = chain_state.block_status(); - - let unknown_commit = CompactCommit:: { - target_hash: h1.hash(), - target_number: 5, - precommits: vec![ - Precommit { - target_hash: h2.hash(), - target_number: 6, - }, - Precommit { - target_hash: h3.hash(), - target_number: 7, - }, - ], - auth_data: Vec::new(), // not used - }; - - let (commit_tx, commit_rx) = mpsc::unbounded(); - - let until_imported = UntilCommitBlocksImported::new( - import_notifications, - block_status, - commit_rx.map_err(|_| panic!("should never error")), - ); - - commit_tx.unbounded_send((0, unknown_commit.clone())).unwrap(); - - let inner_chain_state = chain_state.clone(); - let work = until_imported - .into_future() - .select2(Delay::new(Instant::now() + Duration::from_millis(100))) - .then(move |res| match res { - Err(_) => panic!("neither should have had error"), - Ok(Either::A(_)) => panic!("timeout should have fired first"), - Ok(Either::B((_, until_imported))) => { - // timeout fired. push in the headers. 
- inner_chain_state.import_header(h1); - inner_chain_state.import_header(h2); - inner_chain_state.import_header(h3); - - until_imported - } - }); - - let mut runtime = Runtime::new().unwrap(); - assert_eq!(runtime.block_on(work).map_err(|(e, _)| e).unwrap().0, Some((0, unknown_commit))); - } - - #[test] - fn commit_message_all_known() { - let h1 = make_header(5); - let h2 = make_header(6); - let h3 = make_header(7); - - let (chain_state, import_notifications) = TestChainState::new(); - let block_status = chain_state.block_status(); - - let known_commit = CompactCommit:: { - target_hash: h1.hash(), - target_number: 5, - precommits: vec![ - Precommit { - target_hash: h2.hash(), - target_number: 6, - }, - Precommit { - target_hash: h3.hash(), - target_number: 7, - }, - ], - auth_data: Vec::new(), // not used - }; - - chain_state.import_header(h1); - chain_state.import_header(h2); - chain_state.import_header(h3); - - let (commit_tx, commit_rx) = mpsc::unbounded(); - - let until_imported = UntilCommitBlocksImported::new( - import_notifications, - block_status, - commit_rx.map_err(|_| panic!("should never error")), - ); - - commit_tx.unbounded_send((0, known_commit.clone())).unwrap(); - - let work = until_imported.into_future(); - - let mut runtime = Runtime::new().unwrap(); - assert_eq!(runtime.block_on(work).map_err(|(e, _)| e).unwrap().0, Some((0, known_commit))); - } + use super::*; + use client::BlockImportNotification; + use consensus_common::BlockOrigin; + use futures::future::Either; + use futures::sync::mpsc; + use grandpa::Precommit; + use test_client::runtime::{Block, Hash, Header}; + use tokio::runtime::current_thread::Runtime; + use tokio::timer::Delay; + + #[derive(Clone)] + struct TestChainState { + sender: mpsc::UnboundedSender>, + known_blocks: Arc>>, + } + + impl TestChainState { + fn new() -> (Self, ImportNotifications) { + let (tx, rx) = mpsc::unbounded(); + let state = TestChainState { + sender: tx, + known_blocks: Arc::new(Mutex::new(HashMap::new())), + }; + + (state, rx) + } + + fn block_status(&self) -> TestBlockStatus { + TestBlockStatus { + inner: self.known_blocks.clone(), + } + } + + fn import_header(&self, header: Header) { + let hash = header.hash(); + let number = header.number().clone(); + + self.known_blocks.lock().insert(hash, number); + self.sender + .unbounded_send(BlockImportNotification { + hash, + origin: BlockOrigin::File, + header, + is_new_best: false, + }) + .unwrap(); + } + } + + struct TestBlockStatus { + inner: Arc>>, + } + + impl BlockStatus for TestBlockStatus { + fn block_number(&self, hash: Hash) -> Result, Error> { + Ok(self.inner.lock().get(&hash).map(|x| x.clone())) + } + } + + fn make_header(number: u64) -> Header { + Header::new( + number, + Default::default(), + Default::default(), + Default::default(), + Default::default(), + ) + } + + #[test] + fn blocking_commit_message() { + let h1 = make_header(5); + let h2 = make_header(6); + let h3 = make_header(7); + + let (chain_state, import_notifications) = TestChainState::new(); + let block_status = chain_state.block_status(); + + let unknown_commit = CompactCommit:: { + target_hash: h1.hash(), + target_number: 5, + precommits: vec![ + Precommit { + target_hash: h2.hash(), + target_number: 6, + }, + Precommit { + target_hash: h3.hash(), + target_number: 7, + }, + ], + auth_data: Vec::new(), // not used + }; + + let (commit_tx, commit_rx) = mpsc::unbounded(); + + let until_imported = UntilCommitBlocksImported::new( + import_notifications, + block_status, + commit_rx.map_err(|_| panic!("should 
never error")), + ); + + commit_tx + .unbounded_send((0, unknown_commit.clone())) + .unwrap(); + + let inner_chain_state = chain_state.clone(); + let work = until_imported + .into_future() + .select2(Delay::new(Instant::now() + Duration::from_millis(100))) + .then(move |res| match res { + Err(_) => panic!("neither should have had error"), + Ok(Either::A(_)) => panic!("timeout should have fired first"), + Ok(Either::B((_, until_imported))) => { + // timeout fired. push in the headers. + inner_chain_state.import_header(h1); + inner_chain_state.import_header(h2); + inner_chain_state.import_header(h3); + + until_imported + } + }); + + let mut runtime = Runtime::new().unwrap(); + assert_eq!( + runtime.block_on(work).map_err(|(e, _)| e).unwrap().0, + Some((0, unknown_commit)) + ); + } + + #[test] + fn commit_message_all_known() { + let h1 = make_header(5); + let h2 = make_header(6); + let h3 = make_header(7); + + let (chain_state, import_notifications) = TestChainState::new(); + let block_status = chain_state.block_status(); + + let known_commit = CompactCommit:: { + target_hash: h1.hash(), + target_number: 5, + precommits: vec![ + Precommit { + target_hash: h2.hash(), + target_number: 6, + }, + Precommit { + target_hash: h3.hash(), + target_number: 7, + }, + ], + auth_data: Vec::new(), // not used + }; + + chain_state.import_header(h1); + chain_state.import_header(h2); + chain_state.import_header(h3); + + let (commit_tx, commit_rx) = mpsc::unbounded(); + + let until_imported = UntilCommitBlocksImported::new( + import_notifications, + block_status, + commit_rx.map_err(|_| panic!("should never error")), + ); + + commit_tx.unbounded_send((0, known_commit.clone())).unwrap(); + + let work = until_imported.into_future(); + + let mut runtime = Runtime::new().unwrap(); + assert_eq!( + runtime.block_on(work).map_err(|(e, _)| e).unwrap().0, + Some((0, known_commit)) + ); + } } diff --git a/core/inherents/src/lib.rs b/core/inherents/src/lib.rs index 7d2324bc93..9ce0c89599 100644 --- a/core/inherents/src/lib.rs +++ b/core/inherents/src/lib.rs @@ -33,16 +33,19 @@ #![cfg_attr(not(feature = "std"), no_std)] #![warn(missing_docs)] +use codec::{Decode, Encode}; use parity_codec as codec; -use codec::{Encode, Decode}; -use rstd::{collections::btree_map::{BTreeMap, IntoIter, Entry}, vec::Vec}; +use rstd::{ + collections::btree_map::{BTreeMap, Entry, IntoIter}, + vec::Vec, +}; #[cfg(feature = "std")] use parking_lot::RwLock; #[cfg(feature = "std")] -use std::{sync::Arc, format}; +use std::{format, sync::Arc}; #[cfg(feature = "std")] pub mod pool; @@ -55,97 +58,90 @@ pub type InherentIdentifier = [u8; 8]; /// Inherent data to include in a block. #[derive(Clone, Default)] pub struct InherentData { - /// All inherent data encoded with parity-codec and an identifier. - data: BTreeMap> + /// All inherent data encoded with parity-codec and an identifier. + data: BTreeMap>, } impl InherentData { - /// Create a new instance. - pub fn new() -> Self { - Self::default() - } - - /// Put data for an inherent into the internal storage. - /// - /// # Return - /// - /// Returns `Ok(())` if the data could be inserted an no data for an inherent with the same - /// identifier existed, otherwise an error is returned. - /// - /// Inherent identifiers need to be unique, otherwise decoding of these values will not work! 
-	pub fn put_data<I: codec::Encode>(
-		&mut self,
-		identifier: InherentIdentifier,
-		inherent: &I,
-	) -> Result<(), RuntimeString> {
-		match self.data.entry(identifier) {
-			Entry::Vacant(entry) => {
-				entry.insert(inherent.encode());
-				Ok(())
-			},
-			Entry::Occupied(_) => {
-				Err("Inherent with same identifier already exists!".into())
-			}
-		}
-	}
-
-	/// Replace the data for an inherent.
-	///
-	/// If it does not exist, the data is just inserted.
-	pub fn replace_data<I: codec::Encode>(
-		&mut self,
-		identifier: InherentIdentifier,
-		inherent: &I,
-	) {
-		self.data.insert(identifier, inherent.encode());
-	}
-
-	/// Returns the data for the requested inherent.
-	///
-	/// # Return
-	///
-	/// - `Ok(Some(I))` if the data could be found and deserialized.
-	/// - `Ok(None)` if the data could not be found.
-	/// - `Err(_)` if the data could be found, but deserialization did not work.
-	pub fn get_data<I: codec::Decode>(
-		&self,
-		identifier: &InherentIdentifier,
-	) -> Result<Option<I>, RuntimeString> {
-		match self.data.get(identifier) {
-			Some(inherent) =>
-				I::decode(&mut &inherent[..])
-					.ok_or_else(|| "Could not decode requested inherent type!".into())
-					.map(Some),
-			None => Ok(None)
-		}
-	}
+    /// Create a new instance.
+    pub fn new() -> Self {
+        Self::default()
+    }
+
+    /// Put data for an inherent into the internal storage.
+    ///
+    /// # Return
+    ///
+    /// Returns `Ok(())` if the data could be inserted and no data for an inherent with the same
+    /// identifier existed, otherwise an error is returned.
+    ///
+    /// Inherent identifiers need to be unique, otherwise decoding of these values will not work!
+    pub fn put_data<I: codec::Encode>(
+        &mut self,
+        identifier: InherentIdentifier,
+        inherent: &I,
+    ) -> Result<(), RuntimeString> {
+        match self.data.entry(identifier) {
+            Entry::Vacant(entry) => {
+                entry.insert(inherent.encode());
+                Ok(())
+            }
+            Entry::Occupied(_) => Err("Inherent with same identifier already exists!".into()),
+        }
+    }
+
+    /// Replace the data for an inherent.
+    ///
+    /// If it does not exist, the data is just inserted.
+    pub fn replace_data<I: codec::Encode>(&mut self, identifier: InherentIdentifier, inherent: &I) {
+        self.data.insert(identifier, inherent.encode());
+    }
+
+    /// Returns the data for the requested inherent.
+    ///
+    /// # Return
+    ///
+    /// - `Ok(Some(I))` if the data could be found and deserialized.
+    /// - `Ok(None)` if the data could not be found.
+    /// - `Err(_)` if the data could be found, but deserialization did not work.
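+    ///
+    /// # Example
+    ///
+    /// A minimal sketch, mirroring the unit tests below; `MY_IDENT` is an illustrative
+    /// identifier, not part of this crate:
+    ///
+    /// ```ignore
+    /// const MY_IDENT: InherentIdentifier = *b"myident0";
+    /// let mut data = InherentData::new();
+    /// data.put_data(MY_IDENT, &42u32).unwrap();
+    ///
+    /// // Decoding with the type that was encoded succeeds, unknown identifiers yield `Ok(None)`.
+    /// assert_eq!(data.get_data::<u32>(&MY_IDENT).unwrap(), Some(42));
+    /// assert_eq!(data.get_data::<u32>(b"unknown0").unwrap(), None);
+    /// ```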
+    pub fn get_data<I: codec::Decode>(
+        &self,
+        identifier: &InherentIdentifier,
+    ) -> Result<Option<I>, RuntimeString> {
+        match self.data.get(identifier) {
+            Some(inherent) => I::decode(&mut &inherent[..])
+                .ok_or_else(|| "Could not decode requested inherent type!".into())
+                .map(Some),
+            None => Ok(None),
+        }
+    }
 }

 impl codec::Encode for InherentData {
-	fn encode(&self) -> Vec<u8> {
-		let keys = self.data.keys().collect::<Vec<_>>();
-		let values = self.data.values().collect::<Vec<_>>();
-
-		let mut encoded = keys.encode();
-		encoded.extend(values.encode());
-		encoded
-	}
+    fn encode(&self) -> Vec<u8> {
+        let keys = self.data.keys().collect::<Vec<_>>();
+        let values = self.data.values().collect::<Vec<_>>();
+
+        let mut encoded = keys.encode();
+        encoded.extend(values.encode());
+        encoded
+    }
 }

 impl codec::Decode for InherentData {
-	fn decode<I: codec::Input>(value: &mut I) -> Option<Self> {
-		Vec::<InherentIdentifier>::decode(value)
-			.and_then(|i| Vec::<Vec<u8>>::decode(value).map(|v| (i, v)))
-			.and_then(|(i, v)| {
-				if i.len() == v.len() {
-					Some(Self {
-						data: i.into_iter().zip(v.into_iter()).collect()
-					})
-				} else {
-					None
-				}
-			})
-	}
+    fn decode<I: codec::Input>(value: &mut I) -> Option<Self> {
+        Vec::<InherentIdentifier>::decode(value)
+            .and_then(|i| Vec::<Vec<u8>>::decode(value).map(|v| (i, v)))
+            .and_then(|(i, v)| {
+                if i.len() == v.len() {
+                    Some(Self {
+                        data: i.into_iter().zip(v.into_iter()).collect(),
+                    })
+                } else {
+                    None
+                }
+            })
+    }
 }

 /// The result of checking inherents.
@@ -156,209 +152,216 @@ impl codec::Decode for InherentData {
 /// abort checking inherents.
 #[derive(Encode, Decode, Clone)]
 pub struct CheckInherentsResult {
-	/// Did the check succeed?
-	okay: bool,
-	/// Did we encounter a fatal error?
-	fatal_error: bool,
-	/// We use the `InherentData` to store our errors.
-	errors: InherentData,
+    /// Did the check succeed?
+    okay: bool,
+    /// Did we encounter a fatal error?
+    fatal_error: bool,
+    /// We use the `InherentData` to store our errors.
+    errors: InherentData,
 }

 impl Default for CheckInherentsResult {
-	fn default() -> Self {
-		Self {
-			okay: true,
-			errors: InherentData::new(),
-			fatal_error: false,
-		}
-	}
+    fn default() -> Self {
+        Self {
+            okay: true,
+            errors: InherentData::new(),
+            fatal_error: false,
+        }
+    }
 }

 impl CheckInherentsResult {
-	/// Create a new instance.
-	pub fn new() -> Self {
-		Self::default()
-	}
-
-	/// Put an error into the result.
-	///
-	/// This makes this result resolve to `ok() == false`.
-	///
-	/// # Parameters
-	///
-	/// - identifier - The identifier of the inherent that generated the error.
-	/// - error - The error that will be encoded.
-	pub fn put_error<E: codec::Encode + IsFatalError>(
-		&mut self,
-		identifier: InherentIdentifier,
-		error: &E,
-	) -> Result<(), RuntimeString> {
-		// Don't accept any other error
-		if self.fatal_error {
-			return Err("No other errors are accepted after a hard error!".into())
-		}
-
-		if error.is_fatal_error() {
-			// remove the other errors.
-			self.errors.data.clear();
-		}
-
-		self.errors.put_data(identifier, error)?;
-
-		self.okay = false;
-		self.fatal_error = error.is_fatal_error();
-		Ok(())
-	}
-
-	/// Get an error out of the result.
-	///
-	/// # Return
-	///
-	/// - `Ok(Some(I))` if the error could be found and deserialized.
-	/// - `Ok(None)` if the error could not be found.
-	/// - `Err(_)` if the error could be found, but deserialization did not work.
-	pub fn get_error<E: codec::Decode>(
-		&self,
-		identifier: &InherentIdentifier,
-	) -> Result<Option<E>, RuntimeString> {
-		self.errors.get_data(identifier)
-	}
-
-	/// Convert into an iterator over all contained errors.
-	pub fn into_errors(self) -> IntoIter<InherentIdentifier, Vec<u8>> {
-		self.errors.data.into_iter()
-	}
-
-	/// Is this result ok?
-	pub fn ok(&self) -> bool {
-		self.okay
-	}
-
-	/// Is this a fatal error?
-	pub fn fatal_error(&self) -> bool {
-		self.fatal_error
-	}
+    /// Create a new instance.
+    pub fn new() -> Self {
+        Self::default()
+    }
+
+    /// Put an error into the result.
+    ///
+    /// This makes this result resolve to `ok() == false`.
+    ///
+    /// # Parameters
+    ///
+    /// - identifier - The identifier of the inherent that generated the error.
+    /// - error - The error that will be encoded.
+    pub fn put_error<E: codec::Encode + IsFatalError>(
+        &mut self,
+        identifier: InherentIdentifier,
+        error: &E,
+    ) -> Result<(), RuntimeString> {
+        // Don't accept any other error
+        if self.fatal_error {
+            return Err("No other errors are accepted after a hard error!".into());
+        }
+
+        if error.is_fatal_error() {
+            // remove the other errors.
+            self.errors.data.clear();
+        }
+
+        self.errors.put_data(identifier, error)?;
+
+        self.okay = false;
+        self.fatal_error = error.is_fatal_error();
+        Ok(())
+    }
+
+    /// Get an error out of the result.
+    ///
+    /// # Return
+    ///
+    /// - `Ok(Some(I))` if the error could be found and deserialized.
+    /// - `Ok(None)` if the error could not be found.
+    /// - `Err(_)` if the error could be found, but deserialization did not work.
+    pub fn get_error<E: codec::Decode>(
+        &self,
+        identifier: &InherentIdentifier,
+    ) -> Result<Option<E>, RuntimeString> {
+        self.errors.get_data(identifier)
+    }
+
+    /// Convert into an iterator over all contained errors.
+    pub fn into_errors(self) -> IntoIter<InherentIdentifier, Vec<u8>> {
+        self.errors.data.into_iter()
+    }
+
+    /// Is this result ok?
+    pub fn ok(&self) -> bool {
+        self.okay
+    }
+
+    /// Is this a fatal error?
+    pub fn fatal_error(&self) -> bool {
+        self.fatal_error
+    }
 }

 #[cfg(feature = "std")]
 impl PartialEq for CheckInherentsResult {
-	fn eq(&self, other: &Self) -> bool {
-		self.fatal_error == other.fatal_error &&
-			self.okay == other.okay &&
-			self.errors.data == other.errors.data
-	}
+    fn eq(&self, other: &Self) -> bool {
+        self.fatal_error == other.fatal_error
+            && self.okay == other.okay
+            && self.errors.data == other.errors.data
+    }
 }

 /// All `InherentData` providers.
 #[cfg(feature = "std")]
 #[derive(Clone, Default)]
 pub struct InherentDataProviders {
-	providers: Arc<RwLock<Vec<Box<ProvideInherentData + Send + Sync>>>>,
+    providers: Arc<RwLock<Vec<Box<ProvideInherentData + Send + Sync>>>>,
 }

 #[cfg(feature = "std")]
 impl InherentDataProviders {
-	/// Create a new instance.
-	pub fn new() -> Self {
-		Self::default()
-	}
-
-	/// Register an `InherentData` provider.
-	///
-	/// The registration order is preserved and this order will also be used when creating the
-	/// inherent data.
-	///
-	/// # Result
-	///
-	/// Will return an error if a provider with the same identifier already exists.
-	pub fn register_provider<P: ProvideInherentData + Send + Sync + 'static>(
-		&self,
-		provider: P,
-	) -> Result<(), RuntimeString> {
-		if self.has_provider(&provider.inherent_identifier()) {
-			Err(
-				format!(
-					"Inherent data provider with identifier {:?} already exists!",
-					&provider.inherent_identifier()
-				).into()
-			)
-		} else {
-			provider.on_register(self)?;
-			self.providers.write().push(Box::new(provider));
-			Ok(())
-		}
-	}
-
-	/// Returns whether a provider for the given identifier exists.
-	pub fn has_provider(&self, identifier: &InherentIdentifier) -> bool {
-		self.providers.read().iter().any(|p| p.inherent_identifier() == identifier)
-	}
-
-	/// Create inherent data.
-	pub fn create_inherent_data(&self) -> Result<InherentData, RuntimeString> {
-		let mut data = InherentData::new();
-		self.providers.read().iter().try_for_each(|p| {
-			p.provide_inherent_data(&mut data)
-				.map_err(|e| format!("Error for `{:?}`: {:?}", p.inherent_identifier(), e))
-		})?;
-		Ok(data)
-	}
-
-	/// Converts a given encoded error into a `String`.
-	///
-	/// Useful if the implementation encounters an error for an identifier it does not know.
-	pub fn error_to_string(&self, identifier: &InherentIdentifier, error: &[u8]) -> String {
-		let res = self.providers.read().iter().filter_map(|p|
-			if p.inherent_identifier() == identifier {
-				Some(
-					p.error_to_string(error)
-						.unwrap_or_else(|| error_to_string_fallback(identifier))
-				)
-			} else {
-				None
-			}
-		).next();
-
-		match res {
-			Some(res) => res,
-			None => format!(
-				"Error while checking inherent of type \"{}\", but this inherent type is unknown.",
-				String::from_utf8_lossy(identifier)
-			)
-		}
-	}
+    /// Create a new instance.
+    pub fn new() -> Self {
+        Self::default()
+    }
+
+    /// Register an `InherentData` provider.
+    ///
+    /// The registration order is preserved and this order will also be used when creating the
+    /// inherent data.
+    ///
+    /// # Result
+    ///
+    /// Will return an error if a provider with the same identifier already exists.
+    pub fn register_provider<P: ProvideInherentData + Send + Sync + 'static>(
+        &self,
+        provider: P,
+    ) -> Result<(), RuntimeString> {
+        if self.has_provider(&provider.inherent_identifier()) {
+            Err(format!(
+                "Inherent data provider with identifier {:?} already exists!",
+                &provider.inherent_identifier()
+            )
+            .into())
+        } else {
+            provider.on_register(self)?;
+            self.providers.write().push(Box::new(provider));
+            Ok(())
+        }
+    }
+
+    /// Returns whether a provider for the given identifier exists.
+    pub fn has_provider(&self, identifier: &InherentIdentifier) -> bool {
+        self.providers
+            .read()
+            .iter()
+            .any(|p| p.inherent_identifier() == identifier)
+    }
+
+    /// Create inherent data.
+    pub fn create_inherent_data(&self) -> Result<InherentData, RuntimeString> {
+        let mut data = InherentData::new();
+        self.providers.read().iter().try_for_each(|p| {
+            p.provide_inherent_data(&mut data)
+                .map_err(|e| format!("Error for `{:?}`: {:?}", p.inherent_identifier(), e))
+        })?;
+        Ok(data)
+    }
+
+    /// Converts a given encoded error into a `String`.
+    ///
+    /// Useful if the implementation encounters an error for an identifier it does not know.
+    pub fn error_to_string(&self, identifier: &InherentIdentifier, error: &[u8]) -> String {
+        let res = self
+            .providers
+            .read()
+            .iter()
+            .filter_map(|p| {
+                if p.inherent_identifier() == identifier {
+                    Some(
+                        p.error_to_string(error)
+                            .unwrap_or_else(|| error_to_string_fallback(identifier)),
+                    )
+                } else {
+                    None
+                }
+            })
+            .next();

+        match res {
+            Some(res) => res,
+            None => format!(
+                "Error while checking inherent of type \"{}\", but this inherent type is unknown.",
+                String::from_utf8_lossy(identifier)
+            ),
+        }
+    }
 }

 /// Something that provides inherent data.
 #[cfg(feature = "std")]
 pub trait ProvideInherentData {
-	/// Is called when this inherent data provider is registered at the given
-	/// `InherentDataProviders`.
-	fn on_register(&self, _: &InherentDataProviders) -> Result<(), RuntimeString> {
-		Ok(())
-	}
-
-	/// The identifier of the inherent for which data will be provided.
-	fn inherent_identifier(&self) -> &'static InherentIdentifier;
-
-	/// Provide inherent data that should be included in a block.
-	///
-	/// The data should be stored in the given `InherentData` structure.
-	fn provide_inherent_data(&self, inherent_data: &mut InherentData) -> Result<(), RuntimeString>;
-
-	/// Convert the given encoded error to a string.
-	///
-	/// If the given error could not be decoded, `None` should be returned.
-	fn error_to_string(&self, error: &[u8]) -> Option<String>;
+    /// Is called when this inherent data provider is registered at the given
+    /// `InherentDataProviders`.
+    fn on_register(&self, _: &InherentDataProviders) -> Result<(), RuntimeString> {
+        Ok(())
+    }
+
+    /// The identifier of the inherent for which data will be provided.
+    fn inherent_identifier(&self) -> &'static InherentIdentifier;
+
+    /// Provide inherent data that should be included in a block.
+    ///
+    /// The data should be stored in the given `InherentData` structure.
+    fn provide_inherent_data(&self, inherent_data: &mut InherentData) -> Result<(), RuntimeString>;
+
+    /// Convert the given encoded error to a string.
+    ///
+    /// If the given error could not be decoded, `None` should be returned.
+    fn error_to_string(&self, error: &[u8]) -> Option<String>;
 }

 /// A fallback function, if the decoding of an error fails.
 #[cfg(feature = "std")]
 fn error_to_string_fallback(identifier: &InherentIdentifier) -> String {
-	format!(
-		"Error while checking inherent of type \"{}\", but error could not be decoded.",
-		String::from_utf8_lossy(identifier)
-	)
+    format!(
+        "Error while checking inherent of type \"{}\", but error could not be decoded.",
+        String::from_utf8_lossy(identifier)
+    )
 }

 /// Did we encounter a fatal error while checking an inherent?
@@ -370,8 +373,8 @@ fn error_to_string_fallback(identifier: &InherentIdentifier) -> String {
 /// correct, but it is required to verify the block at a later time again and then the inherent
 /// check will succeed.
 pub trait IsFatalError {
-	/// Is this a fatal error?
-	fn is_fatal_error(&self) -> bool;
+    /// Is this a fatal error?
+    fn is_fatal_error(&self) -> bool;
 }

 /// Auxiliary to make any given error resolve to `is_fatal_error() == true`.
@@ -379,203 +382,225 @@ pub trait IsFatalError {
 pub struct MakeFatalError<E>(E);

 impl<E> From<E> for MakeFatalError<E> {
-	fn from(err: E) -> Self {
-		MakeFatalError(err)
-	}
+    fn from(err: E) -> Self {
+        MakeFatalError(err)
+    }
 }

 impl<E> IsFatalError for MakeFatalError<E> {
-	fn is_fatal_error(&self) -> bool {
-		true
-	}
+    fn is_fatal_error(&self) -> bool {
+        true
+    }
 }

 /// A module that provides an inherent and may also verify it.
 pub trait ProvideInherent {
-	/// The call type of the module.
-	type Call;
-	/// The error returned by `check_inherent`.
-	type Error: codec::Encode + IsFatalError;
-	/// The inherent identifier used by this inherent.
-	const INHERENT_IDENTIFIER: self::InherentIdentifier;
-
-	/// Create an inherent out of the given `InherentData`.
-	fn create_inherent(data: &InherentData) -> Option<Self::Call>;
-
-	/// Check whether the given inherent is valid.
-	/// Checking the inherent is optional and can be omitted.
-	fn check_inherent(_: &Self::Call, _: &InherentData) -> Result<(), Self::Error> {
-		Ok(())
-	}
+    /// The call type of the module.
+    type Call;
+    /// The error returned by `check_inherent`.
+    type Error: codec::Encode + IsFatalError;
+    /// The inherent identifier used by this inherent.
+    const INHERENT_IDENTIFIER: self::InherentIdentifier;
+
+    /// Create an inherent out of the given `InherentData`.
+    fn create_inherent(data: &InherentData) -> Option<Self::Call>;
+
+    /// Check whether the given inherent is valid.
+    /// Checking the inherent is optional and can be omitted.
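+    ///
+    /// # Example
+    ///
+    /// A sketch of an override (the `u64` payload is illustrative, not prescribed by the
+    /// trait):
+    ///
+    /// ```ignore
+    /// fn check_inherent(call: &Self::Call, data: &InherentData) -> Result<(), Self::Error> {
+    ///     // Compare the value carried by `call` against the locally known inherent data
+    ///     // and return an encodable error if they do not match.
+    ///     let _local: Option<u64> = data.get_data(&Self::INHERENT_IDENTIFIER).unwrap_or(None);
+    ///     Ok(())
+    /// }
+    /// ```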
+ fn check_inherent(_: &Self::Call, _: &InherentData) -> Result<(), Self::Error> { + Ok(()) + } } #[cfg(test)] mod tests { - use super::*; - use codec::{Encode, Decode}; - - const TEST_INHERENT_0: InherentIdentifier = *b"testinh0"; - const TEST_INHERENT_1: InherentIdentifier = *b"testinh1"; - - #[derive(Encode)] - struct NoFatalError(E); - impl IsFatalError for NoFatalError { - fn is_fatal_error(&self) -> bool { - false - } - } - - #[test] - fn inherent_data_encodes_and_decodes() { - let inherent_0 = vec![1, 2, 3]; - let inherent_1: u32 = 7; - - let mut data = InherentData::new(); - data.put_data(TEST_INHERENT_0, &inherent_0).unwrap(); - data.put_data(TEST_INHERENT_1, &inherent_1).unwrap(); - - let encoded = data.encode(); - - let decoded = InherentData::decode(&mut &encoded[..]).unwrap(); - - assert_eq!(decoded.get_data::>(&TEST_INHERENT_0).unwrap().unwrap(), inherent_0); - assert_eq!(decoded.get_data::(&TEST_INHERENT_1).unwrap().unwrap(), inherent_1); - } - - #[test] - fn adding_same_inherent_returns_an_error() { - let mut data = InherentData::new(); - data.put_data(TEST_INHERENT_0, &8).unwrap(); - assert!(data.put_data(TEST_INHERENT_0, &10).is_err()); - } - - #[derive(Clone)] - struct TestInherentDataProvider { - registered: Arc>, - } - - impl TestInherentDataProvider { - fn new() -> Self { - let inst = Self { - registered: Default::default(), - }; - - // just make sure - assert!(!inst.is_registered()); - - inst - } - - fn is_registered(&self) -> bool { - *self.registered.read() - } - } - - const ERROR_TO_STRING: &str = "Found error!"; - - impl ProvideInherentData for TestInherentDataProvider { - fn on_register(&self, _: &InherentDataProviders) -> Result<(), RuntimeString> { - *self.registered.write() = true; - Ok(()) - } - - fn inherent_identifier(&self) -> &'static InherentIdentifier { - &TEST_INHERENT_0 - } - - fn provide_inherent_data(&self, data: &mut InherentData) -> Result<(), RuntimeString> { - data.put_data(TEST_INHERENT_0, &42) - } - - fn error_to_string(&self, _: &[u8]) -> Option { - Some(ERROR_TO_STRING.into()) - } - } - - #[test] - fn registering_inherent_provider() { - let provider = TestInherentDataProvider::new(); - let providers = InherentDataProviders::new(); - - providers.register_provider(provider.clone()).unwrap(); - assert!(provider.is_registered()); - assert!(providers.has_provider(provider.inherent_identifier())); - - // Second time should fail - assert!(providers.register_provider(provider.clone()).is_err()); - } - - #[test] - fn create_inherent_data_from_all_providers() { - let provider = TestInherentDataProvider::new(); - let providers = InherentDataProviders::new(); - - providers.register_provider(provider.clone()).unwrap(); - assert!(provider.is_registered()); - - let inherent_data = providers.create_inherent_data().unwrap(); - - assert_eq!( - inherent_data.get_data::(provider.inherent_identifier()).unwrap().unwrap(), - 42u32 - ); - } - - #[test] - fn encoded_error_to_string() { - let provider = TestInherentDataProvider::new(); - let providers = InherentDataProviders::new(); - - providers.register_provider(provider.clone()).unwrap(); - assert!(provider.is_registered()); - - assert_eq!( - &providers.error_to_string(&TEST_INHERENT_0, &[1, 2]), ERROR_TO_STRING - ); - - assert!( - providers - .error_to_string(&TEST_INHERENT_1, &[1, 2]) - .contains("inherent type is unknown") - ); - } - - #[test] - fn check_inherents_result_encodes_and_decodes() { - let mut result = CheckInherentsResult::new(); - assert!(result.ok()); - - result.put_error(TEST_INHERENT_0, 
&NoFatalError(2u32)).unwrap(); - assert!(!result.ok()); - assert!(!result.fatal_error()); - - let encoded = result.encode(); - - let decoded = CheckInherentsResult::decode(&mut &encoded[..]).unwrap(); - - assert_eq!(decoded.get_error::(&TEST_INHERENT_0).unwrap().unwrap(), 2); - assert!(!decoded.ok()); - assert!(!decoded.fatal_error()); - } - - #[test] - fn check_inherents_result_removes_other_errors_on_fatal_error() { - let mut result = CheckInherentsResult::new(); - assert!(result.ok()); - - result.put_error(TEST_INHERENT_0, &NoFatalError(2u32)).unwrap(); - assert!(!result.ok()); - assert!(!result.fatal_error()); - - result.put_error(TEST_INHERENT_1, &MakeFatalError(4u32)).unwrap(); - assert!(!result.ok()); - assert!(result.fatal_error()); - - assert!(result.put_error(TEST_INHERENT_0, &NoFatalError(5u32)).is_err()); - - result.into_errors().for_each(|(i, e)| match i { - TEST_INHERENT_1 => assert_eq!(u32::decode(&mut &e[..]).unwrap(), 4), - _ => panic!("There should be no other error!"), - }); - } + use super::*; + use codec::{Decode, Encode}; + + const TEST_INHERENT_0: InherentIdentifier = *b"testinh0"; + const TEST_INHERENT_1: InherentIdentifier = *b"testinh1"; + + #[derive(Encode)] + struct NoFatalError(E); + impl IsFatalError for NoFatalError { + fn is_fatal_error(&self) -> bool { + false + } + } + + #[test] + fn inherent_data_encodes_and_decodes() { + let inherent_0 = vec![1, 2, 3]; + let inherent_1: u32 = 7; + + let mut data = InherentData::new(); + data.put_data(TEST_INHERENT_0, &inherent_0).unwrap(); + data.put_data(TEST_INHERENT_1, &inherent_1).unwrap(); + + let encoded = data.encode(); + + let decoded = InherentData::decode(&mut &encoded[..]).unwrap(); + + assert_eq!( + decoded + .get_data::>(&TEST_INHERENT_0) + .unwrap() + .unwrap(), + inherent_0 + ); + assert_eq!( + decoded.get_data::(&TEST_INHERENT_1).unwrap().unwrap(), + inherent_1 + ); + } + + #[test] + fn adding_same_inherent_returns_an_error() { + let mut data = InherentData::new(); + data.put_data(TEST_INHERENT_0, &8).unwrap(); + assert!(data.put_data(TEST_INHERENT_0, &10).is_err()); + } + + #[derive(Clone)] + struct TestInherentDataProvider { + registered: Arc>, + } + + impl TestInherentDataProvider { + fn new() -> Self { + let inst = Self { + registered: Default::default(), + }; + + // just make sure + assert!(!inst.is_registered()); + + inst + } + + fn is_registered(&self) -> bool { + *self.registered.read() + } + } + + const ERROR_TO_STRING: &str = "Found error!"; + + impl ProvideInherentData for TestInherentDataProvider { + fn on_register(&self, _: &InherentDataProviders) -> Result<(), RuntimeString> { + *self.registered.write() = true; + Ok(()) + } + + fn inherent_identifier(&self) -> &'static InherentIdentifier { + &TEST_INHERENT_0 + } + + fn provide_inherent_data(&self, data: &mut InherentData) -> Result<(), RuntimeString> { + data.put_data(TEST_INHERENT_0, &42) + } + + fn error_to_string(&self, _: &[u8]) -> Option { + Some(ERROR_TO_STRING.into()) + } + } + + #[test] + fn registering_inherent_provider() { + let provider = TestInherentDataProvider::new(); + let providers = InherentDataProviders::new(); + + providers.register_provider(provider.clone()).unwrap(); + assert!(provider.is_registered()); + assert!(providers.has_provider(provider.inherent_identifier())); + + // Second time should fail + assert!(providers.register_provider(provider.clone()).is_err()); + } + + #[test] + fn create_inherent_data_from_all_providers() { + let provider = TestInherentDataProvider::new(); + let providers = 
InherentDataProviders::new(); + + providers.register_provider(provider.clone()).unwrap(); + assert!(provider.is_registered()); + + let inherent_data = providers.create_inherent_data().unwrap(); + + assert_eq!( + inherent_data + .get_data::(provider.inherent_identifier()) + .unwrap() + .unwrap(), + 42u32 + ); + } + + #[test] + fn encoded_error_to_string() { + let provider = TestInherentDataProvider::new(); + let providers = InherentDataProviders::new(); + + providers.register_provider(provider.clone()).unwrap(); + assert!(provider.is_registered()); + + assert_eq!( + &providers.error_to_string(&TEST_INHERENT_0, &[1, 2]), + ERROR_TO_STRING + ); + + assert!(providers + .error_to_string(&TEST_INHERENT_1, &[1, 2]) + .contains("inherent type is unknown")); + } + + #[test] + fn check_inherents_result_encodes_and_decodes() { + let mut result = CheckInherentsResult::new(); + assert!(result.ok()); + + result + .put_error(TEST_INHERENT_0, &NoFatalError(2u32)) + .unwrap(); + assert!(!result.ok()); + assert!(!result.fatal_error()); + + let encoded = result.encode(); + + let decoded = CheckInherentsResult::decode(&mut &encoded[..]).unwrap(); + + assert_eq!( + decoded.get_error::(&TEST_INHERENT_0).unwrap().unwrap(), + 2 + ); + assert!(!decoded.ok()); + assert!(!decoded.fatal_error()); + } + + #[test] + fn check_inherents_result_removes_other_errors_on_fatal_error() { + let mut result = CheckInherentsResult::new(); + assert!(result.ok()); + + result + .put_error(TEST_INHERENT_0, &NoFatalError(2u32)) + .unwrap(); + assert!(!result.ok()); + assert!(!result.fatal_error()); + + result + .put_error(TEST_INHERENT_1, &MakeFatalError(4u32)) + .unwrap(); + assert!(!result.ok()); + assert!(result.fatal_error()); + + assert!(result + .put_error(TEST_INHERENT_0, &NoFatalError(5u32)) + .is_err()); + + result.into_errors().for_each(|(i, e)| match i { + TEST_INHERENT_1 => assert_eq!(u32::decode(&mut &e[..]).unwrap(), 4), + _ => panic!("There should be no other error!"), + }); + } } diff --git a/core/inherents/src/pool.rs b/core/inherents/src/pool.rs index 2c7e953696..5d7246ffa9 100644 --- a/core/inherents/src/pool.rs +++ b/core/inherents/src/pool.rs @@ -16,60 +16,60 @@ //! Inherents Pool -use std::{fmt, mem, vec}; use parking_lot::Mutex; +use std::{fmt, mem, vec}; /// Inherents Pool /// /// The pool is responsible to collect inherents asynchronously generated /// by some other parts of the code and make them ready for the next block production. pub struct InherentsPool { - data: Mutex>, + data: Mutex>, } impl Default for InherentsPool { - fn default() -> Self { - InherentsPool { - data: Default::default(), - } - } + fn default() -> Self { + InherentsPool { + data: Default::default(), + } + } } impl fmt::Debug for InherentsPool { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - let mut builder = fmt.debug_struct("InherentsPool"); - if let Some(data) = self.data.try_lock() { - builder.field("data", &*data); - } - builder.finish() - } + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + let mut builder = fmt.debug_struct("InherentsPool"); + if let Some(data) = self.data.try_lock() { + builder.field("data", &*data); + } + builder.finish() + } } impl InherentsPool { - /// Add inherent extrinsic to the pool. - /// - /// This inherent will be appended to the next produced block. - pub fn add(&self, extrinsic: T) { - self.data.lock().push(extrinsic); - } + /// Add inherent extrinsic to the pool. + /// + /// This inherent will be appended to the next produced block. 
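-	pub fn add(&self, extrinsic: T) {
-		self.data.lock().push(extrinsic);
-	}
+    /// Add inherent extrinsic to the pool.
+    ///
+    /// This inherent will be appended to the next produced block.
+    ///
+    /// # Example
+    ///
+    /// A minimal sketch of the add/drain round trip, mirroring the test below:
+    ///
+    /// ```ignore
+    /// let pool = InherentsPool::default();
+    /// pool.add(5);
+    /// pool.add(7);
+    /// // Block production later drains the queued inherents in insertion order.
+    /// assert_eq!(pool.drain(), vec![5, 7]);
+    /// ```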
+ pub fn add(&self, extrinsic: T) { + self.data.lock().push(extrinsic); + } - /// Drain all currently queued inherents. - pub fn drain(&self) -> Vec { - mem::replace(&mut *self.data.lock(), vec![]) - } + /// Drain all currently queued inherents. + pub fn drain(&self) -> Vec { + mem::replace(&mut *self.data.lock(), vec![]) + } } #[cfg(test)] mod tests { - use super::*; + use super::*; - #[test] - fn should_drain_inherents_to_given_data() { - let pool = InherentsPool::default(); - pool.add(5); - pool.add(7); + #[test] + fn should_drain_inherents_to_given_data() { + let pool = InherentsPool::default(); + pool.add(5); + pool.add(7); - assert_eq!(pool.drain(), vec![5, 7]); - assert_eq!(pool.drain(), vec![]); - } + assert_eq!(pool.drain(), vec![5, 7]); + assert_eq!(pool.drain(), vec![]); + } } diff --git a/core/keyring/src/ed25519.rs b/core/keyring/src/ed25519.rs index f36d8fd485..03503055df 100644 --- a/core/keyring/src/ed25519.rs +++ b/core/keyring/src/ed25519.rs @@ -16,158 +16,175 @@ //! Support code for the runtime. A set of test accounts. -use std::{collections::HashMap, ops::Deref}; use lazy_static::lazy_static; -use substrate_primitives::{ed25519::{Pair, Public, Signature}, Pair as PairT, H256}; +use std::{collections::HashMap, ops::Deref}; pub use substrate_primitives::ed25519; +use substrate_primitives::{ + ed25519::{Pair, Public, Signature}, + Pair as PairT, H256, +}; /// Set of test accounts. #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, strum_macros::Display, strum_macros::EnumIter)] pub enum Keyring { - Alice, - Bob, - Charlie, - Dave, - Eve, - Ferdie, - One, - Two, + Alice, + Bob, + Charlie, + Dave, + Eve, + Ferdie, + One, + Two, } impl Keyring { - pub fn from_public(who: &Public) -> Option { - [ - Keyring::Alice, - Keyring::Bob, - Keyring::Charlie, - Keyring::Dave, - Keyring::Eve, - Keyring::Ferdie, - Keyring::One, - Keyring::Two, - ].iter() - .map(|i| *i) - .find(|&k| &Public::from(k) == who) - } - - pub fn from_raw_public(who: [u8; 32]) -> Option { - Self::from_public(&Public::from_raw(who)) - } - - pub fn to_raw_public(self) -> [u8; 32] { - *Public::from(self).as_array_ref() - } - - pub fn from_h256_public(who: H256) -> Option { - Self::from_public(&Public::from_raw(who.into())) - } - - pub fn to_h256_public(self) -> H256 { - Public::from(self).as_array_ref().into() - } - - pub fn to_raw_public_vec(self) -> Vec { - Public::from(self).to_raw_vec() - } - - pub fn sign(self, msg: &[u8]) -> Signature { - Pair::from(self).sign(msg) - } - - pub fn pair(self) -> Pair { - Pair::from_string(&format!("//{}", <&'static str>::from(self)), None) - .expect("static values are known good; qed") - } - - /// Returns an interator over all test accounts. 
- pub fn iter() -> impl Iterator { - ::iter() - } + pub fn from_public(who: &Public) -> Option { + [ + Keyring::Alice, + Keyring::Bob, + Keyring::Charlie, + Keyring::Dave, + Keyring::Eve, + Keyring::Ferdie, + Keyring::One, + Keyring::Two, + ] + .iter() + .map(|i| *i) + .find(|&k| &Public::from(k) == who) + } + + pub fn from_raw_public(who: [u8; 32]) -> Option { + Self::from_public(&Public::from_raw(who)) + } + + pub fn to_raw_public(self) -> [u8; 32] { + *Public::from(self).as_array_ref() + } + + pub fn from_h256_public(who: H256) -> Option { + Self::from_public(&Public::from_raw(who.into())) + } + + pub fn to_h256_public(self) -> H256 { + Public::from(self).as_array_ref().into() + } + + pub fn to_raw_public_vec(self) -> Vec { + Public::from(self).to_raw_vec() + } + + pub fn sign(self, msg: &[u8]) -> Signature { + Pair::from(self).sign(msg) + } + + pub fn pair(self) -> Pair { + Pair::from_string(&format!("//{}", <&'static str>::from(self)), None) + .expect("static values are known good; qed") + } + + /// Returns an interator over all test accounts. + pub fn iter() -> impl Iterator { + ::iter() + } } impl From for &'static str { - fn from(k: Keyring) -> Self { - match k { - Keyring::Alice => "Alice", - Keyring::Bob => "Bob", - Keyring::Charlie => "Charlie", - Keyring::Dave => "Dave", - Keyring::Eve => "Eve", - Keyring::Ferdie => "Ferdie", - Keyring::One => "One", - Keyring::Two => "Two", - } - } + fn from(k: Keyring) -> Self { + match k { + Keyring::Alice => "Alice", + Keyring::Bob => "Bob", + Keyring::Charlie => "Charlie", + Keyring::Dave => "Dave", + Keyring::Eve => "Eve", + Keyring::Ferdie => "Ferdie", + Keyring::One => "One", + Keyring::Two => "Two", + } + } } lazy_static! { - static ref PRIVATE_KEYS: HashMap = { - Keyring::iter().map(|i| (i, i.pair())).collect() - }; - - static ref PUBLIC_KEYS: HashMap = { - PRIVATE_KEYS.iter().map(|(&name, pair)| (name, pair.public())).collect() - }; + static ref PRIVATE_KEYS: HashMap = + { Keyring::iter().map(|i| (i, i.pair())).collect() }; + static ref PUBLIC_KEYS: HashMap = { + PRIVATE_KEYS + .iter() + .map(|(&name, pair)| (name, pair.public())) + .collect() + }; } impl From for Public { - fn from(k: Keyring) -> Self { - (*PUBLIC_KEYS).get(&k).unwrap().clone() - } + fn from(k: Keyring) -> Self { + (*PUBLIC_KEYS).get(&k).unwrap().clone() + } } impl From for Pair { - fn from(k: Keyring) -> Self { - k.pair() - } + fn from(k: Keyring) -> Self { + k.pair() + } } impl From for [u8; 32] { - fn from(k: Keyring) -> Self { - *(*PUBLIC_KEYS).get(&k).unwrap().as_array_ref() - } + fn from(k: Keyring) -> Self { + *(*PUBLIC_KEYS).get(&k).unwrap().as_array_ref() + } } impl From for H256 { - fn from(k: Keyring) -> Self { - (*PUBLIC_KEYS).get(&k).unwrap().as_array_ref().into() - } + fn from(k: Keyring) -> Self { + (*PUBLIC_KEYS).get(&k).unwrap().as_array_ref().into() + } } impl From for &'static [u8; 32] { - fn from(k: Keyring) -> Self { - (*PUBLIC_KEYS).get(&k).unwrap().as_array_ref() - } + fn from(k: Keyring) -> Self { + (*PUBLIC_KEYS).get(&k).unwrap().as_array_ref() + } } impl AsRef<[u8; 32]> for Keyring { - fn as_ref(&self) -> &[u8; 32] { - (*PUBLIC_KEYS).get(self).unwrap().as_array_ref() - } + fn as_ref(&self) -> &[u8; 32] { + (*PUBLIC_KEYS).get(self).unwrap().as_array_ref() + } } impl AsRef for Keyring { - fn as_ref(&self) -> &Public { - (*PUBLIC_KEYS).get(self).unwrap() - } + fn as_ref(&self) -> &Public { + (*PUBLIC_KEYS).get(self).unwrap() + } } impl Deref for Keyring { - type Target = [u8; 32]; - fn deref(&self) -> &[u8; 32] { - 
(*PUBLIC_KEYS).get(self).unwrap().as_array_ref() - } + type Target = [u8; 32]; + fn deref(&self) -> &[u8; 32] { + (*PUBLIC_KEYS).get(self).unwrap().as_array_ref() + } } #[cfg(test)] mod tests { - use super::*; - use substrate_primitives::{ed25519::Pair, Pair as PairT}; - - #[test] - fn should_work() { - assert!(Pair::verify(&Keyring::Alice.sign(b"I am Alice!"), b"I am Alice!", Keyring::Alice)); - assert!(!Pair::verify(&Keyring::Alice.sign(b"I am Alice!"), b"I am Bob!", Keyring::Alice)); - assert!(!Pair::verify(&Keyring::Alice.sign(b"I am Alice!"), b"I am Alice!", Keyring::Bob)); - } + use super::*; + use substrate_primitives::{ed25519::Pair, Pair as PairT}; + + #[test] + fn should_work() { + assert!(Pair::verify( + &Keyring::Alice.sign(b"I am Alice!"), + b"I am Alice!", + Keyring::Alice + )); + assert!(!Pair::verify( + &Keyring::Alice.sign(b"I am Alice!"), + b"I am Bob!", + Keyring::Alice + )); + assert!(!Pair::verify( + &Keyring::Alice.sign(b"I am Alice!"), + b"I am Alice!", + Keyring::Bob + )); + } } diff --git a/core/keyring/src/lib.rs b/core/keyring/src/lib.rs index 5cf38401d0..519ba34bef 100644 --- a/core/keyring/src/lib.rs +++ b/core/keyring/src/lib.rs @@ -31,6 +31,6 @@ pub use sr25519::Keyring as AccountKeyring; pub use ed25519::Keyring as AuthorityKeyring; pub mod test { - /// The keyring for use with accounts when using the test runtime. - pub use super::ed25519::Keyring as AccountKeyring; + /// The keyring for use with accounts when using the test runtime. + pub use super::ed25519::Keyring as AccountKeyring; } diff --git a/core/keyring/src/sr25519.rs b/core/keyring/src/sr25519.rs index 1d3342d86d..1f06313c9a 100644 --- a/core/keyring/src/sr25519.rs +++ b/core/keyring/src/sr25519.rs @@ -16,163 +16,184 @@ //! Support code for the runtime. A set of test accounts. +use lazy_static::lazy_static; use std::collections::HashMap; use std::ops::Deref; -use lazy_static::lazy_static; -use substrate_primitives::{sr25519::{Pair, Public, Signature}, Pair as PairT, H256}; pub use substrate_primitives::sr25519; +use substrate_primitives::{ + sr25519::{Pair, Public, Signature}, + Pair as PairT, H256, +}; /// Set of test accounts. 
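+///
+/// # Example
+///
+/// A small signing sketch, mirroring the tests below:
+///
+/// ```ignore
+/// let signature = Keyring::Alice.sign(b"hello");
+/// assert!(Pair::verify(&signature, b"hello", Keyring::Alice));
+/// ```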
#[derive(Clone, Copy, PartialEq, Eq, Hash)] pub enum Keyring { - Alice, - Bob, - Charlie, - Dave, - Eve, - Ferdie, - One, - Two, + Alice, + Bob, + Charlie, + Dave, + Eve, + Ferdie, + One, + Two, } impl Keyring { - pub fn from_public(who: &Public) -> Option { - [ - Keyring::Alice, - Keyring::Bob, - Keyring::Charlie, - Keyring::Dave, - Keyring::Eve, - Keyring::Ferdie, - Keyring::One, - Keyring::Two, - ].iter() - .map(|i| *i) - .find(|&k| &Public::from(k) == who) - } - - pub fn from_raw_public(who: [u8; 32]) -> Option { - Self::from_public(&Public::from_raw(who)) - } - - pub fn to_raw_public(self) -> [u8; 32] { - *Public::from(self).as_array_ref() - } - - pub fn from_h256_public(who: H256) -> Option { - Self::from_public(&Public::from_raw(who.into())) - } - - pub fn to_h256_public(self) -> H256 { - Public::from(self).as_array_ref().into() - } - - pub fn to_raw_public_vec(self) -> Vec { - Public::from(self).to_raw_vec() - } - - pub fn sign(self, msg: &[u8]) -> Signature { - Pair::from(self).sign(msg) - } - - pub fn pair(self) -> Pair { - Pair::from_string(&format!("//{}", <&'static str>::from(self)), None) - .expect("static values are known good; qed") - } + pub fn from_public(who: &Public) -> Option { + [ + Keyring::Alice, + Keyring::Bob, + Keyring::Charlie, + Keyring::Dave, + Keyring::Eve, + Keyring::Ferdie, + Keyring::One, + Keyring::Two, + ] + .iter() + .map(|i| *i) + .find(|&k| &Public::from(k) == who) + } + + pub fn from_raw_public(who: [u8; 32]) -> Option { + Self::from_public(&Public::from_raw(who)) + } + + pub fn to_raw_public(self) -> [u8; 32] { + *Public::from(self).as_array_ref() + } + + pub fn from_h256_public(who: H256) -> Option { + Self::from_public(&Public::from_raw(who.into())) + } + + pub fn to_h256_public(self) -> H256 { + Public::from(self).as_array_ref().into() + } + + pub fn to_raw_public_vec(self) -> Vec { + Public::from(self).to_raw_vec() + } + + pub fn sign(self, msg: &[u8]) -> Signature { + Pair::from(self).sign(msg) + } + + pub fn pair(self) -> Pair { + Pair::from_string(&format!("//{}", <&'static str>::from(self)), None) + .expect("static values are known good; qed") + } } impl From for &'static str { - fn from(k: Keyring) -> Self { - match k { - Keyring::Alice => "Alice", - Keyring::Bob => "Bob", - Keyring::Charlie => "Charlie", - Keyring::Dave => "Dave", - Keyring::Eve => "Eve", - Keyring::Ferdie => "Ferdie", - Keyring::One => "One", - Keyring::Two => "Two", - } - } + fn from(k: Keyring) -> Self { + match k { + Keyring::Alice => "Alice", + Keyring::Bob => "Bob", + Keyring::Charlie => "Charlie", + Keyring::Dave => "Dave", + Keyring::Eve => "Eve", + Keyring::Ferdie => "Ferdie", + Keyring::One => "One", + Keyring::Two => "Two", + } + } } lazy_static! 
{ - static ref PRIVATE_KEYS: HashMap = { - [ - Keyring::Alice, - Keyring::Bob, - Keyring::Charlie, - Keyring::Dave, - Keyring::Eve, - Keyring::Ferdie, - Keyring::One, - Keyring::Two, - ].iter().map(|&i| (i, i.pair())).collect() - }; - - static ref PUBLIC_KEYS: HashMap = { - PRIVATE_KEYS.iter().map(|(&name, pair)| (name, pair.public())).collect() - }; + static ref PRIVATE_KEYS: HashMap = { + [ + Keyring::Alice, + Keyring::Bob, + Keyring::Charlie, + Keyring::Dave, + Keyring::Eve, + Keyring::Ferdie, + Keyring::One, + Keyring::Two, + ] + .iter() + .map(|&i| (i, i.pair())) + .collect() + }; + static ref PUBLIC_KEYS: HashMap = { + PRIVATE_KEYS + .iter() + .map(|(&name, pair)| (name, pair.public())) + .collect() + }; } impl From for Public { - fn from(k: Keyring) -> Self { - (*PUBLIC_KEYS).get(&k).unwrap().clone() - } + fn from(k: Keyring) -> Self { + (*PUBLIC_KEYS).get(&k).unwrap().clone() + } } impl From for Pair { - fn from(k: Keyring) -> Self { - k.pair() - } + fn from(k: Keyring) -> Self { + k.pair() + } } impl From for [u8; 32] { - fn from(k: Keyring) -> Self { - *(*PUBLIC_KEYS).get(&k).unwrap().as_array_ref() - } + fn from(k: Keyring) -> Self { + *(*PUBLIC_KEYS).get(&k).unwrap().as_array_ref() + } } impl From for H256 { - fn from(k: Keyring) -> Self { - (*PUBLIC_KEYS).get(&k).unwrap().as_array_ref().into() - } + fn from(k: Keyring) -> Self { + (*PUBLIC_KEYS).get(&k).unwrap().as_array_ref().into() + } } impl From for &'static [u8; 32] { - fn from(k: Keyring) -> Self { - (*PUBLIC_KEYS).get(&k).unwrap().as_array_ref() - } + fn from(k: Keyring) -> Self { + (*PUBLIC_KEYS).get(&k).unwrap().as_array_ref() + } } impl AsRef<[u8; 32]> for Keyring { - fn as_ref(&self) -> &[u8; 32] { - (*PUBLIC_KEYS).get(self).unwrap().as_array_ref() - } + fn as_ref(&self) -> &[u8; 32] { + (*PUBLIC_KEYS).get(self).unwrap().as_array_ref() + } } impl AsRef for Keyring { - fn as_ref(&self) -> &Public { - (*PUBLIC_KEYS).get(self).unwrap() - } + fn as_ref(&self) -> &Public { + (*PUBLIC_KEYS).get(self).unwrap() + } } impl Deref for Keyring { - type Target = [u8; 32]; - fn deref(&self) -> &[u8; 32] { - (*PUBLIC_KEYS).get(self).unwrap().as_array_ref() - } + type Target = [u8; 32]; + fn deref(&self) -> &[u8; 32] { + (*PUBLIC_KEYS).get(self).unwrap().as_array_ref() + } } #[cfg(test)] mod tests { - use super::*; - use substrate_primitives::{sr25519::Pair, Pair as PairT}; - - #[test] - fn should_work() { - assert!(Pair::verify(&Keyring::Alice.sign(b"I am Alice!"), b"I am Alice!", Keyring::Alice)); - assert!(!Pair::verify(&Keyring::Alice.sign(b"I am Alice!"), b"I am Bob!", Keyring::Alice)); - assert!(!Pair::verify(&Keyring::Alice.sign(b"I am Alice!"), b"I am Alice!", Keyring::Bob)); - } + use super::*; + use substrate_primitives::{sr25519::Pair, Pair as PairT}; + + #[test] + fn should_work() { + assert!(Pair::verify( + &Keyring::Alice.sign(b"I am Alice!"), + b"I am Alice!", + Keyring::Alice + )); + assert!(!Pair::verify( + &Keyring::Alice.sign(b"I am Alice!"), + b"I am Bob!", + Keyring::Alice + )); + assert!(!Pair::verify( + &Keyring::Alice.sign(b"I am Alice!"), + b"I am Alice!", + Keyring::Bob + )); + } } diff --git a/core/keystore/src/lib.rs b/core/keystore/src/lib.rs index 59c1a65cfb..a467980d1d 100644 --- a/core/keystore/src/lib.rs +++ b/core/keystore/src/lib.rs @@ -21,147 +21,164 @@ #![allow(deprecated)] use std::collections::HashMap; -use std::path::PathBuf; use std::fs::{self, File}; use std::io::{self, Write}; +use std::path::PathBuf; -use error_chain::{bail, error_chain, error_chain_processing, impl_error_chain_processed, 
- impl_extract_backtrace, impl_error_chain_kind}; +use error_chain::{ + bail, error_chain, error_chain_processing, impl_error_chain_kind, impl_error_chain_processed, + impl_extract_backtrace, +}; -use substrate_primitives::{ed25519::{Pair, Public}, Pair as PairT}; +use substrate_primitives::{ + ed25519::{Pair, Public}, + Pair as PairT, +}; pub use crypto::KEY_ITERATIONS; error_chain! { - foreign_links { - Io(io::Error); - Json(serde_json::Error); - } - - errors { - InvalidPassword { - description("Invalid password"), - display("Invalid password"), - } - InvalidPhrase { - description("Invalid recovery phrase (BIP39) data"), - display("Invalid recovery phrase (BIP39) data"), - } - InvalidSeed { - description("Invalid seed"), - display("Invalid seed"), - } - } + foreign_links { + Io(io::Error); + Json(serde_json::Error); + } + + errors { + InvalidPassword { + description("Invalid password"), + display("Invalid password"), + } + InvalidPhrase { + description("Invalid recovery phrase (BIP39) data"), + display("Invalid recovery phrase (BIP39) data"), + } + InvalidSeed { + description("Invalid seed"), + display("Invalid seed"), + } + } } /// Key store. pub struct Store { - path: PathBuf, - additional: HashMap, + path: PathBuf, + additional: HashMap, } impl Store { - /// Create a new store at the given path. - pub fn open(path: PathBuf) -> Result { - fs::create_dir_all(&path)?; - Ok(Store { path, additional: HashMap::new() }) - } - - /// Generate a new key, placing it into the store. - pub fn generate(&self, password: &str) -> Result { - let (pair, phrase) = Pair::generate_with_phrase(Some(password)); - let mut file = File::create(self.key_file_path(&pair.public()))?; - ::serde_json::to_writer(&file, &phrase)?; - file.flush()?; - Ok(pair) - } - - /// Create a new key from seed. Do not place it into the store. - pub fn generate_from_seed(&mut self, seed: &str) -> Result { - let pair = Pair::from_string(seed, None) - .map_err(|_| Error::from(ErrorKind::InvalidSeed))?; - self.additional.insert(pair.public(), pair.clone()); - Ok(pair) - } - - /// Load a key file with given public key. - pub fn load(&self, public: &Public, password: &str) -> Result { - if let Some(pair) = self.additional.get(public) { - return Ok(pair.clone()); - } - let path = self.key_file_path(public); - let file = File::open(path)?; - - let phrase: String = ::serde_json::from_reader(&file)?; - let pair = Pair::from_phrase(&phrase, Some(password)) - .map_err(|_| Error::from(ErrorKind::InvalidPhrase))?; - if &pair.public() != public { - bail!(ErrorKind::InvalidPassword); - } - Ok(pair) - } - - /// Get public keys of all stored keys. - pub fn contents(&self) -> Result> { - let mut public_keys: Vec = self.additional.keys().cloned().collect(); - for entry in fs::read_dir(&self.path)? { - let entry = entry?; - let path = entry.path(); - - // skip directories and non-unicode file names (hex is unicode) - if let Some(name) = path.file_name().and_then(|n| n.to_str()) { - if name.len() != 64 { continue } - - match hex::decode(name) { - Ok(ref hex) if hex.len() == 32 => { - let mut buf = [0; 32]; - buf.copy_from_slice(&hex[..]); - - public_keys.push(Public(buf)); - } - _ => continue, - } - } - } - - Ok(public_keys) - } - - fn key_file_path(&self, public: &Public) -> PathBuf { - let mut buf = self.path.clone(); - buf.push(hex::encode(public.as_slice())); - buf - } + /// Create a new store at the given path. 
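// A sketch of the intended round trip, assuming the `Result` alias generated
// by `error_chain!` above and the methods defined below:
fn roundtrip_sketch(dir: PathBuf) -> Result<()> {
    let store = Store::open(dir)?; // creates the directory if missing
    let pair = store.generate("the password")?; // persists a BIP39 phrase
    let again = store.load(&pair.public(), "the password")?;
    assert_eq!(pair.public(), again.public());
    Ok(())
}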
+ pub fn open(path: PathBuf) -> Result { + fs::create_dir_all(&path)?; + Ok(Store { + path, + additional: HashMap::new(), + }) + } + + /// Generate a new key, placing it into the store. + pub fn generate(&self, password: &str) -> Result { + let (pair, phrase) = Pair::generate_with_phrase(Some(password)); + let mut file = File::create(self.key_file_path(&pair.public()))?; + ::serde_json::to_writer(&file, &phrase)?; + file.flush()?; + Ok(pair) + } + + /// Create a new key from seed. Do not place it into the store. + pub fn generate_from_seed(&mut self, seed: &str) -> Result { + let pair = + Pair::from_string(seed, None).map_err(|_| Error::from(ErrorKind::InvalidSeed))?; + self.additional.insert(pair.public(), pair.clone()); + Ok(pair) + } + + /// Load a key file with given public key. + pub fn load(&self, public: &Public, password: &str) -> Result { + if let Some(pair) = self.additional.get(public) { + return Ok(pair.clone()); + } + let path = self.key_file_path(public); + let file = File::open(path)?; + + let phrase: String = ::serde_json::from_reader(&file)?; + let pair = Pair::from_phrase(&phrase, Some(password)) + .map_err(|_| Error::from(ErrorKind::InvalidPhrase))?; + if &pair.public() != public { + bail!(ErrorKind::InvalidPassword); + } + Ok(pair) + } + + /// Get public keys of all stored keys. + pub fn contents(&self) -> Result> { + let mut public_keys: Vec = self.additional.keys().cloned().collect(); + for entry in fs::read_dir(&self.path)? { + let entry = entry?; + let path = entry.path(); + + // skip directories and non-unicode file names (hex is unicode) + if let Some(name) = path.file_name().and_then(|n| n.to_str()) { + if name.len() != 64 { + continue; + } + + match hex::decode(name) { + Ok(ref hex) if hex.len() == 32 => { + let mut buf = [0; 32]; + buf.copy_from_slice(&hex[..]); + + public_keys.push(Public(buf)); + } + _ => continue, + } + } + } + + Ok(public_keys) + } + + fn key_file_path(&self, public: &Public) -> PathBuf { + let mut buf = self.path.clone(); + buf.push(hex::encode(public.as_slice())); + buf + } } #[cfg(test)] mod tests { - use super::*; - use tempdir::TempDir; + use super::*; + use tempdir::TempDir; - #[test] - fn basic_store() { - let temp_dir = TempDir::new("keystore").unwrap(); - let store = Store::open(temp_dir.path().to_owned()).unwrap(); + #[test] + fn basic_store() { + let temp_dir = TempDir::new("keystore").unwrap(); + let store = Store::open(temp_dir.path().to_owned()).unwrap(); - assert!(store.contents().unwrap().is_empty()); + assert!(store.contents().unwrap().is_empty()); - let key = store.generate("thepassword").unwrap(); - let key2 = store.load(&key.public(), "thepassword").unwrap(); + let key = store.generate("thepassword").unwrap(); + let key2 = store.load(&key.public(), "thepassword").unwrap(); - assert!(store.load(&key.public(), "notthepassword").is_err()); + assert!(store.load(&key.public(), "notthepassword").is_err()); - assert_eq!(key.public(), key2.public()); + assert_eq!(key.public(), key2.public()); - assert_eq!(store.contents().unwrap()[0], key.public()); - } + assert_eq!(store.contents().unwrap()[0], key.public()); + } - #[test] - fn test_generate_from_seed() { - let temp_dir = TempDir::new("keystore").unwrap(); - let mut store = Store::open(temp_dir.path().to_owned()).unwrap(); + #[test] + fn test_generate_from_seed() { + let temp_dir = TempDir::new("keystore").unwrap(); + let mut store = Store::open(temp_dir.path().to_owned()).unwrap(); - let pair = 
store.generate_from_seed("0x3d97c819d68f9bafa7d6e79cb991eebcd77d966c5334c0b94d9e1fa7ad0869dc").unwrap(); - assert_eq!("5DKUrgFqCPV8iAXx9sjy1nyBygQCeiUYRFWurZGhnrn3HBL8", pair.public().to_ss58check()); - } + let pair = store + .generate_from_seed( + "0x3d97c819d68f9bafa7d6e79cb991eebcd77d966c5334c0b94d9e1fa7ad0869dc", + ) + .unwrap(); + assert_eq!( + "5DKUrgFqCPV8iAXx9sjy1nyBygQCeiUYRFWurZGhnrn3HBL8", + pair.public().to_ss58check() + ); + } } diff --git a/core/network-libp2p/src/behaviour.rs b/core/network-libp2p/src/behaviour.rs index f93665ce76..792a285827 100644 --- a/core/network-libp2p/src/behaviour.rs +++ b/core/network-libp2p/src/behaviour.rs @@ -16,17 +16,17 @@ use crate::custom_proto::{CustomProto, CustomProtoOut, RegisteredProtocol}; use futures::prelude::*; -use libp2p::NetworkBehaviour; -use libp2p::core::{Multiaddr, PeerId, ProtocolsHandler, PublicKey}; +use libp2p::core::swarm::toggle::Toggle; use libp2p::core::swarm::{ConnectedPoint, NetworkBehaviour, NetworkBehaviourAction}; use libp2p::core::swarm::{NetworkBehaviourEventProcess, PollParameters}; -use libp2p::core::swarm::toggle::Toggle; -use libp2p::identify::{Identify, IdentifyEvent, protocol::IdentifyInfo}; +use libp2p::core::{Multiaddr, PeerId, ProtocolsHandler, PublicKey}; +use libp2p::identify::{protocol::IdentifyInfo, Identify, IdentifyEvent}; use libp2p::kad::{Kademlia, KademliaOut}; use libp2p::mdns::{Mdns, MdnsEvent}; use libp2p::ping::{Ping, PingEvent}; +use libp2p::NetworkBehaviour; use log::{debug, trace, warn}; -use std::{cmp, io, fmt, time::Duration, time::Instant}; +use std::{cmp, fmt, io, time::Duration, time::Instant}; use tokio_io::{AsyncRead, AsyncWrite}; use tokio_timer::Delay; use void; @@ -35,416 +35,450 @@ use void; #[derive(NetworkBehaviour)] #[behaviour(out_event = "BehaviourOut", poll_method = "poll")] pub struct Behaviour { - /// Periodically ping nodes, and close the connection if it's unresponsive. - ping: Ping, - /// Custom protocols (dot, bbq, sub, etc.). - custom_protocols: CustomProto, - /// Discovers nodes of the network. Defined below. - discovery: DiscoveryBehaviour, - /// Periodically identifies the remote and responds to incoming requests. - identify: Identify, - /// Discovers nodes on the local network. - mdns: Toggle>, - - /// Queue of events to produce for the outside. - #[behaviour(ignore)] - events: Vec>, + /// Periodically ping nodes, and close the connection if it's unresponsive. + ping: Ping, + /// Custom protocols (dot, bbq, sub, etc.). + custom_protocols: CustomProto, + /// Discovers nodes of the network. Defined below. + discovery: DiscoveryBehaviour, + /// Periodically identifies the remote and responds to incoming requests. + identify: Identify, + /// Discovers nodes on the local network. + mdns: Toggle>, + + /// Queue of events to produce for the outside. + #[behaviour(ignore)] + events: Vec>, } impl Behaviour { - /// Builds a new `Behaviour`. 
- pub fn new( - user_agent: String, - local_public_key: PublicKey, - protocol: RegisteredProtocol, - known_addresses: Vec<(PeerId, Multiaddr)>, - peerset: substrate_peerset::PeersetMut, - enable_mdns: bool, - ) -> Self { - let identify = { - let proto_version = "/substrate/1.0".to_string(); - Identify::new(proto_version, user_agent, local_public_key.clone()) - }; - - let custom_protocols = CustomProto::new(protocol, peerset); - - let mut kademlia = Kademlia::new(local_public_key.into_peer_id()); - for (peer_id, addr) in &known_addresses { - kademlia.add_connected_address(peer_id, addr.clone()); - } - - Behaviour { - ping: Ping::new(), - custom_protocols, - discovery: DiscoveryBehaviour { - user_defined: known_addresses, - kademlia, - next_kad_random_query: Delay::new(Instant::now()), - duration_to_next_kad: Duration::from_secs(1), - }, - identify, - mdns: if enable_mdns { - match Mdns::new() { - Ok(mdns) => Some(mdns).into(), - Err(err) => { - warn!(target: "sub-libp2p", "Failed to initialize mDNS: {:?}", err); - None.into() - } - } - } else { - None.into() - }, - events: Vec::new(), - } - } - - /// Sends a message to a peer. - /// - /// Has no effect if the custom protocol is not open with the given peer. - /// - /// Also note that even we have a valid open substream, it may in fact be already closed - /// without us knowing, in which case the packet will not be received. - #[inline] - pub fn send_custom_message(&mut self, target: &PeerId, data: TMessage) { - self.custom_protocols.send_packet(target, data) - } - - /// Returns the list of nodes that we know exist in the network. - pub fn known_peers(&self) -> impl Iterator { - self.discovery.kademlia.kbuckets_entries() - } - - /// Returns true if we try to open protocols with the given peer. - pub fn is_enabled(&self, peer_id: &PeerId) -> bool { - self.custom_protocols.is_enabled(peer_id) - } - - /// Returns true if we have an open protocol with the given peer. - pub fn is_open(&self, peer_id: &PeerId) -> bool { - self.custom_protocols.is_open(peer_id) - } - - /// Adds a hard-coded address for the given peer, that never expires. - pub fn add_known_address(&mut self, peer_id: PeerId, addr: Multiaddr) { - if self.discovery.user_defined.iter().all(|(p, a)| *p != peer_id && *a != addr) { - self.discovery.user_defined.push((peer_id, addr)); - } - } - - /// Disconnects the custom protocols from a peer. - /// - /// The peer will still be able to use Kademlia or other protocols, but will get disconnected - /// after a few seconds of inactivity. - /// - /// This is asynchronous and does not instantly close the custom protocols. - /// Corresponding closing events will be generated once the closing actually happens. - /// - /// Has no effect if we're not connected to the `PeerId`. - #[inline] - pub fn drop_node(&mut self, peer_id: &PeerId) { - self.custom_protocols.disconnect_peer(peer_id) - } - - /// Returns the state of the peerset manager, for debugging purposes. - pub fn peerset_debug_info(&self) -> serde_json::Value { - self.custom_protocols.peerset_debug_info() - } + /// Builds a new `Behaviour`. 
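// A sketch of how the service side typically drives this behaviour, assuming
// the generic parameters and bounds used by this file's impls (`TMessage`
// bounded by `CustomMessage` from the custom_proto module, `TSubstream` by
// `AsyncRead + AsyncWrite`) and the methods defined below:
fn drive_peer<TMessage: CustomMessage, TSubstream: AsyncRead + AsyncWrite>(
    behaviour: &mut Behaviour<TMessage, TSubstream>,
    peer_id: &PeerId,
    msg: TMessage,
) {
    if behaviour.is_open(peer_id) {
        // Fire-and-forget: silently dropped if the substream has closed.
        behaviour.send_custom_message(peer_id, msg);
    } else {
        // Asynchronous: a close event is emitted once the close takes effect.
        behaviour.drop_node(peer_id);
    }
}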
+ pub fn new( + user_agent: String, + local_public_key: PublicKey, + protocol: RegisteredProtocol, + known_addresses: Vec<(PeerId, Multiaddr)>, + peerset: substrate_peerset::PeersetMut, + enable_mdns: bool, + ) -> Self { + let identify = { + let proto_version = "/substrate/1.0".to_string(); + Identify::new(proto_version, user_agent, local_public_key.clone()) + }; + + let custom_protocols = CustomProto::new(protocol, peerset); + + let mut kademlia = Kademlia::new(local_public_key.into_peer_id()); + for (peer_id, addr) in &known_addresses { + kademlia.add_connected_address(peer_id, addr.clone()); + } + + Behaviour { + ping: Ping::new(), + custom_protocols, + discovery: DiscoveryBehaviour { + user_defined: known_addresses, + kademlia, + next_kad_random_query: Delay::new(Instant::now()), + duration_to_next_kad: Duration::from_secs(1), + }, + identify, + mdns: if enable_mdns { + match Mdns::new() { + Ok(mdns) => Some(mdns).into(), + Err(err) => { + warn!(target: "sub-libp2p", "Failed to initialize mDNS: {:?}", err); + None.into() + } + } + } else { + None.into() + }, + events: Vec::new(), + } + } + + /// Sends a message to a peer. + /// + /// Has no effect if the custom protocol is not open with the given peer. + /// + /// Also note that even we have a valid open substream, it may in fact be already closed + /// without us knowing, in which case the packet will not be received. + #[inline] + pub fn send_custom_message(&mut self, target: &PeerId, data: TMessage) { + self.custom_protocols.send_packet(target, data) + } + + /// Returns the list of nodes that we know exist in the network. + pub fn known_peers(&self) -> impl Iterator { + self.discovery.kademlia.kbuckets_entries() + } + + /// Returns true if we try to open protocols with the given peer. + pub fn is_enabled(&self, peer_id: &PeerId) -> bool { + self.custom_protocols.is_enabled(peer_id) + } + + /// Returns true if we have an open protocol with the given peer. + pub fn is_open(&self, peer_id: &PeerId) -> bool { + self.custom_protocols.is_open(peer_id) + } + + /// Adds a hard-coded address for the given peer, that never expires. + pub fn add_known_address(&mut self, peer_id: PeerId, addr: Multiaddr) { + if self + .discovery + .user_defined + .iter() + .all(|(p, a)| *p != peer_id && *a != addr) + { + self.discovery.user_defined.push((peer_id, addr)); + } + } + + /// Disconnects the custom protocols from a peer. + /// + /// The peer will still be able to use Kademlia or other protocols, but will get disconnected + /// after a few seconds of inactivity. + /// + /// This is asynchronous and does not instantly close the custom protocols. + /// Corresponding closing events will be generated once the closing actually happens. + /// + /// Has no effect if we're not connected to the `PeerId`. + #[inline] + pub fn drop_node(&mut self, peer_id: &PeerId) { + self.custom_protocols.disconnect_peer(peer_id) + } + + /// Returns the state of the peerset manager, for debugging purposes. + pub fn peerset_debug_info(&self) -> serde_json::Value { + self.custom_protocols.peerset_debug_info() + } } /// Event that can be emitted by the behaviour. #[derive(Debug)] pub enum BehaviourOut { - /// Opened a custom protocol with the remote. - CustomProtocolOpen { - /// Version of the protocol that has been opened. - version: u8, - /// Id of the node we have opened a connection with. - peer_id: PeerId, - /// Endpoint used for this custom protocol. - endpoint: ConnectedPoint, - }, - - /// Closed a custom protocol with the remote. 
- CustomProtocolClosed { - /// Id of the peer we were connected to. - peer_id: PeerId, - /// Reason why the substream closed. If `Ok`, then it's a graceful exit (EOF). - result: io::Result<()>, - }, - - /// Receives a message on a custom protocol substream. - CustomMessage { - /// Id of the peer the message came from. - peer_id: PeerId, - /// Message that has been received. - message: TMessage, - }, - - /// A substream with a remote is clogged. We should avoid sending more data to it if possible. - Clogged { - /// Id of the peer the message came from. - peer_id: PeerId, - /// Copy of the messages that are within the buffer, for further diagnostic. - messages: Vec, - }, - - /// We have obtained debug information from a peer. - Identified { - /// Id of the peer that has been identified. - peer_id: PeerId, - /// Information about the peer. - info: IdentifyInfo, - }, - - /// We have successfully pinged a peer. - PingSuccess { - /// Id of the peer that has been pinged. - peer_id: PeerId, - /// Time it took for the ping to come back. - ping_time: Duration, - }, + /// Opened a custom protocol with the remote. + CustomProtocolOpen { + /// Version of the protocol that has been opened. + version: u8, + /// Id of the node we have opened a connection with. + peer_id: PeerId, + /// Endpoint used for this custom protocol. + endpoint: ConnectedPoint, + }, + + /// Closed a custom protocol with the remote. + CustomProtocolClosed { + /// Id of the peer we were connected to. + peer_id: PeerId, + /// Reason why the substream closed. If `Ok`, then it's a graceful exit (EOF). + result: io::Result<()>, + }, + + /// Receives a message on a custom protocol substream. + CustomMessage { + /// Id of the peer the message came from. + peer_id: PeerId, + /// Message that has been received. + message: TMessage, + }, + + /// A substream with a remote is clogged. We should avoid sending more data to it if possible. + Clogged { + /// Id of the peer the message came from. + peer_id: PeerId, + /// Copy of the messages that are within the buffer, for further diagnostic. + messages: Vec, + }, + + /// We have obtained debug information from a peer. + Identified { + /// Id of the peer that has been identified. + peer_id: PeerId, + /// Information about the peer. + info: IdentifyInfo, + }, + + /// We have successfully pinged a peer. + PingSuccess { + /// Id of the peer that has been pinged. + peer_id: PeerId, + /// Time it took for the ping to come back. 
+ ping_time: Duration, + }, } impl From> for BehaviourOut { - fn from(other: CustomProtoOut) -> BehaviourOut { - match other { - CustomProtoOut::CustomProtocolOpen { version, peer_id, endpoint } => { - BehaviourOut::CustomProtocolOpen { version, peer_id, endpoint } - } - CustomProtoOut::CustomProtocolClosed { peer_id, result } => { - BehaviourOut::CustomProtocolClosed { peer_id, result } - } - CustomProtoOut::CustomMessage { peer_id, message } => { - BehaviourOut::CustomMessage { peer_id, message } - } - CustomProtoOut::Clogged { peer_id, messages } => { - BehaviourOut::Clogged { peer_id, messages } - } - } - } + fn from(other: CustomProtoOut) -> BehaviourOut { + match other { + CustomProtoOut::CustomProtocolOpen { + version, + peer_id, + endpoint, + } => BehaviourOut::CustomProtocolOpen { + version, + peer_id, + endpoint, + }, + CustomProtoOut::CustomProtocolClosed { peer_id, result } => { + BehaviourOut::CustomProtocolClosed { peer_id, result } + } + CustomProtoOut::CustomMessage { peer_id, message } => { + BehaviourOut::CustomMessage { peer_id, message } + } + CustomProtoOut::Clogged { peer_id, messages } => { + BehaviourOut::Clogged { peer_id, messages } + } + } + } } -impl NetworkBehaviourEventProcess for Behaviour { - fn inject_event(&mut self, event: void::Void) { - void::unreachable(event) - } +impl NetworkBehaviourEventProcess + for Behaviour +{ + fn inject_event(&mut self, event: void::Void) { + void::unreachable(event) + } } -impl NetworkBehaviourEventProcess> for Behaviour { - fn inject_event(&mut self, event: CustomProtoOut) { - self.events.push(event.into()); - } +impl NetworkBehaviourEventProcess> + for Behaviour +{ + fn inject_event(&mut self, event: CustomProtoOut) { + self.events.push(event.into()); + } } -impl NetworkBehaviourEventProcess for Behaviour { - fn inject_event(&mut self, event: IdentifyEvent) { - match event { - IdentifyEvent::Identified { peer_id, mut info, .. } => { - trace!(target: "sub-libp2p", "Identified {:?} => {:?}", peer_id, info); - // TODO: ideally we would delay the first identification to when we open the custom - // protocol, so that we only report id info to the service about the nodes we - // care about (https://github.com/libp2p/rust-libp2p/issues/876) - if !info.protocol_version.contains("substrate") { - warn!(target: "sub-libp2p", "Connected to a non-Substrate node: {:?}", info); - } - if info.listen_addrs.len() > 30 { - warn!(target: "sub-libp2p", "Node {:?} id reported more than 30 addresses", +impl NetworkBehaviourEventProcess + for Behaviour +{ + fn inject_event(&mut self, event: IdentifyEvent) { + match event { + IdentifyEvent::Identified { + peer_id, mut info, .. + } => { + trace!(target: "sub-libp2p", "Identified {:?} => {:?}", peer_id, info); + // TODO: ideally we would delay the first identification to when we open the custom + // protocol, so that we only report id info to the service about the nodes we + // care about (https://github.com/libp2p/rust-libp2p/issues/876) + if !info.protocol_version.contains("substrate") { + warn!(target: "sub-libp2p", "Connected to a non-Substrate node: {:?}", info); + } + if info.listen_addrs.len() > 30 { + warn!(target: "sub-libp2p", "Node {:?} id reported more than 30 addresses", peer_id); - info.listen_addrs.truncate(30); - } - for addr in &info.listen_addrs { - self.discovery.kademlia.add_connected_address(&peer_id, addr.clone()); - } - self.custom_protocols.add_discovered_node(&peer_id); - self.events.push(BehaviourOut::Identified { peer_id, info }); - } - IdentifyEvent::Error { .. 
} => {} - IdentifyEvent::SendBack { result: Err(ref err), ref peer_id } => - debug!(target: "sub-libp2p", "Error when sending back identify info \ + info.listen_addrs.truncate(30); + } + for addr in &info.listen_addrs { + self.discovery + .kademlia + .add_connected_address(&peer_id, addr.clone()); + } + self.custom_protocols.add_discovered_node(&peer_id); + self.events.push(BehaviourOut::Identified { peer_id, info }); + } + IdentifyEvent::Error { .. } => {} + IdentifyEvent::SendBack { + result: Err(ref err), + ref peer_id, + } => debug!(target: "sub-libp2p", "Error when sending back identify info \ to {:?} => {}", peer_id, err), - IdentifyEvent::SendBack { .. } => {} - } - } + IdentifyEvent::SendBack { .. } => {} + } + } } -impl NetworkBehaviourEventProcess for Behaviour { - fn inject_event(&mut self, out: KademliaOut) { - match out { - KademliaOut::Discovered { .. } => {} - KademliaOut::KBucketAdded { peer_id, .. } => { - self.custom_protocols.add_discovered_node(&peer_id); - } - KademliaOut::FindNodeResult { key, closer_peers } => { - trace!(target: "sub-libp2p", "Libp2p => Query for {:?} yielded {:?} results", +impl NetworkBehaviourEventProcess + for Behaviour +{ + fn inject_event(&mut self, out: KademliaOut) { + match out { + KademliaOut::Discovered { .. } => {} + KademliaOut::KBucketAdded { peer_id, .. } => { + self.custom_protocols.add_discovered_node(&peer_id); + } + KademliaOut::FindNodeResult { key, closer_peers } => { + trace!(target: "sub-libp2p", "Libp2p => Query for {:?} yielded {:?} results", key, closer_peers.len()); - if closer_peers.is_empty() { - warn!(target: "sub-libp2p", "Libp2p => Random Kademlia query has yielded empty \ + if closer_peers.is_empty() { + warn!(target: "sub-libp2p", "Libp2p => Random Kademlia query has yielded empty \ results"); - } - } - // We never start any GET_PROVIDERS query. - KademliaOut::GetProvidersResult { .. } => () - } - } + } + } + // We never start any GET_PROVIDERS query. + KademliaOut::GetProvidersResult { .. 
} => (), + } + } } -impl NetworkBehaviourEventProcess for Behaviour { - fn inject_event(&mut self, event: PingEvent) { - match event { - PingEvent::PingSuccess { peer, time } => { - trace!(target: "sub-libp2p", "Ping time with {:?}: {:?}", peer, time); - self.events.push(BehaviourOut::PingSuccess { peer_id: peer, ping_time: time }); - } - } - } +impl NetworkBehaviourEventProcess + for Behaviour +{ + fn inject_event(&mut self, event: PingEvent) { + match event { + PingEvent::PingSuccess { peer, time } => { + trace!(target: "sub-libp2p", "Ping time with {:?}: {:?}", peer, time); + self.events.push(BehaviourOut::PingSuccess { + peer_id: peer, + ping_time: time, + }); + } + } + } } -impl NetworkBehaviourEventProcess for Behaviour { - fn inject_event(&mut self, event: MdnsEvent) { - match event { - MdnsEvent::Discovered(list) => { - for (peer_id, _) in list { - self.custom_protocols.add_discovered_node(&peer_id); - } - }, - MdnsEvent::Expired(_) => {} - } - } +impl NetworkBehaviourEventProcess + for Behaviour +{ + fn inject_event(&mut self, event: MdnsEvent) { + match event { + MdnsEvent::Discovered(list) => { + for (peer_id, _) in list { + self.custom_protocols.add_discovered_node(&peer_id); + } + } + MdnsEvent::Expired(_) => {} + } + } } impl Behaviour { - fn poll(&mut self) -> Async>> { - if !self.events.is_empty() { - return Async::Ready(NetworkBehaviourAction::GenerateEvent(self.events.remove(0))) - } + fn poll(&mut self) -> Async>> { + if !self.events.is_empty() { + return Async::Ready(NetworkBehaviourAction::GenerateEvent(self.events.remove(0))); + } - Async::NotReady - } + Async::NotReady + } } /// Implementation of `NetworkBehaviour` that discovers the nodes on the network. pub struct DiscoveryBehaviour { - /// User-defined list of nodes and their addresses. Typically includes bootstrap nodes and - /// reserved nodes. - user_defined: Vec<(PeerId, Multiaddr)>, - /// Kademlia requests and answers. - kademlia: Kademlia, - /// Stream that fires when we need to perform the next random Kademlia query. - next_kad_random_query: Delay, - /// After `next_kad_random_query` triggers, the next one triggers after this duration. - duration_to_next_kad: Duration, + /// User-defined list of nodes and their addresses. Typically includes bootstrap nodes and + /// reserved nodes. + user_defined: Vec<(PeerId, Multiaddr)>, + /// Kademlia requests and answers. + kademlia: Kademlia, + /// Stream that fires when we need to perform the next random Kademlia query. + next_kad_random_query: Delay, + /// After `next_kad_random_query` triggers, the next one triggers after this duration. 
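// The timing rule behind this field, pulled out as a standalone sketch: the
// delay starts at one second (see `Behaviour::new` above) and doubles after
// every random Kademlia query, capped at one minute (see `poll` below).
fn next_kad_backoff(current: Duration) -> Duration {
    // Successive delays: 1s, 2s, 4s, ..., 32s, then 60s from then on.
    cmp::min(current * 2, Duration::from_secs(60))
}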
+ duration_to_next_kad: Duration, } impl NetworkBehaviour for DiscoveryBehaviour where - TSubstream: AsyncRead + AsyncWrite, + TSubstream: AsyncRead + AsyncWrite, { - type ProtocolsHandler = as NetworkBehaviour>::ProtocolsHandler; - type OutEvent = as NetworkBehaviour>::OutEvent; - - fn new_handler(&mut self) -> Self::ProtocolsHandler { - NetworkBehaviour::new_handler(&mut self.kademlia) - } - - fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec { - let mut list = self.user_defined.iter() - .filter_map(|(p, a)| if p == peer_id { Some(a.clone()) } else { None }) - .collect::>(); - list.extend(self.kademlia.addresses_of_peer(peer_id)); - trace!(target: "sub-libp2p", "Addresses of {:?} are {:?}", peer_id, list); - if list.is_empty() { - if self.kademlia.kbuckets_entries().any(|p| p == peer_id) { - debug!(target: "sub-libp2p", "Requested dialing to {:?} (peer in k-buckets), \ + type ProtocolsHandler = as NetworkBehaviour>::ProtocolsHandler; + type OutEvent = as NetworkBehaviour>::OutEvent; + + fn new_handler(&mut self) -> Self::ProtocolsHandler { + NetworkBehaviour::new_handler(&mut self.kademlia) + } + + fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec { + let mut list = self + .user_defined + .iter() + .filter_map(|(p, a)| if p == peer_id { Some(a.clone()) } else { None }) + .collect::>(); + list.extend(self.kademlia.addresses_of_peer(peer_id)); + trace!(target: "sub-libp2p", "Addresses of {:?} are {:?}", peer_id, list); + if list.is_empty() { + if self.kademlia.kbuckets_entries().any(|p| p == peer_id) { + debug!(target: "sub-libp2p", "Requested dialing to {:?} (peer in k-buckets), \ and no address was found", peer_id); - } else { - debug!(target: "sub-libp2p", "Requested dialing to {:?} (peer not in k-buckets), \ + } else { + debug!(target: "sub-libp2p", "Requested dialing to {:?} (peer not in k-buckets), \ and no address was found", peer_id); - } - } - list - } - - fn inject_connected(&mut self, peer_id: PeerId, endpoint: ConnectedPoint) { - NetworkBehaviour::inject_connected(&mut self.kademlia, peer_id, endpoint) - } - - fn inject_disconnected(&mut self, peer_id: &PeerId, endpoint: ConnectedPoint) { - NetworkBehaviour::inject_disconnected(&mut self.kademlia, peer_id, endpoint) - } - - fn inject_replaced(&mut self, peer_id: PeerId, closed: ConnectedPoint, opened: ConnectedPoint) { - NetworkBehaviour::inject_replaced(&mut self.kademlia, peer_id, closed, opened) - } - - fn inject_node_event( - &mut self, - peer_id: PeerId, - event: ::OutEvent, - ) { - NetworkBehaviour::inject_node_event(&mut self.kademlia, peer_id, event) - } - - fn poll( - &mut self, - params: &mut PollParameters, - ) -> Async< - NetworkBehaviourAction< - ::InEvent, - Self::OutEvent, - >, - > { - // Poll Kademlia. - match self.kademlia.poll(params) { - Async::Ready(action) => return Async::Ready(action), - Async::NotReady => (), - } - - // Poll the stream that fires when we need to start a random Kademlia query. 
- loop { - match self.next_kad_random_query.poll() { - Ok(Async::NotReady) => break, - Ok(Async::Ready(_)) => { - let random_peer_id = PeerId::random(); - debug!(target: "sub-libp2p", "Libp2p <= Starting random Kademlia request for \ + } + } + list + } + + fn inject_connected(&mut self, peer_id: PeerId, endpoint: ConnectedPoint) { + NetworkBehaviour::inject_connected(&mut self.kademlia, peer_id, endpoint) + } + + fn inject_disconnected(&mut self, peer_id: &PeerId, endpoint: ConnectedPoint) { + NetworkBehaviour::inject_disconnected(&mut self.kademlia, peer_id, endpoint) + } + + fn inject_replaced(&mut self, peer_id: PeerId, closed: ConnectedPoint, opened: ConnectedPoint) { + NetworkBehaviour::inject_replaced(&mut self.kademlia, peer_id, closed, opened) + } + + fn inject_node_event( + &mut self, + peer_id: PeerId, + event: ::OutEvent, + ) { + NetworkBehaviour::inject_node_event(&mut self.kademlia, peer_id, event) + } + + fn poll( + &mut self, + params: &mut PollParameters, + ) -> Async< + NetworkBehaviourAction< + ::InEvent, + Self::OutEvent, + >, + > { + // Poll Kademlia. + match self.kademlia.poll(params) { + Async::Ready(action) => return Async::Ready(action), + Async::NotReady => (), + } + + // Poll the stream that fires when we need to start a random Kademlia query. + loop { + match self.next_kad_random_query.poll() { + Ok(Async::NotReady) => break, + Ok(Async::Ready(_)) => { + let random_peer_id = PeerId::random(); + debug!(target: "sub-libp2p", "Libp2p <= Starting random Kademlia request for \ {:?}", random_peer_id); - self.kademlia.find_node(random_peer_id); - - // Reset the `Delay` to the next random. - self.next_kad_random_query.reset(Instant::now() + self.duration_to_next_kad); - self.duration_to_next_kad = cmp::min(self.duration_to_next_kad * 2, - Duration::from_secs(60)); - }, - Err(err) => { - warn!(target: "sub-libp2p", "Kademlia query timer errored: {:?}", err); - break - } - } - } - - Async::NotReady - } + self.kademlia.find_node(random_peer_id); + + // Reset the `Delay` to the next random. + self.next_kad_random_query + .reset(Instant::now() + self.duration_to_next_kad); + self.duration_to_next_kad = + cmp::min(self.duration_to_next_kad * 2, Duration::from_secs(60)); + } + Err(err) => { + warn!(target: "sub-libp2p", "Kademlia query timer errored: {:?}", err); + break; + } + } + } + + Async::NotReady + } } /// The severity of misbehaviour of a peer that is reported. #[derive(Debug, PartialEq, Eq, Clone)] pub enum Severity { - /// Peer is timing out. Could be bad connectivity of overload of work on either of our sides. - Timeout, - /// Peer has been notably useless. E.g. unable to answer a request that we might reasonably consider - /// it could answer. - Useless(String), - /// Peer has behaved in an invalid manner. This doesn't necessarily need to be Byzantine, but peer - /// must have taken concrete action in order to behave in such a way which is wantanly invalid. - Bad(String), + /// Peer is timing out. Could be bad connectivity or overload of work on either of our sides. + Timeout, + /// Peer has been notably useless. E.g. unable to answer a request that we might reasonably consider + /// it could answer. + Useless(String), + /// Peer has behaved in an invalid manner. This doesn't necessarily need to be Byzantine, but peer + /// must have taken concrete action in order to behave in such a way which is wantonly invalid.
+ Bad(String), } impl fmt::Display for Severity { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - match self { - Severity::Timeout => write!(fmt, "Timeout"), - Severity::Useless(r) => write!(fmt, "Useless ({})", r), - Severity::Bad(r) => write!(fmt, "Bad ({})", r), - } - } + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + match self { + Severity::Timeout => write!(fmt, "Timeout"), + Severity::Useless(r) => write!(fmt, "Useless ({})", r), + Severity::Bad(r) => write!(fmt, "Bad ({})", r), + } + } } - diff --git a/core/network-libp2p/src/config.rs b/core/network-libp2p/src/config.rs index 74f9280005..f9ca6e9f78 100644 --- a/core/network-libp2p/src/config.rs +++ b/core/network-libp2p/src/config.rs @@ -16,99 +16,103 @@ //! Libp2p network configuration. -use libp2p::identity::{Keypair, secp256k1, ed25519}; -use libp2p::{Multiaddr, multiaddr::Protocol}; +use libp2p::identity::{ed25519, secp256k1, Keypair}; +use libp2p::{multiaddr::Protocol, Multiaddr}; use std::error::Error; -use std::{io::{self, Write}, iter, fs, net::Ipv4Addr, path::{Path, PathBuf}}; +use std::{ + fs, + io::{self, Write}, + iter, + net::Ipv4Addr, + path::{Path, PathBuf}, +}; /// Network service configuration. #[derive(Clone)] pub struct NetworkConfiguration { - /// Directory path to store general network configuration. None means nothing will be saved. - pub config_path: Option, - /// Directory path to store network-specific configuration. None means nothing will be saved. - pub net_config_path: Option, - /// Multiaddresses to listen for incoming connections. - pub listen_addresses: Vec, - /// Multiaddresses to advertise. Detected automatically if empty. - pub public_addresses: Vec, - /// List of initial node addresses - pub boot_nodes: Vec, - /// The node key configuration, which determines the node's network identity keypair. - pub node_key: NodeKeyConfig, - /// Maximum allowed number of incoming connections. - pub in_peers: u32, - /// Number of outgoing connections we're trying to maintain. - pub out_peers: u32, - /// List of reserved node addresses. - pub reserved_nodes: Vec, - /// The non-reserved peer mode. - pub non_reserved_mode: NonReservedPeerMode, - /// Client identifier. Sent over the wire for debugging purposes. - pub client_version: String, - /// Name of the node. Sent over the wire for debugging purposes. - pub node_name: String, - /// If true, the network will use mDNS to discover other libp2p nodes on the local network - /// and connect to them if they support the same chain. - pub enable_mdns: bool, + /// Directory path to store general network configuration. None means nothing will be saved. + pub config_path: Option, + /// Directory path to store network-specific configuration. None means nothing will be saved. + pub net_config_path: Option, + /// Multiaddresses to listen for incoming connections. + pub listen_addresses: Vec, + /// Multiaddresses to advertise. Detected automatically if empty. + pub public_addresses: Vec, + /// List of initial node addresses + pub boot_nodes: Vec, + /// The node key configuration, which determines the node's network identity keypair. + pub node_key: NodeKeyConfig, + /// Maximum allowed number of incoming connections. + pub in_peers: u32, + /// Number of outgoing connections we're trying to maintain. + pub out_peers: u32, + /// List of reserved node addresses. + pub reserved_nodes: Vec, + /// The non-reserved peer mode. + pub non_reserved_mode: NonReservedPeerMode, + /// Client identifier. Sent over the wire for debugging purposes. 
+ pub client_version: String, + /// Name of the node. Sent over the wire for debugging purposes. + pub node_name: String, + /// If true, the network will use mDNS to discover other libp2p nodes on the local network + /// and connect to them if they support the same chain. + pub enable_mdns: bool, } impl Default for NetworkConfiguration { - fn default() -> Self { - NetworkConfiguration { - config_path: None, - net_config_path: None, - listen_addresses: Vec::new(), - public_addresses: Vec::new(), - boot_nodes: Vec::new(), - node_key: NodeKeyConfig::Secp256k1(Secret::New), - in_peers: 25, - out_peers: 75, - reserved_nodes: Vec::new(), - non_reserved_mode: NonReservedPeerMode::Accept, - client_version: "unknown".into(), - node_name: "unknown".into(), - enable_mdns: false, - } - } + fn default() -> Self { + NetworkConfiguration { + config_path: None, + net_config_path: None, + listen_addresses: Vec::new(), + public_addresses: Vec::new(), + boot_nodes: Vec::new(), + node_key: NodeKeyConfig::Secp256k1(Secret::New), + in_peers: 25, + out_peers: 75, + reserved_nodes: Vec::new(), + non_reserved_mode: NonReservedPeerMode::Accept, + client_version: "unknown".into(), + node_name: "unknown".into(), + enable_mdns: false, + } + } } impl NetworkConfiguration { - /// Create a new instance of default settings. - pub fn new() -> Self { - Self::default() - } + /// Create a new instance of default settings. + pub fn new() -> Self { + Self::default() + } - /// Create new default configuration for localhost-only connection with random port (useful for testing) - pub fn new_local() -> NetworkConfiguration { - let mut config = NetworkConfiguration::new(); - config.listen_addresses = vec![ - iter::once(Protocol::Ip4(Ipv4Addr::new(127, 0, 0, 1))) - .chain(iter::once(Protocol::Tcp(0))) - .collect() - ]; - config - } + /// Create new default configuration for localhost-only connection with random port (useful for testing) + pub fn new_local() -> NetworkConfiguration { + let mut config = NetworkConfiguration::new(); + config.listen_addresses = vec![iter::once(Protocol::Ip4(Ipv4Addr::new(127, 0, 0, 1))) + .chain(iter::once(Protocol::Tcp(0))) + .collect()]; + config + } } /// The policy for connections to non-reserved peers. #[derive(Clone, Debug, PartialEq, Eq)] pub enum NonReservedPeerMode { - /// Accept them. This is the default. - Accept, - /// Deny them. - Deny, + /// Accept them. This is the default. + Accept, + /// Deny them. + Deny, } impl NonReservedPeerMode { - /// Attempt to parse the peer mode from a string. - pub fn parse(s: &str) -> Option { - match s { - "accept" => Some(NonReservedPeerMode::Accept), - "deny" => Some(NonReservedPeerMode::Deny), - _ => None, - } - } + /// Attempt to parse the peer mode from a string. + pub fn parse(s: &str) -> Option { + match s { + "accept" => Some(NonReservedPeerMode::Accept), + "deny" => Some(NonReservedPeerMode::Deny), + _ => None, + } + } } /// The configuration of a node's secret key, describing the type of key @@ -116,10 +120,10 @@ impl NonReservedPeerMode { /// the evaluation of the node key configuration. #[derive(Clone)] pub enum NodeKeyConfig { - /// A Secp256k1 secret key configuration. - Secp256k1(Secret), - /// A Ed25519 secret key configuration. - Ed25519(Secret) + /// A Secp256k1 secret key configuration. + Secp256k1(Secret), + /// A Ed25519 secret key configuration. + Ed25519(Secret), } /// The options for obtaining a Secp256k1 secret key. @@ -131,60 +135,58 @@ pub type Ed25519Secret = Secret; /// The configuration options for obtaining a secret key `K`. 
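// A sketch of picking a node key at configuration time, assuming the
// `NodeKeyConfig` variants above and the `Secret` variants defined below;
// the "node.key" file name is hypothetical:
fn node_key(persist_to: Option<PathBuf>) -> NodeKeyConfig {
    match persist_to {
        // Stable identity across restarts: the key is read from disk, or
        // created there on first use (see `into_keypair` below).
        Some(dir) => NodeKeyConfig::Ed25519(Secret::File(dir.join("node.key"))),
        // Ephemeral identity: a fresh key on every start.
        None => NodeKeyConfig::Ed25519(Secret::New),
    }
}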
#[derive(Clone)] pub enum Secret { - /// Use the given secret key `K`. - Input(K), - /// Read the secret key from a file. If the file does not exist, - /// it is created with a newly generated secret key `K`. The format - /// of the file is determined by `K`: - /// - /// * `secp256k1::SecretKey`: An unencoded 32 bytes Secp256k1 secret key. - /// * `ed25519::SecretKey`: An unencoded 32 bytes Ed25519 secret key. - File(PathBuf), - /// Always generate a new secret key `K`. - New + /// Use the given secret key `K`. + Input(K), + /// Read the secret key from a file. If the file does not exist, + /// it is created with a newly generated secret key `K`. The format + /// of the file is determined by `K`: + /// + /// * `secp256k1::SecretKey`: An unencoded 32 bytes Secp256k1 secret key. + /// * `ed25519::SecretKey`: An unencoded 32 bytes Ed25519 secret key. + File(PathBuf), + /// Always generate a new secret key `K`. + New, } impl NodeKeyConfig { - /// Evaluate a `NodeKeyConfig` to obtain an identity `Keypair`: - /// - /// * If the secret is configured as input, the corresponding keypair is returned. - /// - /// * If the secret is configured as a file, it is read from that file, if it exists. - /// Otherwise a new secret is generated and stored. In either case, the - /// keypair obtained from the secret is returned. - /// - /// * If the secret is configured to be new, it is generated and the corresponding - /// keypair is returned. - pub fn into_keypair(self) -> io::Result { - use NodeKeyConfig::*; - match self { - Secp256k1(Secret::New) => - Ok(Keypair::generate_secp256k1()), + /// Evaluate a `NodeKeyConfig` to obtain an identity `Keypair`: + /// + /// * If the secret is configured as input, the corresponding keypair is returned. + /// + /// * If the secret is configured as a file, it is read from that file, if it exists. + /// Otherwise a new secret is generated and stored. In either case, the + /// keypair obtained from the secret is returned. + /// + /// * If the secret is configured to be new, it is generated and the corresponding + /// keypair is returned. + pub fn into_keypair(self) -> io::Result { + use NodeKeyConfig::*; + match self { + Secp256k1(Secret::New) => Ok(Keypair::generate_secp256k1()), - Secp256k1(Secret::Input(k)) => - Ok(Keypair::Secp256k1(k.into())), + Secp256k1(Secret::Input(k)) => Ok(Keypair::Secp256k1(k.into())), - Secp256k1(Secret::File(f)) => - get_secret(f, - |mut b| secp256k1::SecretKey::from_bytes(&mut b), - secp256k1::SecretKey::generate) - .map(secp256k1::Keypair::from) - .map(Keypair::Secp256k1), + Secp256k1(Secret::File(f)) => get_secret( + f, + |mut b| secp256k1::SecretKey::from_bytes(&mut b), + secp256k1::SecretKey::generate, + ) + .map(secp256k1::Keypair::from) + .map(Keypair::Secp256k1), - Ed25519(Secret::New) => - Ok(Keypair::generate_ed25519()), + Ed25519(Secret::New) => Ok(Keypair::generate_ed25519()), - Ed25519(Secret::Input(k)) => - Ok(Keypair::Ed25519(k.into())), + Ed25519(Secret::Input(k)) => Ok(Keypair::Ed25519(k.into())), - Ed25519(Secret::File(f)) => - get_secret(f, - |mut b| ed25519::SecretKey::from_bytes(&mut b), - ed25519::SecretKey::generate) - .map(ed25519::Keypair::from) - .map(Keypair::Ed25519), - } - } + Ed25519(Secret::File(f)) => get_secret( + f, + |mut b| ed25519::SecretKey::from_bytes(&mut b), + ed25519::SecretKey::generate, + ) + .map(ed25519::Keypair::from) + .map(Keypair::Ed25519), + } + } } /// Load a secret key from a file, if it exists, or generate a @@ -192,99 +194,106 @@ impl NodeKeyConfig { /// the secret key is returned. 
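// Calling `get_secret` directly from within this module, mirroring the
// ed25519 arm of `into_keypair` above; the "node.key" path is hypothetical:
fn load_or_create_ed25519() -> io::Result<ed25519::SecretKey> {
    get_secret(
        "node.key",
        |mut b| ed25519::SecretKey::from_bytes(&mut b),
        ed25519::SecretKey::generate,
    )
}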
fn get_secret(file: P, parse: F, generate: G) -> io::Result where - P: AsRef, - F: for<'r> FnOnce(&'r mut [u8]) -> Result, - G: FnOnce() -> K, - E: Error + Send + Sync + 'static, - K: AsRef<[u8]> + P: AsRef, + F: for<'r> FnOnce(&'r mut [u8]) -> Result, + G: FnOnce() -> K, + E: Error + Send + Sync + 'static, + K: AsRef<[u8]>, { - std::fs::read(&file) - .and_then(|mut sk_bytes| - parse(&mut sk_bytes) - .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))) - .or_else(|e| { - if e.kind() == io::ErrorKind::NotFound { - file.as_ref().parent().map_or(Ok(()), fs::create_dir_all)?; - let sk = generate(); - write_secret_file(file, sk.as_ref())?; - Ok(sk) - } else { - Err(e) - } - }) + std::fs::read(&file) + .and_then(|mut sk_bytes| { + parse(&mut sk_bytes).map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) + }) + .or_else(|e| { + if e.kind() == io::ErrorKind::NotFound { + file.as_ref().parent().map_or(Ok(()), fs::create_dir_all)?; + let sk = generate(); + write_secret_file(file, sk.as_ref())?; + Ok(sk) + } else { + Err(e) + } + }) } /// Write secret bytes to a file. fn write_secret_file
<P>
(path: P, sk_bytes: &[u8]) -> io::Result<()> where - P: AsRef + P: AsRef, { - let mut file = open_secret_file(&path)?; - file.write_all(sk_bytes) + let mut file = open_secret_file(&path)?; + file.write_all(sk_bytes) } /// Opens a file containing a secret key in write mode. #[cfg(unix)] fn open_secret_file
<P>
(path: P) -> io::Result where - P: AsRef + P: AsRef, { - use std::os::unix::fs::OpenOptionsExt; - fs::OpenOptions::new() - .write(true) - .create_new(true) - .mode(0o600) - .open(path) + use std::os::unix::fs::OpenOptionsExt; + fs::OpenOptions::new() + .write(true) + .create_new(true) + .mode(0o600) + .open(path) } /// Opens a file containing a secret key in write mode. #[cfg(not(unix))] fn open_secret_file
<P>
(path: P) -> Result where - P: AsRef + P: AsRef, { - fs::OpenOptions::new() - .write(true) - .create_new(true) - .open(path) + fs::OpenOptions::new() + .write(true) + .create_new(true) + .open(path) } #[cfg(test)] mod tests { - use super::*; - use tempdir::TempDir; + use super::*; + use tempdir::TempDir; - fn secret_bytes(kp: &Keypair) -> Vec { - match kp { - Keypair::Ed25519(p) => p.secret().as_ref().iter().cloned().collect(), - Keypair::Secp256k1(p) => p.secret().as_ref().iter().cloned().collect(), - _ => panic!("Unexpected keypair.") - } - } + fn secret_bytes(kp: &Keypair) -> Vec { + match kp { + Keypair::Ed25519(p) => p.secret().as_ref().iter().cloned().collect(), + Keypair::Secp256k1(p) => p.secret().as_ref().iter().cloned().collect(), + _ => panic!("Unexpected keypair."), + } + } - #[test] - fn test_secret_file() { - let tmp = TempDir::new("x").unwrap(); - std::fs::remove_dir(tmp.path()).unwrap(); // should be recreated - let file = tmp.path().join("x").to_path_buf(); - let kp1 = NodeKeyConfig::Ed25519(Secret::File(file.clone())).into_keypair().unwrap(); - let kp2 = NodeKeyConfig::Ed25519(Secret::File(file.clone())).into_keypair().unwrap(); - assert!(file.is_file() && secret_bytes(&kp1) == secret_bytes(&kp2)) - } + #[test] + fn test_secret_file() { + let tmp = TempDir::new("x").unwrap(); + std::fs::remove_dir(tmp.path()).unwrap(); // should be recreated + let file = tmp.path().join("x").to_path_buf(); + let kp1 = NodeKeyConfig::Ed25519(Secret::File(file.clone())) + .into_keypair() + .unwrap(); + let kp2 = NodeKeyConfig::Ed25519(Secret::File(file.clone())) + .into_keypair() + .unwrap(); + assert!(file.is_file() && secret_bytes(&kp1) == secret_bytes(&kp2)) + } - #[test] - fn test_secret_input() { - let sk = secp256k1::SecretKey::generate(); - let kp1 = NodeKeyConfig::Secp256k1(Secret::Input(sk.clone())).into_keypair().unwrap(); - let kp2 = NodeKeyConfig::Secp256k1(Secret::Input(sk)).into_keypair().unwrap(); - assert!(secret_bytes(&kp1) == secret_bytes(&kp2)); - } + #[test] + fn test_secret_input() { + let sk = secp256k1::SecretKey::generate(); + let kp1 = NodeKeyConfig::Secp256k1(Secret::Input(sk.clone())) + .into_keypair() + .unwrap(); + let kp2 = NodeKeyConfig::Secp256k1(Secret::Input(sk)) + .into_keypair() + .unwrap(); + assert!(secret_bytes(&kp1) == secret_bytes(&kp2)); + } - #[test] - fn test_secret_new() { - let kp1 = NodeKeyConfig::Ed25519(Secret::New).into_keypair().unwrap(); - let kp2 = NodeKeyConfig::Ed25519(Secret::New).into_keypair().unwrap(); - assert!(secret_bytes(&kp1) != secret_bytes(&kp2)); - } + #[test] + fn test_secret_new() { + let kp1 = NodeKeyConfig::Ed25519(Secret::New).into_keypair().unwrap(); + let kp2 = NodeKeyConfig::Ed25519(Secret::New).into_keypair().unwrap(); + assert!(secret_bytes(&kp1) != secret_bytes(&kp2)); + } } - diff --git a/core/network-libp2p/src/custom_proto/behaviour.rs b/core/network-libp2p/src/custom_proto/behaviour.rs index eb60bb9ba4..c57dba531b 100644 --- a/core/network-libp2p/src/custom_proto/behaviour.rs +++ b/core/network-libp2p/src/custom_proto/behaviour.rs @@ -14,904 +14,1037 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
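// Tying together the configuration types from `config.rs` above: a sketch of
// turning a `NodeKeyConfig` into the libp2p `Keypair` that the network
// service runs with; the "node.key" file name is hypothetical.
fn network_identity(dir: std::path::PathBuf) -> std::io::Result<libp2p::identity::Keypair> {
    NodeKeyConfig::Ed25519(Secret::File(dir.join("node.key"))).into_keypair()
}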
-use crate::custom_proto::handler::{CustomProtoHandlerProto, CustomProtoHandlerOut, CustomProtoHandlerIn}; +use crate::custom_proto::handler::{ + CustomProtoHandlerIn, CustomProtoHandlerOut, CustomProtoHandlerProto, +}; use crate::custom_proto::upgrade::{CustomMessage, RegisteredProtocol}; use fnv::FnvHashMap; use futures::prelude::*; -use libp2p::core::swarm::{ConnectedPoint, NetworkBehaviour, NetworkBehaviourAction, PollParameters}; +use libp2p::core::swarm::{ + ConnectedPoint, NetworkBehaviour, NetworkBehaviourAction, PollParameters, +}; use libp2p::core::{Multiaddr, PeerId}; use log::{debug, error, trace, warn}; use smallvec::SmallVec; -use std::{collections::hash_map::Entry, cmp, error, io, marker::PhantomData, mem, time::Duration, time::Instant}; +use std::{ + cmp, collections::hash_map::Entry, error, io, marker::PhantomData, mem, time::Duration, + time::Instant, +}; use tokio_io::{AsyncRead, AsyncWrite}; /// Network behaviour that handles opening substreams for custom protocols with other nodes. pub struct CustomProto { - /// List of protocols to open with peers. Never modified. - protocol: RegisteredProtocol, + /// List of protocols to open with peers. Never modified. + protocol: RegisteredProtocol, - /// Receiver for instructions about who to connect to or disconnect from. - peerset: substrate_peerset::PeersetMut, + /// Receiver for instructions about who to connect to or disconnect from. + peerset: substrate_peerset::PeersetMut, - /// List of peers in our state. - peers: FnvHashMap, + /// List of peers in our state. + peers: FnvHashMap, - /// List of incoming messages we have sent to the peer set manager and that are waiting for an - /// answer. - incoming: SmallVec<[IncomingPeer; 6]>, + /// List of incoming messages we have sent to the peer set manager and that are waiting for an + /// answer. + incoming: SmallVec<[IncomingPeer; 6]>, - /// We generate indices to identify incoming connections. This is the next value for the index - /// to use when a connection is incoming. - next_incoming_index: substrate_peerset::IncomingIndex, + /// We generate indices to identify incoming connections. This is the next value for the index + /// to use when a connection is incoming. + next_incoming_index: substrate_peerset::IncomingIndex, - /// Events to produce from `poll()`. - events: SmallVec<[NetworkBehaviourAction, CustomProtoOut>; 4]>, + /// Events to produce from `poll()`. + events: SmallVec< + [NetworkBehaviourAction, CustomProtoOut>; 4], + >, - /// Marker to pin the generics. - marker: PhantomData, + /// Marker to pin the generics. + marker: PhantomData, } /// State of a peer we're connected to. #[derive(Debug)] enum PeerState { - /// State is poisoned. This is a temporary state for a peer and we should always switch back - /// to it later. If it is found in the wild, that means there was either a panic or a bug in - /// the state machine code. - Poisoned, - - /// The peer misbehaved. If the PSM wants us to connect to this node, we will add an artificial - /// delay to the connection. - Banned { - /// Until when the node is banned. - until: Instant, - }, - - /// The peerset requested that we connect to this peer. We are not connected to this node. - PendingRequest { - /// When to actually start dialing. - timer: tokio_timer::Delay, - }, - - /// The peerset requested that we connect to this peer. We are currently dialing this peer. - Requested, - - /// We are connected to this peer but the peerset refused it. 
This peer can still perform - /// Kademlia queries and such, but should get disconnected in a few seconds. - Disabled { - /// How we are connected to this peer. - connected_point: ConnectedPoint, - /// If true, we still have a custom protocol open with it. It will likely get closed in - /// a short amount of time, but we need to keep the information in order to not have a - /// state mismatch. - open: bool, - /// If `Some`, the node is banned until the given `Instant`. - banned_until: Option, - }, - - /// We are connected to this peer but we are not opening any Substrate substream. The handler - /// will be enabled when `timer` fires. This peer can still perform Kademlia queries and such, - /// but should get disconnected in a few seconds. - DisabledPendingEnable { - /// How we are connected to this peer. - connected_point: ConnectedPoint, - /// If true, we still have a custom protocol open with it. It will likely get closed in - /// a short amount of time, but we need to keep the information in order to not have a - /// state mismatch. - open: bool, - /// When to enable this remote. - timer: tokio_timer::Delay, - }, - - /// We are connected to this peer and the peerset has accepted it. The handler is in the - /// enabled state. - Enabled { - /// How we are connected to this peer. - connected_point: ConnectedPoint, - /// If true, we have a custom protocol open with this peer. - open: bool, - }, - - /// We are connected to this peer, and we sent an incoming message to the peerset. The handler - /// is in initialization mode. We are waiting for the Accept or Reject from the peerset. There - /// is a corresponding entry in `incoming`. - Incoming { - /// How we are connected to this peer. - connected_point: ConnectedPoint, - }, + /// State is poisoned. This is a temporary state for a peer and we should always switch back + /// to it later. If it is found in the wild, that means there was either a panic or a bug in + /// the state machine code. + Poisoned, + + /// The peer misbehaved. If the PSM wants us to connect to this node, we will add an artificial + /// delay to the connection. + Banned { + /// Until when the node is banned. + until: Instant, + }, + + /// The peerset requested that we connect to this peer. We are not connected to this node. + PendingRequest { + /// When to actually start dialing. + timer: tokio_timer::Delay, + }, + + /// The peerset requested that we connect to this peer. We are currently dialing this peer. + Requested, + + /// We are connected to this peer but the peerset refused it. This peer can still perform + /// Kademlia queries and such, but should get disconnected in a few seconds. + Disabled { + /// How we are connected to this peer. + connected_point: ConnectedPoint, + /// If true, we still have a custom protocol open with it. It will likely get closed in + /// a short amount of time, but we need to keep the information in order to not have a + /// state mismatch. + open: bool, + /// If `Some`, the node is banned until the given `Instant`. + banned_until: Option, + }, + + /// We are connected to this peer but we are not opening any Substrate substream. The handler + /// will be enabled when `timer` fires. This peer can still perform Kademlia queries and such, + /// but should get disconnected in a few seconds. + DisabledPendingEnable { + /// How we are connected to this peer. + connected_point: ConnectedPoint, + /// If true, we still have a custom protocol open with it. 
It will likely get closed in + /// a short amount of time, but we need to keep the information in order to not have a + /// state mismatch. + open: bool, + /// When to enable this remote. + timer: tokio_timer::Delay, + }, + + /// We are connected to this peer and the peerset has accepted it. The handler is in the + /// enabled state. + Enabled { + /// How we are connected to this peer. + connected_point: ConnectedPoint, + /// If true, we have a custom protocol open with this peer. + open: bool, + }, + + /// We are connected to this peer, and we sent an incoming message to the peerset. The handler + /// is in initialization mode. We are waiting for the Accept or Reject from the peerset. There + /// is a corresponding entry in `incoming`. + Incoming { + /// How we are connected to this peer. + connected_point: ConnectedPoint, + }, } /// State of an "incoming" message sent to the peer set manager. #[derive(Debug)] struct IncomingPeer { - /// Id of the node that is concerned. - peer_id: PeerId, - /// If true, this "incoming" still corresponds to an actual connection. If false, then the - /// connection corresponding to it has been closed or replaced already. - alive: bool, - /// Id that the we sent to the peerset. - incoming_id: substrate_peerset::IncomingIndex, + /// Id of the node that is concerned. + peer_id: PeerId, + /// If true, this "incoming" still corresponds to an actual connection. If false, then the + /// connection corresponding to it has been closed or replaced already. + alive: bool, + /// Id that the we sent to the peerset. + incoming_id: substrate_peerset::IncomingIndex, } /// Event that can be emitted by the `CustomProto`. #[derive(Debug)] pub enum CustomProtoOut { - /// Opened a custom protocol with the remote. - CustomProtocolOpen { - /// Version of the protocol that has been opened. - version: u8, - /// Id of the node we have opened a connection with. - peer_id: PeerId, - /// Endpoint used for this custom protocol. - endpoint: ConnectedPoint, - }, - - /// Closed a custom protocol with the remote. - CustomProtocolClosed { - /// Id of the peer we were connected to. - peer_id: PeerId, - /// Reason why the substream closed. If `Ok`, then it's a graceful exit (EOF). - result: io::Result<()>, - }, - - /// Receives a message on a custom protocol substream. - CustomMessage { - /// Id of the peer the message came from. - peer_id: PeerId, - /// Message that has been received. - message: TMessage, - }, - - /// The substream used by the protocol is pretty large. We should print avoid sending more - /// messages on it if possible. - Clogged { - /// Id of the peer which is clogged. - peer_id: PeerId, - /// Copy of the messages that are within the buffer, for further diagnostic. - messages: Vec, - }, + /// Opened a custom protocol with the remote. + CustomProtocolOpen { + /// Version of the protocol that has been opened. + version: u8, + /// Id of the node we have opened a connection with. + peer_id: PeerId, + /// Endpoint used for this custom protocol. + endpoint: ConnectedPoint, + }, + + /// Closed a custom protocol with the remote. + CustomProtocolClosed { + /// Id of the peer we were connected to. + peer_id: PeerId, + /// Reason why the substream closed. If `Ok`, then it's a graceful exit (EOF). + result: io::Result<()>, + }, + + /// Receives a message on a custom protocol substream. + CustomMessage { + /// Id of the peer the message came from. + peer_id: PeerId, + /// Message that has been received. 
+        message: TMessage,
+    },
+
+    /// The substream used by the protocol is pretty large. We should avoid sending more
+    /// messages on it if possible.
+    Clogged {
+        /// Id of the peer which is clogged.
+        peer_id: PeerId,
+        /// Copy of the messages that are within the buffer, for further diagnostic.
+        messages: Vec<TMessage>,
+    },
 }

 impl<TMessage, TSubstream> CustomProto<TMessage, TSubstream> {
-	/// Creates a `CustomProto`.
-	pub fn new(
-		protocol: RegisteredProtocol<TMessage>,
-		peerset: substrate_peerset::PeersetMut,
-	) -> Self {
-		CustomProto {
-			protocol,
-			peerset,
-			peers: FnvHashMap::default(),
-			incoming: SmallVec::new(),
-			next_incoming_index: substrate_peerset::IncomingIndex(0),
-			events: SmallVec::new(),
-			marker: PhantomData,
-		}
-	}
-
-	/// Disconnects the given peer if we are connected to it.
-	pub fn disconnect_peer(&mut self, peer_id: &PeerId) {
-		debug!(target: "sub-libp2p", "Disconnecting {:?} by request from the external API", peer_id);
-		self.disconnect_peer_inner(peer_id, None);
-	}
-
-	/// Inner implementation of `disconnect_peer`. If `ban` is `Some`, we ban the node for the
-	/// specific duration.
-	fn disconnect_peer_inner(&mut self, peer_id: &PeerId, ban: Option<Duration>) {
-		let mut entry = if let Entry::Occupied(entry) = self.peers.entry(peer_id.clone()) {
-			entry
-		} else {
-			return
-		};
-
-		match mem::replace(entry.get_mut(), PeerState::Poisoned) {
-			// We're not connected anyway.
-			st @ PeerState::Disabled { .. } => *entry.into_mut() = st,
-			st @ PeerState::Requested => *entry.into_mut() = st,
-			st @ PeerState::PendingRequest { .. } => *entry.into_mut() = st,
-			st @ PeerState::Banned { .. } => *entry.into_mut() = st,
-
-			// DisabledPendingEnable => Disabled.
-			PeerState::DisabledPendingEnable { open, connected_point, timer } => {
-				debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", peer_id);
-				self.peerset.dropped(peer_id);
-				let banned_until = Some(if let Some(ban) = ban {
-					cmp::max(timer.deadline(), Instant::now() + ban)
-				} else {
-					timer.deadline()
-				});
-				*entry.into_mut() = PeerState::Disabled { open, connected_point, banned_until }
-			},
-
-			// Enabled => Disabled.
-			PeerState::Enabled { open, connected_point } => {
-				debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", peer_id);
-				self.peerset.dropped(peer_id);
-				debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", peer_id);
-				self.events.push(NetworkBehaviourAction::SendEvent {
-					peer_id: peer_id.clone(),
-					event: CustomProtoHandlerIn::Disable,
-				});
-				let banned_until = ban.map(|dur| Instant::now() + dur);
-				*entry.into_mut() = PeerState::Disabled { open, connected_point, banned_until }
-			},
-
-			// Incoming => Disabled.
-			PeerState::Incoming { connected_point, .. } => {
-				let inc = if let Some(inc) = self.incoming.iter_mut()
-					.find(|i| i.peer_id == *entry.key() && i.alive) {
-					inc
-				} else {
-					error!(target: "sub-libp2p", "State mismatch in libp2p: no entry in \
+    /// Creates a `CustomProto`.
+    pub fn new(
+        protocol: RegisteredProtocol<TMessage>,
+        peerset: substrate_peerset::PeersetMut,
+    ) -> Self {
+        CustomProto {
+            protocol,
+            peerset,
+            peers: FnvHashMap::default(),
+            incoming: SmallVec::new(),
+            next_incoming_index: substrate_peerset::IncomingIndex(0),
+            events: SmallVec::new(),
+            marker: PhantomData,
+        }
+    }
+
+    /// Disconnects the given peer if we are connected to it.
+    pub fn disconnect_peer(&mut self, peer_id: &PeerId) {
+        debug!(target: "sub-libp2p", "Disconnecting {:?} by request from the external API", peer_id);
+        self.disconnect_peer_inner(peer_id, None);
+    }
+
+    /// Inner implementation of `disconnect_peer`.
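
The deleted `disconnect_peer_inner` above (and its reformatted replacement that follows) is built around a pattern used throughout this file: `mem::replace` swaps the `Poisoned` marker into the map entry so the current state can be moved out, matched on by value, and a successor state written back. A minimal, self-contained sketch of that take-and-swap pattern; the names `State` and `finish_job` are illustrative and not part of this codebase:

use std::collections::HashMap;
use std::mem;

#[derive(Debug)]
enum State {
    Poisoned,
    Idle,
    Busy { jobs: u32 },
}

fn finish_job(states: &mut HashMap<u32, State>, id: u32) {
    if let Some(slot) = states.get_mut(&id) {
        // Move the current state out, leaving `Poisoned` in its place; if we
        // panic before writing back, the marker makes the hole visible
        // instead of silently corrupting the map.
        match mem::replace(slot, State::Poisoned) {
            State::Busy { jobs: 1 } => *slot = State::Idle,
            State::Busy { jobs } => *slot = State::Busy { jobs: jobs - 1 },
            other => *slot = other, // not busy: restore unchanged
        }
    }
}

fn main() {
    let mut states = HashMap::new();
    states.insert(7, State::Busy { jobs: 2 });
    finish_job(&mut states, 7);
    println!("{:?}", states[&7]); // Busy { jobs: 1 }
}

If a panic unwinds between the take and the write-back, the marker is what later observers see, which is exactly why the code in this file logs any `Poisoned` state it encounters as a bug.
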
If `ban` is `Some`, we ban the node for the + /// specific duration. + fn disconnect_peer_inner(&mut self, peer_id: &PeerId, ban: Option) { + let mut entry = if let Entry::Occupied(entry) = self.peers.entry(peer_id.clone()) { + entry + } else { + return; + }; + + match mem::replace(entry.get_mut(), PeerState::Poisoned) { + // We're not connected anyway. + st @ PeerState::Disabled { .. } => *entry.into_mut() = st, + st @ PeerState::Requested => *entry.into_mut() = st, + st @ PeerState::PendingRequest { .. } => *entry.into_mut() = st, + st @ PeerState::Banned { .. } => *entry.into_mut() = st, + + // DisabledPendingEnable => Disabled. + PeerState::DisabledPendingEnable { + open, + connected_point, + timer, + } => { + debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", peer_id); + self.peerset.dropped(peer_id); + let banned_until = Some(if let Some(ban) = ban { + cmp::max(timer.deadline(), Instant::now() + ban) + } else { + timer.deadline() + }); + *entry.into_mut() = PeerState::Disabled { + open, + connected_point, + banned_until, + } + } + + // Enabled => Disabled. + PeerState::Enabled { + open, + connected_point, + } => { + debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", peer_id); + self.peerset.dropped(peer_id); + debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", peer_id); + self.events.push(NetworkBehaviourAction::SendEvent { + peer_id: peer_id.clone(), + event: CustomProtoHandlerIn::Disable, + }); + let banned_until = ban.map(|dur| Instant::now() + dur); + *entry.into_mut() = PeerState::Disabled { + open, + connected_point, + banned_until, + } + } + + // Incoming => Disabled. + PeerState::Incoming { + connected_point, .. + } => { + let inc = if let Some(inc) = self + .incoming + .iter_mut() + .find(|i| i.peer_id == *entry.key() && i.alive) + { + inc + } else { + error!(target: "sub-libp2p", "State mismatch in libp2p: no entry in \ incoming for incoming peer"); - return - }; - - inc.alive = false; - debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", peer_id); - self.events.push(NetworkBehaviourAction::SendEvent { - peer_id: peer_id.clone(), - event: CustomProtoHandlerIn::Disable, - }); - let banned_until = ban.map(|dur| Instant::now() + dur); - *entry.into_mut() = PeerState::Disabled { open: false, connected_point, banned_until } - }, - - PeerState::Poisoned => - error!(target: "sub-libp2p", "State of {:?} is poisoned", peer_id), - } - } - - /// Returns true if we try to open protocols with the given peer. - pub fn is_enabled(&self, peer_id: &PeerId) -> bool { - match self.peers.get(peer_id) { - None => false, - Some(PeerState::Disabled { .. }) => false, - Some(PeerState::DisabledPendingEnable { .. }) => false, - Some(PeerState::Enabled { .. }) => true, - Some(PeerState::Incoming { .. }) => false, - Some(PeerState::Requested) => false, - Some(PeerState::PendingRequest { .. }) => false, - Some(PeerState::Banned { .. }) => false, - Some(PeerState::Poisoned) => false, - } - } - - /// Returns true if we have opened a protocol with the given peer. - pub fn is_open(&self, peer_id: &PeerId) -> bool { - match self.peers.get(peer_id) { - None => false, - Some(PeerState::Disabled { open, .. }) => *open, - Some(PeerState::DisabledPendingEnable { open, .. }) => *open, - Some(PeerState::Enabled { open, .. }) => *open, - Some(PeerState::Incoming { .. }) => false, - Some(PeerState::Requested) => false, - Some(PeerState::PendingRequest { .. }) => false, - Some(PeerState::Banned { .. }) => false, - Some(PeerState::Poisoned) => false, - } - } - - /// Sends a message to a peer. 
- /// - /// Has no effect if the custom protocol is not open with the given peer. - /// - /// Also note that even we have a valid open substream, it may in fact be already closed - /// without us knowing, in which case the packet will not be received. - pub fn send_packet(&mut self, target: &PeerId, message: TMessage) { - if !self.is_open(target) { - return; - } - - trace!(target: "sub-libp2p", "Handler({:?}) <= Packet", target); - self.events.push(NetworkBehaviourAction::SendEvent { - peer_id: target.clone(), - event: CustomProtoHandlerIn::SendCustomMessage { - message, - } - }); - } - - /// Indicates to the peerset that we have discovered new addresses for a given node. - pub fn add_discovered_node(&mut self, peer_id: &PeerId) { - debug!(target: "sub-libp2p", "PSM <= Discovered({:?})", peer_id); - self.peerset.discovered(peer_id.clone()) - } - - /// Returns the state of the peerset manager, for debugging purposes. - pub fn peerset_debug_info(&self) -> serde_json::Value { - self.peerset.debug_info() - } - - /// Function that is called when the peerset wants us to connect to a node. - fn peerset_report_connect(&mut self, peer_id: PeerId) { - let mut occ_entry = match self.peers.entry(peer_id) { - Entry::Occupied(entry) => entry, - Entry::Vacant(entry) => { - // If there's no entry in `self.peers`, start dialing. - debug!(target: "sub-libp2p", "PSM => Connect({:?}): Starting to connect", entry.key()); - debug!(target: "sub-libp2p", "Libp2p <= Dial {:?}", entry.key()); - self.events.push(NetworkBehaviourAction::DialPeer { peer_id: entry.key().clone() }); - entry.insert(PeerState::Requested); - return; - } - }; - - match mem::replace(occ_entry.get_mut(), PeerState::Poisoned) { - PeerState::Banned { ref until } if *until > Instant::now() => { - debug!(target: "sub-libp2p", "PSM => Connect({:?}): Will start to connect at \ + return; + }; + + inc.alive = false; + debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", peer_id); + self.events.push(NetworkBehaviourAction::SendEvent { + peer_id: peer_id.clone(), + event: CustomProtoHandlerIn::Disable, + }); + let banned_until = ban.map(|dur| Instant::now() + dur); + *entry.into_mut() = PeerState::Disabled { + open: false, + connected_point, + banned_until, + } + } + + PeerState::Poisoned => { + error!(target: "sub-libp2p", "State of {:?} is poisoned", peer_id) + } + } + } + + /// Returns true if we try to open protocols with the given peer. + pub fn is_enabled(&self, peer_id: &PeerId) -> bool { + match self.peers.get(peer_id) { + None => false, + Some(PeerState::Disabled { .. }) => false, + Some(PeerState::DisabledPendingEnable { .. }) => false, + Some(PeerState::Enabled { .. }) => true, + Some(PeerState::Incoming { .. }) => false, + Some(PeerState::Requested) => false, + Some(PeerState::PendingRequest { .. }) => false, + Some(PeerState::Banned { .. }) => false, + Some(PeerState::Poisoned) => false, + } + } + + /// Returns true if we have opened a protocol with the given peer. + pub fn is_open(&self, peer_id: &PeerId) -> bool { + match self.peers.get(peer_id) { + None => false, + Some(PeerState::Disabled { open, .. }) => *open, + Some(PeerState::DisabledPendingEnable { open, .. }) => *open, + Some(PeerState::Enabled { open, .. }) => *open, + Some(PeerState::Incoming { .. }) => false, + Some(PeerState::Requested) => false, + Some(PeerState::PendingRequest { .. }) => false, + Some(PeerState::Banned { .. }) => false, + Some(PeerState::Poisoned) => false, + } + } + + /// Sends a message to a peer. 
+    ///
+    /// Has no effect if the custom protocol is not open with the given peer.
+    ///
+    /// Also note that even if we have a valid open substream, it may in fact be already closed
+    /// without us knowing, in which case the packet will not be received.
+    pub fn send_packet(&mut self, target: &PeerId, message: TMessage) {
+        if !self.is_open(target) {
+            return;
+        }
+
+        trace!(target: "sub-libp2p", "Handler({:?}) <= Packet", target);
+        self.events.push(NetworkBehaviourAction::SendEvent {
+            peer_id: target.clone(),
+            event: CustomProtoHandlerIn::SendCustomMessage { message },
+        });
+    }
+
+    /// Indicates to the peerset that we have discovered new addresses for a given node.
+    pub fn add_discovered_node(&mut self, peer_id: &PeerId) {
+        debug!(target: "sub-libp2p", "PSM <= Discovered({:?})", peer_id);
+        self.peerset.discovered(peer_id.clone())
+    }
+
+    /// Returns the state of the peerset manager, for debugging purposes.
+    pub fn peerset_debug_info(&self) -> serde_json::Value {
+        self.peerset.debug_info()
+    }
+
+    /// Function that is called when the peerset wants us to connect to a node.
+    fn peerset_report_connect(&mut self, peer_id: PeerId) {
+        let mut occ_entry = match self.peers.entry(peer_id) {
+            Entry::Occupied(entry) => entry,
+            Entry::Vacant(entry) => {
+                // If there's no entry in `self.peers`, start dialing.
+                debug!(target: "sub-libp2p", "PSM => Connect({:?}): Starting to connect", entry.key());
+                debug!(target: "sub-libp2p", "Libp2p <= Dial {:?}", entry.key());
+                self.events.push(NetworkBehaviourAction::DialPeer {
+                    peer_id: entry.key().clone(),
+                });
+                entry.insert(PeerState::Requested);
+                return;
+            }
+        };
+
+        match mem::replace(occ_entry.get_mut(), PeerState::Poisoned) {
+            PeerState::Banned { ref until } if *until > Instant::now() => {
+                debug!(target: "sub-libp2p", "PSM => Connect({:?}): Will start to connect at \
 until {:?}", occ_entry.key(), until);
-				*occ_entry.into_mut() = PeerState::PendingRequest {
-					timer: tokio_timer::Delay::new(until.clone()),
-				};
-			},
-
-			PeerState::Banned { .. } => {
-				debug!(target: "sub-libp2p", "PSM => Connect({:?}): Starting to connect", occ_entry.key());
-				debug!(target: "sub-libp2p", "Libp2p <= Dial {:?}", occ_entry.key());
-				self.events.push(NetworkBehaviourAction::DialPeer { peer_id: occ_entry.key().clone() });
-				*occ_entry.into_mut() = PeerState::Requested;
-			},
-
-			PeerState::Disabled { open, ref connected_point, banned_until: Some(ref banned) }
-				if *banned > Instant::now() => {
-				debug!(target: "sub-libp2p", "PSM => Connect({:?}): Has idle connection through \
+                *occ_entry.into_mut() = PeerState::PendingRequest {
+                    timer: tokio_timer::Delay::new(until.clone()),
+                };
+            }
+
+            PeerState::Banned { ..
} => { + debug!(target: "sub-libp2p", "PSM => Connect({:?}): Starting to connect", occ_entry.key()); + debug!(target: "sub-libp2p", "Libp2p <= Dial {:?}", occ_entry.key()); + self.events.push(NetworkBehaviourAction::DialPeer { + peer_id: occ_entry.key().clone(), + }); + *occ_entry.into_mut() = PeerState::Requested; + } + + PeerState::Disabled { + open, + ref connected_point, + banned_until: Some(ref banned), + } if *banned > Instant::now() => { + debug!(target: "sub-libp2p", "PSM => Connect({:?}): Has idle connection through \ {:?} but node is banned until {:?}", occ_entry.key(), connected_point, banned); - *occ_entry.into_mut() = PeerState::DisabledPendingEnable { - connected_point: connected_point.clone(), - open, - timer: tokio_timer::Delay::new(banned.clone()), - }; - }, - - PeerState::Disabled { open, connected_point, banned_until: _ } => { - debug!(target: "sub-libp2p", "PSM => Connect({:?}): Enabling previously-idle \ + *occ_entry.into_mut() = PeerState::DisabledPendingEnable { + connected_point: connected_point.clone(), + open, + timer: tokio_timer::Delay::new(banned.clone()), + }; + } + + PeerState::Disabled { + open, + connected_point, + banned_until: _, + } => { + debug!(target: "sub-libp2p", "PSM => Connect({:?}): Enabling previously-idle \ connection through {:?}", occ_entry.key(), connected_point); - debug!(target: "sub-libp2p", "Handler({:?}) <= Enable", occ_entry.key()); - self.events.push(NetworkBehaviourAction::SendEvent { - peer_id: occ_entry.key().clone(), - event: CustomProtoHandlerIn::Enable(connected_point.clone().into()), - }); - *occ_entry.into_mut() = PeerState::Enabled { connected_point, open }; - }, - - PeerState::Incoming { connected_point, .. } => { - debug!(target: "sub-libp2p", "PSM => Connect({:?}): Enabling incoming \ + debug!(target: "sub-libp2p", "Handler({:?}) <= Enable", occ_entry.key()); + self.events.push(NetworkBehaviourAction::SendEvent { + peer_id: occ_entry.key().clone(), + event: CustomProtoHandlerIn::Enable(connected_point.clone().into()), + }); + *occ_entry.into_mut() = PeerState::Enabled { + connected_point, + open, + }; + } + + PeerState::Incoming { + connected_point, .. + } => { + debug!(target: "sub-libp2p", "PSM => Connect({:?}): Enabling incoming \ connection through {:?}", occ_entry.key(), connected_point); - if let Some(inc) = self.incoming.iter_mut() - .find(|i| i.peer_id == *occ_entry.key() && i.alive) { - inc.alive = false; - } else { - error!(target: "sub-libp2p", "State mismatch in libp2p: no entry in \ + if let Some(inc) = self + .incoming + .iter_mut() + .find(|i| i.peer_id == *occ_entry.key() && i.alive) + { + inc.alive = false; + } else { + error!(target: "sub-libp2p", "State mismatch in libp2p: no entry in \ incoming for incoming peer") - } - debug!(target: "sub-libp2p", "Handler({:?}) <= Enable", occ_entry.key()); - self.events.push(NetworkBehaviourAction::SendEvent { - peer_id: occ_entry.key().clone(), - event: CustomProtoHandlerIn::Enable(connected_point.clone().into()), - }); - *occ_entry.into_mut() = PeerState::Enabled { connected_point, open: false }; - }, - - st @ PeerState::Enabled { .. 
} => { - warn!(target: "sub-libp2p", "PSM => Connect({:?}): Already connected to this \ + } + debug!(target: "sub-libp2p", "Handler({:?}) <= Enable", occ_entry.key()); + self.events.push(NetworkBehaviourAction::SendEvent { + peer_id: occ_entry.key().clone(), + event: CustomProtoHandlerIn::Enable(connected_point.clone().into()), + }); + *occ_entry.into_mut() = PeerState::Enabled { + connected_point, + open: false, + }; + } + + st @ PeerState::Enabled { .. } => { + warn!(target: "sub-libp2p", "PSM => Connect({:?}): Already connected to this \ peer", occ_entry.key()); - *occ_entry.into_mut() = st; - }, - st @ PeerState::DisabledPendingEnable { .. } => { - warn!(target: "sub-libp2p", "PSM => Connect({:?}): Already have an idle \ + *occ_entry.into_mut() = st; + } + st @ PeerState::DisabledPendingEnable { .. } => { + warn!(target: "sub-libp2p", "PSM => Connect({:?}): Already have an idle \ connection to this peer and waiting to enable it", occ_entry.key()); - *occ_entry.into_mut() = st; - }, - st @ PeerState::Requested { .. } | st @ PeerState::PendingRequest { .. } => { - warn!(target: "sub-libp2p", "PSM => Connect({:?}): Received a previous \ + *occ_entry.into_mut() = st; + } + st @ PeerState::Requested { .. } | st @ PeerState::PendingRequest { .. } => { + warn!(target: "sub-libp2p", "PSM => Connect({:?}): Received a previous \ request for that peer", occ_entry.key()); - *occ_entry.into_mut() = st; - }, - - PeerState::Poisoned => - error!(target: "sub-libp2p", "State of {:?} is poisoned", occ_entry.key()), - } - } - - /// Function that is called when the peerset wants us to disconnect from a node. - fn peerset_report_disconnect(&mut self, peer_id: PeerId) { - let mut entry = match self.peers.entry(peer_id) { - Entry::Occupied(entry) => entry, - Entry::Vacant(entry) => { - debug!(target: "sub-libp2p", "PSM => Drop({:?}): Node already disabled", entry.key()); - return - } - }; - - match mem::replace(entry.get_mut(), PeerState::Poisoned) { - st @ PeerState::Disabled { .. } | st @ PeerState::Banned { .. } => { - debug!(target: "sub-libp2p", "PSM => Drop({:?}): Node already disabled", entry.key()); - *entry.into_mut() = st; - }, - - PeerState::DisabledPendingEnable { open, connected_point, timer } => { - debug!(target: "sub-libp2p", "PSM => Drop({:?}): Interrupting pending \ + *occ_entry.into_mut() = st; + } + + PeerState::Poisoned => { + error!(target: "sub-libp2p", "State of {:?} is poisoned", occ_entry.key()) + } + } + } + + /// Function that is called when the peerset wants us to disconnect from a node. + fn peerset_report_disconnect(&mut self, peer_id: PeerId) { + let mut entry = match self.peers.entry(peer_id) { + Entry::Occupied(entry) => entry, + Entry::Vacant(entry) => { + debug!(target: "sub-libp2p", "PSM => Drop({:?}): Node already disabled", entry.key()); + return; + } + }; + + match mem::replace(entry.get_mut(), PeerState::Poisoned) { + st @ PeerState::Disabled { .. } | st @ PeerState::Banned { .. 
} => { + debug!(target: "sub-libp2p", "PSM => Drop({:?}): Node already disabled", entry.key()); + *entry.into_mut() = st; + } + + PeerState::DisabledPendingEnable { + open, + connected_point, + timer, + } => { + debug!(target: "sub-libp2p", "PSM => Drop({:?}): Interrupting pending \ enable", entry.key()); - *entry.into_mut() = PeerState::Disabled { - open, - connected_point, - banned_until: Some(timer.deadline()), - }; - }, - - PeerState::Enabled { open, connected_point } => { - debug!(target: "sub-libp2p", "PSM => Drop({:?}): Disabling connection", entry.key()); - debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", entry.key()); - self.events.push(NetworkBehaviourAction::SendEvent { - peer_id: entry.key().clone(), - event: CustomProtoHandlerIn::Disable, - }); - *entry.into_mut() = PeerState::Disabled { open, connected_point, banned_until: None } - }, - st @ PeerState::Incoming { .. } => { - error!(target: "sub-libp2p", "PSM => Drop({:?}): Was in incoming mode", + *entry.into_mut() = PeerState::Disabled { + open, + connected_point, + banned_until: Some(timer.deadline()), + }; + } + + PeerState::Enabled { + open, + connected_point, + } => { + debug!(target: "sub-libp2p", "PSM => Drop({:?}): Disabling connection", entry.key()); + debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", entry.key()); + self.events.push(NetworkBehaviourAction::SendEvent { + peer_id: entry.key().clone(), + event: CustomProtoHandlerIn::Disable, + }); + *entry.into_mut() = PeerState::Disabled { + open, + connected_point, + banned_until: None, + } + } + st @ PeerState::Incoming { .. } => { + error!(target: "sub-libp2p", "PSM => Drop({:?}): Was in incoming mode", entry.key()); - *entry.into_mut() = st; - }, - PeerState::Requested => { - // We don't cancel dialing. Libp2p doesn't expose that on purpose, as other - // sub-systems (such as the discovery mechanism) may require dialing this node as - // well at the same time. - debug!(target: "sub-libp2p", "PSM => Drop({:?}): Was not yet connected", entry.key()); - entry.remove(); - }, - PeerState::PendingRequest { timer } => { - debug!(target: "sub-libp2p", "PSM => Drop({:?}): Was not yet connected", entry.key()); - *entry.into_mut() = PeerState::Banned { until: timer.deadline() } - }, - - PeerState::Poisoned => - error!(target: "sub-libp2p", "State of {:?} is poisoned", entry.key()), - } - } - - /// Function that is called when the peerset wants us to accept an incoming node. - fn peerset_report_accept(&mut self, index: substrate_peerset::IncomingIndex) { - let incoming = if let Some(pos) = self.incoming.iter().position(|i| i.incoming_id == index) { - self.incoming.remove(pos) - } else { - error!(target: "sub-libp2p", "PSM => Accept({:?}): Invalid index", index); - return - }; - - if !incoming.alive { - debug!(target: "sub-libp2p", "PSM => Accept({:?}, {:?}): Obsolete incoming, + *entry.into_mut() = st; + } + PeerState::Requested => { + // We don't cancel dialing. Libp2p doesn't expose that on purpose, as other + // sub-systems (such as the discovery mechanism) may require dialing this node as + // well at the same time. 
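
The arms that follow show the same bookkeeping from the disconnect side: a peer whose dial was still pending keeps its timer deadline as a ban horizon, so a later `Connect` request will not dial before the original delay has elapsed. A rough standalone sketch of that deadline reuse, with illustrative names (`State`, `drop_peer`) and no `tokio_timer` dependency:

use std::time::{Duration, Instant};

enum State {
    PendingRequest { deadline: Instant },
    Banned { until: Instant },
}

fn drop_peer(state: State) -> State {
    match state {
        // Keep the original dial delay as the ban horizon: a later
        // `Connect` must not dial before the old deadline has passed.
        State::PendingRequest { deadline } => State::Banned { until: deadline },
        other => other,
    }
}

fn main() {
    let later = Instant::now() + Duration::from_secs(5);
    if let State::Banned { until } = drop_peer(State::PendingRequest { deadline: later }) {
        assert!(until > Instant::now());
    }
}
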
+ debug!(target: "sub-libp2p", "PSM => Drop({:?}): Was not yet connected", entry.key()); + entry.remove(); + } + PeerState::PendingRequest { timer } => { + debug!(target: "sub-libp2p", "PSM => Drop({:?}): Was not yet connected", entry.key()); + *entry.into_mut() = PeerState::Banned { + until: timer.deadline(), + } + } + + PeerState::Poisoned => { + error!(target: "sub-libp2p", "State of {:?} is poisoned", entry.key()) + } + } + } + + /// Function that is called when the peerset wants us to accept an incoming node. + fn peerset_report_accept(&mut self, index: substrate_peerset::IncomingIndex) { + let incoming = if let Some(pos) = self.incoming.iter().position(|i| i.incoming_id == index) + { + self.incoming.remove(pos) + } else { + error!(target: "sub-libp2p", "PSM => Accept({:?}): Invalid index", index); + return; + }; + + if !incoming.alive { + debug!(target: "sub-libp2p", "PSM => Accept({:?}, {:?}): Obsolete incoming, sending back dropped", index, incoming.peer_id); - debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", incoming.peer_id); - self.peerset.dropped(&incoming.peer_id); - return - } - - let state = if let Some(state) = self.peers.get_mut(&incoming.peer_id) { - state - } else { - error!(target: "sub-libp2p", "State mismatch in libp2p: no entry in peers \ + debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", incoming.peer_id); + self.peerset.dropped(&incoming.peer_id); + return; + } + + let state = if let Some(state) = self.peers.get_mut(&incoming.peer_id) { + state + } else { + error!(target: "sub-libp2p", "State mismatch in libp2p: no entry in peers \ corresponding to an alive incoming"); - return - }; + return; + }; - let connected_point = if let PeerState::Incoming { connected_point } = state { - connected_point.clone() - } else { - error!(target: "sub-libp2p", "State mismatch in libp2p: entry in peers corresponding \ + let connected_point = if let PeerState::Incoming { connected_point } = state { + connected_point.clone() + } else { + error!(target: "sub-libp2p", "State mismatch in libp2p: entry in peers corresponding \ to an alive incoming is not in incoming state"); - return - }; + return; + }; - debug!(target: "sub-libp2p", "PSM => Accept({:?}, {:?}): Enabling connection \ + debug!(target: "sub-libp2p", "PSM => Accept({:?}, {:?}): Enabling connection \ through {:?}", index, incoming.peer_id, connected_point); - debug!(target: "sub-libp2p", "Handler({:?}) <= Enable", incoming.peer_id); - self.events.push(NetworkBehaviourAction::SendEvent { - peer_id: incoming.peer_id, - event: CustomProtoHandlerIn::Enable(connected_point.clone().into()), - }); - - *state = PeerState::Enabled { open: false, connected_point }; - } - - /// Function that is called when the peerset wants us to reject an incoming node. 
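
Underlying the accept/reject plumbing here is a small registry of pending incoming connections, looked up by index, removed by position, and guarded by an `alive` flag so that answers arriving after the underlying connection has closed are ignored rather than acted upon. A simplified model under those assumptions; `Pending` and `accept` are invented names for illustration:

#[derive(Debug)]
struct Pending {
    id: u32,
    alive: bool,
}

fn accept(pending: &mut Vec<Pending>, id: u32) -> Option<Pending> {
    let pos = pending.iter().position(|p| p.id == id)?;
    let entry = pending.remove(pos);
    // An answer for a dead entry is obsolete and must be dropped.
    if entry.alive {
        Some(entry)
    } else {
        None
    }
}

fn main() {
    let mut pending = vec![
        Pending { id: 0, alive: true },
        Pending { id: 1, alive: false },
    ];
    assert!(accept(&mut pending, 0).is_some());
    assert!(accept(&mut pending, 1).is_none()); // obsolete: ignored silently
}
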
- fn peerset_report_reject(&mut self, index: substrate_peerset::IncomingIndex) { - let incoming = if let Some(pos) = self.incoming.iter().position(|i| i.incoming_id == index) { - self.incoming.remove(pos) - } else { - error!(target: "sub-libp2p", "PSM => Reject({:?}): Invalid index", index); - return - }; - - if !incoming.alive { - error!(target: "sub-libp2p", "PSM => Reject({:?}, {:?}): Obsolete incoming, \ + debug!(target: "sub-libp2p", "Handler({:?}) <= Enable", incoming.peer_id); + self.events.push(NetworkBehaviourAction::SendEvent { + peer_id: incoming.peer_id, + event: CustomProtoHandlerIn::Enable(connected_point.clone().into()), + }); + + *state = PeerState::Enabled { + open: false, + connected_point, + }; + } + + /// Function that is called when the peerset wants us to reject an incoming node. + fn peerset_report_reject(&mut self, index: substrate_peerset::IncomingIndex) { + let incoming = if let Some(pos) = self.incoming.iter().position(|i| i.incoming_id == index) + { + self.incoming.remove(pos) + } else { + error!(target: "sub-libp2p", "PSM => Reject({:?}): Invalid index", index); + return; + }; + + if !incoming.alive { + error!(target: "sub-libp2p", "PSM => Reject({:?}, {:?}): Obsolete incoming, \ ignoring", index, incoming.peer_id); - return - } + return; + } - let state = if let Some(state) = self.peers.get_mut(&incoming.peer_id) { - state - } else { - error!(target: "sub-libp2p", "State mismatch in libp2p: no entry in peers \ + let state = if let Some(state) = self.peers.get_mut(&incoming.peer_id) { + state + } else { + error!(target: "sub-libp2p", "State mismatch in libp2p: no entry in peers \ corresponding to an alive incoming"); - return - }; + return; + }; - let connected_point = if let PeerState::Incoming { connected_point } = state { - connected_point.clone() - } else { - error!(target: "sub-libp2p", "State mismatch in libp2p: entry in peers corresponding \ + let connected_point = if let PeerState::Incoming { connected_point } = state { + connected_point.clone() + } else { + error!(target: "sub-libp2p", "State mismatch in libp2p: entry in peers corresponding \ to an alive incoming is not in incoming state"); - return - }; + return; + }; - debug!(target: "sub-libp2p", "PSM => Reject({:?}, {:?}): Rejecting connection through \ + debug!(target: "sub-libp2p", "PSM => Reject({:?}, {:?}): Rejecting connection through \ {:?}", index, incoming.peer_id, connected_point); - debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", incoming.peer_id); - self.events.push(NetworkBehaviourAction::SendEvent { - peer_id: incoming.peer_id, - event: CustomProtoHandlerIn::Disable, - }); - *state = PeerState::Disabled { open: false, connected_point, banned_until: None }; - } + debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", incoming.peer_id); + self.events.push(NetworkBehaviourAction::SendEvent { + peer_id: incoming.peer_id, + event: CustomProtoHandlerIn::Disable, + }); + *state = PeerState::Disabled { + open: false, + connected_point, + banned_until: None, + }; + } } impl NetworkBehaviour for CustomProto where - TSubstream: AsyncRead + AsyncWrite, - TMessage: CustomMessage, + TSubstream: AsyncRead + AsyncWrite, + TMessage: CustomMessage, { - type ProtocolsHandler = CustomProtoHandlerProto; - type OutEvent = CustomProtoOut; - - fn new_handler(&mut self) -> Self::ProtocolsHandler { - CustomProtoHandlerProto::new(self.protocol.clone()) - } - - fn addresses_of_peer(&mut self, _: &PeerId) -> Vec { - Vec::new() - } - - fn inject_connected(&mut self, peer_id: PeerId, connected_point: 
ConnectedPoint) { - match (self.peers.entry(peer_id), connected_point) { - (Entry::Occupied(mut entry), connected_point) => { - match mem::replace(entry.get_mut(), PeerState::Poisoned) { - PeerState::Requested | PeerState::PendingRequest { .. } | - PeerState::Banned { .. } => { - debug!(target: "sub-libp2p", "Libp2p => Connected({:?}): Connection \ + type ProtocolsHandler = CustomProtoHandlerProto; + type OutEvent = CustomProtoOut; + + fn new_handler(&mut self) -> Self::ProtocolsHandler { + CustomProtoHandlerProto::new(self.protocol.clone()) + } + + fn addresses_of_peer(&mut self, _: &PeerId) -> Vec { + Vec::new() + } + + fn inject_connected(&mut self, peer_id: PeerId, connected_point: ConnectedPoint) { + match (self.peers.entry(peer_id), connected_point) { + (Entry::Occupied(mut entry), connected_point) => { + match mem::replace(entry.get_mut(), PeerState::Poisoned) { + PeerState::Requested + | PeerState::PendingRequest { .. } + | PeerState::Banned { .. } => { + debug!(target: "sub-libp2p", "Libp2p => Connected({:?}): Connection \ requested by PSM (through {:?})", entry.key(), connected_point); - debug!(target: "sub-libp2p", "Handler({:?}) <= Enable", entry.key()); - self.events.push(NetworkBehaviourAction::SendEvent { - peer_id: entry.key().clone(), - event: CustomProtoHandlerIn::Enable(connected_point.clone().into()), - }); - *entry.into_mut() = PeerState::Enabled { open: false, connected_point }; - } - st @ _ => { - // This is a serious bug either in this state machine or in libp2p. - error!(target: "sub-libp2p", "Received inject_connected for \ + debug!(target: "sub-libp2p", "Handler({:?}) <= Enable", entry.key()); + self.events.push(NetworkBehaviourAction::SendEvent { + peer_id: entry.key().clone(), + event: CustomProtoHandlerIn::Enable(connected_point.clone().into()), + }); + *entry.into_mut() = PeerState::Enabled { + open: false, + connected_point, + }; + } + st @ _ => { + // This is a serious bug either in this state machine or in libp2p. + error!(target: "sub-libp2p", "Received inject_connected for \ already-connected node; state is {:?}", st); - *entry.into_mut() = st; - return - } - } - } - - (Entry::Vacant(entry), connected_point @ ConnectedPoint::Listener { .. }) => { - let incoming_id = self.next_incoming_index.clone(); - self.next_incoming_index.0 = match self.next_incoming_index.0.checked_add(1) { - Some(v) => v, - None => { - error!(target: "sub-libp2p", "Overflow in next_incoming_index"); - return - } - }; - debug!(target: "sub-libp2p", "Libp2p => Connected({:?}): Incoming connection", + *entry.into_mut() = st; + return; + } + } + } + + (Entry::Vacant(entry), connected_point @ ConnectedPoint::Listener { .. 
}) => { + let incoming_id = self.next_incoming_index.clone(); + self.next_incoming_index.0 = match self.next_incoming_index.0.checked_add(1) { + Some(v) => v, + None => { + error!(target: "sub-libp2p", "Overflow in next_incoming_index"); + return; + } + }; + debug!(target: "sub-libp2p", "Libp2p => Connected({:?}): Incoming connection", entry.key()); - debug!(target: "sub-libp2p", "PSM <= Incoming({:?}, {:?}): Through {:?}", + debug!(target: "sub-libp2p", "PSM <= Incoming({:?}, {:?}): Through {:?}", incoming_id, entry.key(), connected_point); - self.peerset.incoming(entry.key().clone(), incoming_id); - self.incoming.push(IncomingPeer { - peer_id: entry.key().clone(), - alive: true, - incoming_id, - }); - entry.insert(PeerState::Incoming { connected_point }); - } - - (Entry::Vacant(entry), connected_point) => { - debug!(target: "sub-libp2p", "Libp2p => Connected({:?}): Requested by something \ + self.peerset.incoming(entry.key().clone(), incoming_id); + self.incoming.push(IncomingPeer { + peer_id: entry.key().clone(), + alive: true, + incoming_id, + }); + entry.insert(PeerState::Incoming { connected_point }); + } + + (Entry::Vacant(entry), connected_point) => { + debug!(target: "sub-libp2p", "Libp2p => Connected({:?}): Requested by something \ else than PSM, disabling", entry.key()); - debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", entry.key()); - self.events.push(NetworkBehaviourAction::SendEvent { - peer_id: entry.key().clone(), - event: CustomProtoHandlerIn::Disable, - }); - entry.insert(PeerState::Disabled { open: false, connected_point, banned_until: None }); - } - } - } - - fn inject_disconnected(&mut self, peer_id: &PeerId, endpoint: ConnectedPoint) { - match self.peers.remove(peer_id) { - None | Some(PeerState::Requested) | Some(PeerState::PendingRequest { .. }) | - Some(PeerState::Banned { .. }) => - // This is a serious bug either in this state machine or in libp2p. - error!(target: "sub-libp2p", "Received inject_disconnected for non-connected \ - node {:?}", peer_id), - - Some(PeerState::Disabled { open, banned_until, .. }) => { - debug!(target: "sub-libp2p", "Libp2p => Disconnected({:?}): Was disabled \ + debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", entry.key()); + self.events.push(NetworkBehaviourAction::SendEvent { + peer_id: entry.key().clone(), + event: CustomProtoHandlerIn::Disable, + }); + entry.insert(PeerState::Disabled { + open: false, + connected_point, + banned_until: None, + }); + } + } + } + + fn inject_disconnected(&mut self, peer_id: &PeerId, endpoint: ConnectedPoint) { + match self.peers.remove(peer_id) { + None + | Some(PeerState::Requested) + | Some(PeerState::PendingRequest { .. }) + | Some(PeerState::Banned { .. }) => + // This is a serious bug either in this state machine or in libp2p. + { + error!(target: "sub-libp2p", "Received inject_disconnected for non-connected \ + node {:?}", peer_id) + } + + Some(PeerState::Disabled { + open, banned_until, .. + }) => { + debug!(target: "sub-libp2p", "Libp2p => Disconnected({:?}): Was disabled \ (through {:?})", peer_id, endpoint); - if let Some(until) = banned_until { - self.peers.insert(peer_id.clone(), PeerState::Banned { until }); - } - if open { - debug!(target: "sub-libp2p", "External API <= Closed({:?})", peer_id); - let event = CustomProtoOut::CustomProtocolClosed { - peer_id: peer_id.clone(), - result: Ok(()), - }; - - self.events.push(NetworkBehaviourAction::GenerateEvent(event)); - } - } - - Some(PeerState::DisabledPendingEnable { open, timer, .. 
}) => { - debug!(target: "sub-libp2p", "Libp2p => Disconnected({:?}): Was disabled \ + if let Some(until) = banned_until { + self.peers + .insert(peer_id.clone(), PeerState::Banned { until }); + } + if open { + debug!(target: "sub-libp2p", "External API <= Closed({:?})", peer_id); + let event = CustomProtoOut::CustomProtocolClosed { + peer_id: peer_id.clone(), + result: Ok(()), + }; + + self.events + .push(NetworkBehaviourAction::GenerateEvent(event)); + } + } + + Some(PeerState::DisabledPendingEnable { open, timer, .. }) => { + debug!(target: "sub-libp2p", "Libp2p => Disconnected({:?}): Was disabled \ (through {:?}) but pending enable", peer_id, endpoint); - debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", peer_id); - self.peerset.dropped(peer_id); - self.peers.insert(peer_id.clone(), PeerState::Banned { until: timer.deadline() }); - if open { - debug!(target: "sub-libp2p", "External API <= Closed({:?})", peer_id); - let event = CustomProtoOut::CustomProtocolClosed { - peer_id: peer_id.clone(), - result: Ok(()), - }; - - self.events.push(NetworkBehaviourAction::GenerateEvent(event)); - } - } - - Some(PeerState::Enabled { open, .. }) => { - debug!(target: "sub-libp2p", "Libp2p => Disconnected({:?}): Was enabled \ + debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", peer_id); + self.peerset.dropped(peer_id); + self.peers.insert( + peer_id.clone(), + PeerState::Banned { + until: timer.deadline(), + }, + ); + if open { + debug!(target: "sub-libp2p", "External API <= Closed({:?})", peer_id); + let event = CustomProtoOut::CustomProtocolClosed { + peer_id: peer_id.clone(), + result: Ok(()), + }; + + self.events + .push(NetworkBehaviourAction::GenerateEvent(event)); + } + } + + Some(PeerState::Enabled { open, .. }) => { + debug!(target: "sub-libp2p", "Libp2p => Disconnected({:?}): Was enabled \ (through {:?})", peer_id, endpoint); - debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", peer_id); - self.peerset.dropped(peer_id); - - if open { - debug!(target: "sub-libp2p", "External API <= Closed({:?})", peer_id); - let event = CustomProtoOut::CustomProtocolClosed { - peer_id: peer_id.clone(), - result: Ok(()), - }; - - self.events.push(NetworkBehaviourAction::GenerateEvent(event)); - } - } - - // In the incoming state, we don't report "Dropped". Instead we will just ignore the - // corresponding Accept/Reject. - Some(PeerState::Incoming { .. }) => { - if let Some(state) = self.incoming.iter_mut().find(|i| i.peer_id == *peer_id) { - debug!(target: "sub-libp2p", "Libp2p => Disconnected({:?}): Was in incoming \ + debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", peer_id); + self.peerset.dropped(peer_id); + + if open { + debug!(target: "sub-libp2p", "External API <= Closed({:?})", peer_id); + let event = CustomProtoOut::CustomProtocolClosed { + peer_id: peer_id.clone(), + result: Ok(()), + }; + + self.events + .push(NetworkBehaviourAction::GenerateEvent(event)); + } + } + + // In the incoming state, we don't report "Dropped". Instead we will just ignore the + // corresponding Accept/Reject. + Some(PeerState::Incoming { .. 
}) => { + if let Some(state) = self.incoming.iter_mut().find(|i| i.peer_id == *peer_id) { + debug!(target: "sub-libp2p", "Libp2p => Disconnected({:?}): Was in incoming \ mode (id {:?}, through {:?})", peer_id, state.incoming_id, endpoint); - state.alive = false; - } else { - error!(target: "sub-libp2p", "State mismatch in libp2p: no entry in incoming \ + state.alive = false; + } else { + error!(target: "sub-libp2p", "State mismatch in libp2p: no entry in incoming \ corresponding to an incoming state in peers") - } - } - - Some(PeerState::Poisoned) => - error!(target: "sub-libp2p", "State of {:?} is poisoned", peer_id), - } - } - - fn inject_addr_reach_failure(&mut self, peer_id: Option<&PeerId>, addr: &Multiaddr, error: &dyn error::Error) { - trace!(target: "sub-libp2p", "Libp2p => Reach failure for {:?} through {:?}: {:?}", peer_id, addr, error); - } - - fn inject_dial_failure(&mut self, peer_id: &PeerId) { - if let Entry::Occupied(mut entry) = self.peers.entry(peer_id.clone()) { - match mem::replace(entry.get_mut(), PeerState::Poisoned) { - // The node is not in our list. - st @ PeerState::Banned { .. } => { - trace!(target: "sub-libp2p", "Libp2p => Dial failure for {:?}", peer_id); - *entry.into_mut() = st; - }, - - // "Basic" situation: we failed to reach a node that the peerset requested. - PeerState::Requested | PeerState::PendingRequest { .. } => { - debug!(target: "sub-libp2p", "Libp2p => Dial failure for {:?}", peer_id); - *entry.into_mut() = PeerState::Banned { - until: Instant::now() + Duration::from_secs(5) - }; - debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", peer_id); - self.peerset.dropped(peer_id) - }, - - // We can still get dial failures even if we are already connected to the node, - // as an extra diagnostic for an earlier attempt. - st @ PeerState::Disabled { .. } | st @ PeerState::Enabled { .. } | - st @ PeerState::DisabledPendingEnable { .. } | st @ PeerState::Incoming { .. } => { - debug!(target: "sub-libp2p", "Libp2p => Dial failure for {:?}", peer_id); - *entry.into_mut() = st; - }, - - PeerState::Poisoned => - error!(target: "sub-libp2p", "State of {:?} is poisoned", peer_id), - } - - } else { - // The node is not in our list. - trace!(target: "sub-libp2p", "Libp2p => Dial failure for {:?}", peer_id); - } - } - - fn inject_node_event( - &mut self, - source: PeerId, - event: CustomProtoHandlerOut, - ) { - match event { - CustomProtoHandlerOut::CustomProtocolClosed { result } => { - debug!(target: "sub-libp2p", "Handler({:?}) => Closed({:?})", source, result); - match self.peers.get_mut(&source) { - Some(PeerState::Enabled { ref mut open, .. }) if *open => - *open = false, - Some(PeerState::Disabled { ref mut open, .. }) if *open => - *open = false, - Some(PeerState::DisabledPendingEnable { ref mut open, .. }) if *open => - *open = false, - _ => error!(target: "sub-libp2p", "State mismatch in the custom protos handler"), - } - - debug!(target: "sub-libp2p", "External API <= Closed({:?})", source); - let event = CustomProtoOut::CustomProtocolClosed { - result, - peer_id: source, - }; - - self.events.push(NetworkBehaviourAction::GenerateEvent(event)); - } - - CustomProtoHandlerOut::CustomProtocolOpen { version } => { - debug!(target: "sub-libp2p", "Handler({:?}) => Open: version {:?}", source, version); - let endpoint = match self.peers.get_mut(&source) { - Some(PeerState::Enabled { ref mut open, ref connected_point }) | - Some(PeerState::DisabledPendingEnable { ref mut open, ref connected_point, .. 
}) | - Some(PeerState::Disabled { ref mut open, ref connected_point, .. }) if !*open => { - *open = true; - connected_point.clone() - } - _ => { - error!(target: "sub-libp2p", "State mismatch in the custom protos handler"); - return - } - }; - - debug!(target: "sub-libp2p", "External API <= Open({:?})", source); - let event = CustomProtoOut::CustomProtocolOpen { - version, - peer_id: source, - endpoint, - }; - - self.events.push(NetworkBehaviourAction::GenerateEvent(event)); - } - - CustomProtoHandlerOut::CustomMessage { message } => { - debug_assert!(self.is_open(&source)); - trace!(target: "sub-libp2p", "Handler({:?}) => Message", source); - trace!(target: "sub-libp2p", "External API <= Message({:?})", source); - let event = CustomProtoOut::CustomMessage { - peer_id: source, - message, - }; - - self.events.push(NetworkBehaviourAction::GenerateEvent(event)); - } - - CustomProtoHandlerOut::Clogged { messages } => { - debug_assert!(self.is_open(&source)); - trace!(target: "sub-libp2p", "Handler({:?}) => Clogged", source); - trace!(target: "sub-libp2p", "External API <= Clogged({:?})", source); - warn!(target: "sub-libp2p", "Queue of packets to send to {:?} is \ + } + } + + Some(PeerState::Poisoned) => { + error!(target: "sub-libp2p", "State of {:?} is poisoned", peer_id) + } + } + } + + fn inject_addr_reach_failure( + &mut self, + peer_id: Option<&PeerId>, + addr: &Multiaddr, + error: &dyn error::Error, + ) { + trace!(target: "sub-libp2p", "Libp2p => Reach failure for {:?} through {:?}: {:?}", peer_id, addr, error); + } + + fn inject_dial_failure(&mut self, peer_id: &PeerId) { + if let Entry::Occupied(mut entry) = self.peers.entry(peer_id.clone()) { + match mem::replace(entry.get_mut(), PeerState::Poisoned) { + // The node is not in our list. + st @ PeerState::Banned { .. } => { + trace!(target: "sub-libp2p", "Libp2p => Dial failure for {:?}", peer_id); + *entry.into_mut() = st; + } + + // "Basic" situation: we failed to reach a node that the peerset requested. + PeerState::Requested | PeerState::PendingRequest { .. } => { + debug!(target: "sub-libp2p", "Libp2p => Dial failure for {:?}", peer_id); + *entry.into_mut() = PeerState::Banned { + until: Instant::now() + Duration::from_secs(5), + }; + debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", peer_id); + self.peerset.dropped(peer_id) + } + + // We can still get dial failures even if we are already connected to the node, + // as an extra diagnostic for an earlier attempt. + st @ PeerState::Disabled { .. } + | st @ PeerState::Enabled { .. } + | st @ PeerState::DisabledPendingEnable { .. } + | st @ PeerState::Incoming { .. } => { + debug!(target: "sub-libp2p", "Libp2p => Dial failure for {:?}", peer_id); + *entry.into_mut() = st; + } + + PeerState::Poisoned => { + error!(target: "sub-libp2p", "State of {:?} is poisoned", peer_id) + } + } + } else { + // The node is not in our list. + trace!(target: "sub-libp2p", "Libp2p => Dial failure for {:?}", peer_id); + } + } + + fn inject_node_event(&mut self, source: PeerId, event: CustomProtoHandlerOut) { + match event { + CustomProtoHandlerOut::CustomProtocolClosed { result } => { + debug!(target: "sub-libp2p", "Handler({:?}) => Closed({:?})", source, result); + match self.peers.get_mut(&source) { + Some(PeerState::Enabled { ref mut open, .. }) if *open => *open = false, + Some(PeerState::Disabled { ref mut open, .. }) if *open => *open = false, + Some(PeerState::DisabledPendingEnable { ref mut open, .. 
}) if *open => { + *open = false + } + _ => { + error!(target: "sub-libp2p", "State mismatch in the custom protos handler") + } + } + + debug!(target: "sub-libp2p", "External API <= Closed({:?})", source); + let event = CustomProtoOut::CustomProtocolClosed { + result, + peer_id: source, + }; + + self.events + .push(NetworkBehaviourAction::GenerateEvent(event)); + } + + CustomProtoHandlerOut::CustomProtocolOpen { version } => { + debug!(target: "sub-libp2p", "Handler({:?}) => Open: version {:?}", source, version); + let endpoint = match self.peers.get_mut(&source) { + Some(PeerState::Enabled { + ref mut open, + ref connected_point, + }) + | Some(PeerState::DisabledPendingEnable { + ref mut open, + ref connected_point, + .. + }) + | Some(PeerState::Disabled { + ref mut open, + ref connected_point, + .. + }) if !*open => { + *open = true; + connected_point.clone() + } + _ => { + error!(target: "sub-libp2p", "State mismatch in the custom protos handler"); + return; + } + }; + + debug!(target: "sub-libp2p", "External API <= Open({:?})", source); + let event = CustomProtoOut::CustomProtocolOpen { + version, + peer_id: source, + endpoint, + }; + + self.events + .push(NetworkBehaviourAction::GenerateEvent(event)); + } + + CustomProtoHandlerOut::CustomMessage { message } => { + debug_assert!(self.is_open(&source)); + trace!(target: "sub-libp2p", "Handler({:?}) => Message", source); + trace!(target: "sub-libp2p", "External API <= Message({:?})", source); + let event = CustomProtoOut::CustomMessage { + peer_id: source, + message, + }; + + self.events + .push(NetworkBehaviourAction::GenerateEvent(event)); + } + + CustomProtoHandlerOut::Clogged { messages } => { + debug_assert!(self.is_open(&source)); + trace!(target: "sub-libp2p", "Handler({:?}) => Clogged", source); + trace!(target: "sub-libp2p", "External API <= Clogged({:?})", source); + warn!(target: "sub-libp2p", "Queue of packets to send to {:?} is \ pretty large", source); - self.events.push(NetworkBehaviourAction::GenerateEvent(CustomProtoOut::Clogged { - peer_id: source, - messages, - })); - } - - // Don't do anything for non-severe errors except report them. - CustomProtoHandlerOut::ProtocolError { is_severe, ref error } if !is_severe => { - debug!(target: "sub-libp2p", "Handler({:?}) => Benign protocol error: {:?}", + self.events.push(NetworkBehaviourAction::GenerateEvent( + CustomProtoOut::Clogged { + peer_id: source, + messages, + }, + )); + } + + // Don't do anything for non-severe errors except report them. + CustomProtoHandlerOut::ProtocolError { + is_severe, + ref error, + } if !is_severe => { + debug!(target: "sub-libp2p", "Handler({:?}) => Benign protocol error: {:?}", source, error) - } + } - CustomProtoHandlerOut::ProtocolError { error, .. } => { - debug!(target: "sub-libp2p", "Handler({:?}) => Severe protocol error: {:?}", + CustomProtoHandlerOut::ProtocolError { error, .. } => { + debug!(target: "sub-libp2p", "Handler({:?}) => Severe protocol error: {:?}", source, error); - self.disconnect_peer_inner(&source, Some(Duration::from_secs(5))); - } - } - } - - fn poll( - &mut self, - _params: &mut PollParameters, - ) -> Async< - NetworkBehaviourAction< - CustomProtoHandlerIn, - Self::OutEvent, - >, - > { - // Poll for instructions from the peerset. - // Note that the peerset is a *best effort* crate, and we have to use defensive programming. 
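
The `poll()` body below ends by draining `self.events` one element per call, which is the conventional way for a `NetworkBehaviour` to hand queued actions back to the swarm in FIFO order. A cut-down model of that drain; the local `Poll` enum stands in for the futures-0.1 `Async` type, and `&'static str` stands in for the real action values:

enum Poll<T> {
    Ready(T),
    NotReady,
}

struct Behaviour {
    events: Vec<&'static str>,
}

impl Behaviour {
    fn poll(&mut self) -> Poll<&'static str> {
        // Emit the oldest queued event, one per call, preserving FIFO order.
        if !self.events.is_empty() {
            return Poll::Ready(self.events.remove(0));
        }
        Poll::NotReady
    }
}

fn main() {
    let mut b = Behaviour { events: vec!["open", "message"] };
    while let Poll::Ready(ev) = b.poll() {
        println!("{}", ev);
    }
}
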
- loop { - match self.peerset.poll() { - Ok(Async::Ready(Some(substrate_peerset::Message::Accept(index)))) => { - self.peerset_report_accept(index); - } - Ok(Async::Ready(Some(substrate_peerset::Message::Reject(index)))) => { - self.peerset_report_reject(index); - } - Ok(Async::Ready(Some(substrate_peerset::Message::Connect(id)))) => { - self.peerset_report_connect(id); - } - Ok(Async::Ready(Some(substrate_peerset::Message::Drop(id)))) => { - self.peerset_report_disconnect(id); - } - Ok(Async::Ready(None)) => { - error!(target: "sub-libp2p", "Peerset receiver stream has returned None"); - break; - } - Ok(Async::NotReady) => break, - Err(err) => { - error!(target: "sub-libp2p", "Peerset receiver stream has errored: {:?}", err); - break - } - } - } - - for (peer_id, peer_state) in self.peers.iter_mut() { - match mem::replace(peer_state, PeerState::Poisoned) { - PeerState::PendingRequest { mut timer } => { - if let Ok(Async::NotReady) = timer.poll() { - *peer_state = PeerState::PendingRequest { timer }; - continue; - } - - debug!(target: "sub-libp2p", "Libp2p <= Dial {:?} now that ban has expired", peer_id); - self.events.push(NetworkBehaviourAction::DialPeer { peer_id: peer_id.clone() }); - *peer_state = PeerState::Requested; - } - - PeerState::DisabledPendingEnable { mut timer, connected_point, open } => { - if let Ok(Async::NotReady) = timer.poll() { - *peer_state = PeerState::DisabledPendingEnable { timer, connected_point, open }; - continue; - } - - debug!(target: "sub-libp2p", "Handler({:?}) <= Enable now that ban has expired", peer_id); - self.events.push(NetworkBehaviourAction::SendEvent { - peer_id: peer_id.clone(), - event: CustomProtoHandlerIn::Enable(connected_point.clone().into()), - }); - *peer_state = PeerState::Enabled { connected_point, open }; - } - - st @ _ => *peer_state = st, - } - } - - if !self.events.is_empty() { - return Async::Ready(self.events.remove(0)) - } - - Async::NotReady - } + self.disconnect_peer_inner(&source, Some(Duration::from_secs(5))); + } + } + } + + fn poll( + &mut self, + _params: &mut PollParameters, + ) -> Async, Self::OutEvent>> { + // Poll for instructions from the peerset. + // Note that the peerset is a *best effort* crate, and we have to use defensive programming. 
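
As the comment above says, the peerset channel is treated as best-effort: the loop drains every ready instruction and downgrades end-of-stream and errors to log lines instead of panicking. The same defensive shape, sketched with `std::sync::mpsc` standing in for the peerset's futures-based receiver:

use std::sync::mpsc::{channel, TryRecvError};

fn main() {
    let (tx, rx) = channel();
    tx.send("connect:alice").unwrap();
    tx.send("drop:bob").unwrap();
    drop(tx); // simulate the peerset side going away

    loop {
        match rx.try_recv() {
            // Normal case: apply the instruction and keep draining.
            Ok(msg) => println!("instruction: {}", msg),
            // Nothing ready right now: stop polling until woken again.
            Err(TryRecvError::Empty) => break,
            // The sender is gone: log it and carry on, never panic.
            Err(TryRecvError::Disconnected) => {
                eprintln!("peerset receiver stream has returned None");
                break;
            }
        }
    }
}
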
+ loop { + match self.peerset.poll() { + Ok(Async::Ready(Some(substrate_peerset::Message::Accept(index)))) => { + self.peerset_report_accept(index); + } + Ok(Async::Ready(Some(substrate_peerset::Message::Reject(index)))) => { + self.peerset_report_reject(index); + } + Ok(Async::Ready(Some(substrate_peerset::Message::Connect(id)))) => { + self.peerset_report_connect(id); + } + Ok(Async::Ready(Some(substrate_peerset::Message::Drop(id)))) => { + self.peerset_report_disconnect(id); + } + Ok(Async::Ready(None)) => { + error!(target: "sub-libp2p", "Peerset receiver stream has returned None"); + break; + } + Ok(Async::NotReady) => break, + Err(err) => { + error!(target: "sub-libp2p", "Peerset receiver stream has errored: {:?}", err); + break; + } + } + } + + for (peer_id, peer_state) in self.peers.iter_mut() { + match mem::replace(peer_state, PeerState::Poisoned) { + PeerState::PendingRequest { mut timer } => { + if let Ok(Async::NotReady) = timer.poll() { + *peer_state = PeerState::PendingRequest { timer }; + continue; + } + + debug!(target: "sub-libp2p", "Libp2p <= Dial {:?} now that ban has expired", peer_id); + self.events.push(NetworkBehaviourAction::DialPeer { + peer_id: peer_id.clone(), + }); + *peer_state = PeerState::Requested; + } + + PeerState::DisabledPendingEnable { + mut timer, + connected_point, + open, + } => { + if let Ok(Async::NotReady) = timer.poll() { + *peer_state = PeerState::DisabledPendingEnable { + timer, + connected_point, + open, + }; + continue; + } + + debug!(target: "sub-libp2p", "Handler({:?}) <= Enable now that ban has expired", peer_id); + self.events.push(NetworkBehaviourAction::SendEvent { + peer_id: peer_id.clone(), + event: CustomProtoHandlerIn::Enable(connected_point.clone().into()), + }); + *peer_state = PeerState::Enabled { + connected_point, + open, + }; + } + + st @ _ => *peer_state = st, + } + } + + if !self.events.is_empty() { + return Async::Ready(self.events.remove(0)); + } + + Async::NotReady + } } diff --git a/core/network-libp2p/src/custom_proto/handler.rs b/core/network-libp2p/src/custom_proto/handler.rs index 516130602e..a439846a89 100644 --- a/core/network-libp2p/src/custom_proto/handler.rs +++ b/core/network-libp2p/src/custom_proto/handler.rs @@ -18,11 +18,11 @@ use crate::custom_proto::upgrade::{CustomMessage, CustomMessageId, RegisteredPro use crate::custom_proto::upgrade::{RegisteredProtocolEvent, RegisteredProtocolSubstream}; use futures::prelude::*; use libp2p::core::{ - PeerId, Endpoint, ProtocolsHandler, ProtocolsHandlerEvent, - protocols_handler::IntoProtocolsHandler, - protocols_handler::KeepAlive, - protocols_handler::ProtocolsHandlerUpgrErr, - upgrade::{InboundUpgrade, OutboundUpgrade} + protocols_handler::IntoProtocolsHandler, + protocols_handler::KeepAlive, + protocols_handler::ProtocolsHandlerUpgrErr, + upgrade::{InboundUpgrade, OutboundUpgrade}, + Endpoint, PeerId, ProtocolsHandler, ProtocolsHandlerEvent, }; use log::{debug, error, warn}; use smallvec::{smallvec, SmallVec}; @@ -65,810 +65,872 @@ use void::Void; /// opening an outbound substream. /// pub struct CustomProtoHandlerProto { - /// Configuration for the protocol upgrade to negotiate. - protocol: RegisteredProtocol, + /// Configuration for the protocol upgrade to negotiate. + protocol: RegisteredProtocol, - /// Marker to pin the generic type. - marker: PhantomData, + /// Marker to pin the generic type. 
+ marker: PhantomData<TSubstream>,
}

impl<TMessage, TSubstream> CustomProtoHandlerProto<TMessage, TSubstream>
where
- TSubstream: AsyncRead + AsyncWrite,
- TMessage: CustomMessage,
+ TSubstream: AsyncRead + AsyncWrite,
+ TMessage: CustomMessage,
{
- /// Builds a new `CustomProtoHandlerProto`.
- pub fn new(protocol: RegisteredProtocol<TMessage>) -> Self {
- CustomProtoHandlerProto {
- protocol,
- marker: PhantomData,
- }
- }
+ /// Builds a new `CustomProtoHandlerProto`.
+ pub fn new(protocol: RegisteredProtocol<TMessage>) -> Self {
+ CustomProtoHandlerProto {
+ protocol,
+ marker: PhantomData,
+ }
+ }
}

impl<TMessage, TSubstream> IntoProtocolsHandler for CustomProtoHandlerProto<TMessage, TSubstream>
where
- TSubstream: AsyncRead + AsyncWrite,
- TMessage: CustomMessage,
+ TSubstream: AsyncRead + AsyncWrite,
+ TMessage: CustomMessage,
{
- type Handler = CustomProtoHandler<TMessage, TSubstream>;
-
- fn into_handler(self, remote_peer_id: &PeerId) -> Self::Handler {
- CustomProtoHandler {
- protocol: self.protocol,
- remote_peer_id: remote_peer_id.clone(),
- state: ProtocolState::Init {
- substreams: SmallVec::new(),
- init_deadline: Delay::new(Instant::now() + Duration::from_secs(5))
- },
- events_queue: SmallVec::new(),
- warm_up_end: Instant::now() + Duration::from_secs(5),
- }
- }
+ type Handler = CustomProtoHandler<TMessage, TSubstream>;
+
+ fn into_handler(self, remote_peer_id: &PeerId) -> Self::Handler {
+ CustomProtoHandler {
+ protocol: self.protocol,
+ remote_peer_id: remote_peer_id.clone(),
+ state: ProtocolState::Init {
+ substreams: SmallVec::new(),
+ init_deadline: Delay::new(Instant::now() + Duration::from_secs(5)),
+ },
+ events_queue: SmallVec::new(),
+ warm_up_end: Instant::now() + Duration::from_secs(5),
+ }
+ }
}

/// The actual handler once the connection has been established.
pub struct CustomProtoHandler<TMessage, TSubstream> {
- /// Configuration for the protocol upgrade to negotiate.
- protocol: RegisteredProtocol<TMessage>,
-
- /// State of the communications with the remote.
- state: ProtocolState<TMessage, TSubstream>,
-
- /// Identifier of the node we're talking to. Used only for logging purposes and shouldn't have
- /// any influence on the behaviour.
- remote_peer_id: PeerId,
-
- /// Queue of events to send to the outside.
- ///
- /// This queue must only ever be modified to insert elements at the back, or remove the first
- /// element.
- events_queue: SmallVec<[ProtocolsHandlerEvent<RegisteredProtocol<TMessage>, (), CustomProtoHandlerOut<TMessage>>; 16]>,
-
- /// We have a warm-up period after creating the handler during which we don't shut down the
- /// connection.
- warm_up_end: Instant,
+ /// Configuration for the protocol upgrade to negotiate.
+ protocol: RegisteredProtocol<TMessage>,
+
+ /// State of the communications with the remote.
+ state: ProtocolState<TMessage, TSubstream>,
+
+ /// Identifier of the node we're talking to. Used only for logging purposes and shouldn't have
+ /// any influence on the behaviour.
+ remote_peer_id: PeerId,
+
+ /// Queue of events to send to the outside.
+ ///
+ /// This queue must only ever be modified to insert elements at the back, or remove the first
+ /// element.
+ events_queue: SmallVec<
+ [ProtocolsHandlerEvent<RegisteredProtocol<TMessage>, (), CustomProtoHandlerOut<TMessage>>;
+ 16],
+ >,
+
+ /// We have a warm-up period after creating the handler during which we don't shut down the
+ /// connection.
+ warm_up_end: Instant,
}

/// State of the handler.
enum ProtocolState<TMessage, TSubstream> {
- /// Waiting for the behaviour to tell the handler whether it is enabled or disabled.
- Init {
- /// List of substreams opened by the remote but that haven't been processed yet.
- substreams: SmallVec<[RegisteredProtocolSubstream<TMessage, TSubstream>; 6]>,
- /// Deadline after which the initialization is abnormally long.
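(Aside on the `marker: PhantomData<...>` fields above: Rust rejects a type parameter that appears in no field, so a zero-sized `PhantomData` pins it. A minimal sketch, with a hypothetical `ProtoBuilder` standing in for `CustomProtoHandlerProto`:)

use std::marker::PhantomData;

struct ProtoBuilder<TMessage> {
    version: u8,
    // Without this field: error[E0392]: parameter `TMessage` is never used.
    marker: PhantomData<TMessage>,
}

impl<TMessage> ProtoBuilder<TMessage> {
    fn new(version: u8) -> Self {
        ProtoBuilder { version, marker: PhantomData }
    }
}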
- init_deadline: Delay, - }, - - /// Handler is opening a substream in order to activate itself. - /// If we are in this state, we haven't sent any `CustomProtocolOpen` yet. - Opening { - /// Deadline after which the opening is abnormally long. - deadline: Delay, - }, - - /// Backwards-compatible mode. Contains the unique substream that is open. - /// If we are in this state, we have sent a `CustomProtocolOpen` message to the outside. - BackCompat { - /// The unique substream where bidirectional communications happen. - substream: RegisteredProtocolSubstream, - /// Contains substreams which are being shut down. - shutdown: SmallVec<[RegisteredProtocolSubstream; 4]>, - }, - - /// Normal functionning. Contains the substreams that are open. - /// If we are in this state, we have sent a `CustomProtocolOpen` message to the outside. - Normal(PerProtocolNormalState), - - /// We are disabled. Contains substreams that are being closed. - /// If we are in this state, either we have sent a `CustomProtocolClosed` message to the - /// outside or we have never sent any `CustomProtocolOpen` in the first place. - Disabled { - /// List of substreams to shut down. - shutdown: SmallVec<[RegisteredProtocolSubstream; 6]>, - - /// If true, we should reactivate the handler after all the substreams in `shutdown` have - /// been closed. - /// - /// Since we don't want to mix old and new substreams, we wait for all old substreams to - /// be closed before opening any new one. - reenable: bool, - }, - - /// We sometimes temporarily switch to this state during processing. If we are in this state - /// at the beginning of a method, that means something bad happend in the source code. - Poisoned, + /// Waiting for the behaviour to tell the handler whether it is enabled or disabled. + Init { + /// List of substreams opened by the remote but that haven't been processed yet. + substreams: SmallVec<[RegisteredProtocolSubstream; 6]>, + /// Deadline after which the initialization is abnormally long. + init_deadline: Delay, + }, + + /// Handler is opening a substream in order to activate itself. + /// If we are in this state, we haven't sent any `CustomProtocolOpen` yet. + Opening { + /// Deadline after which the opening is abnormally long. + deadline: Delay, + }, + + /// Backwards-compatible mode. Contains the unique substream that is open. + /// If we are in this state, we have sent a `CustomProtocolOpen` message to the outside. + BackCompat { + /// The unique substream where bidirectional communications happen. + substream: RegisteredProtocolSubstream, + /// Contains substreams which are being shut down. + shutdown: SmallVec<[RegisteredProtocolSubstream; 4]>, + }, + + /// Normal functionning. Contains the substreams that are open. + /// If we are in this state, we have sent a `CustomProtocolOpen` message to the outside. + Normal(PerProtocolNormalState), + + /// We are disabled. Contains substreams that are being closed. + /// If we are in this state, either we have sent a `CustomProtocolClosed` message to the + /// outside or we have never sent any `CustomProtocolOpen` in the first place. + Disabled { + /// List of substreams to shut down. + shutdown: SmallVec<[RegisteredProtocolSubstream; 6]>, + + /// If true, we should reactivate the handler after all the substreams in `shutdown` have + /// been closed. + /// + /// Since we don't want to mix old and new substreams, we wait for all old substreams to + /// be closed before opening any new one. 
+ reenable: bool, + }, + + /// We sometimes temporarily switch to this state during processing. If we are in this state + /// at the beginning of a method, that means something bad happend in the source code. + Poisoned, } /// Normal functionning mode for a protocol. struct PerProtocolNormalState { - /// Optional substream that we opened. - outgoing_substream: Option>, + /// Optional substream that we opened. + outgoing_substream: Option>, - /// Substreams that have been opened by the remote. We are waiting for a packet from it. - incoming_substreams: SmallVec<[RegisteredProtocolSubstream; 4]>, + /// Substreams that have been opened by the remote. We are waiting for a packet from it. + incoming_substreams: SmallVec<[RegisteredProtocolSubstream; 4]>, - /// For each request that has been sent to the remote, contains the substream where we - /// expect a response. - pending_response: SmallVec<[(u64, RegisteredProtocolSubstream); 4]>, + /// For each request that has been sent to the remote, contains the substream where we + /// expect a response. + pending_response: SmallVec<[(u64, RegisteredProtocolSubstream); 4]>, - /// For each request received by the remote, contains the substream where to send back our - /// response. Once a response has been sent, the substream closes. - pending_send_back: SmallVec<[(u64, RegisteredProtocolSubstream); 4]>, + /// For each request received by the remote, contains the substream where to send back our + /// response. Once a response has been sent, the substream closes. + pending_send_back: SmallVec<[(u64, RegisteredProtocolSubstream); 4]>, - /// List of messages waiting for a substream to open in order to be sent. - pending_messages: SmallVec<[TMessage; 6]>, + /// List of messages waiting for a substream to open in order to be sent. + pending_messages: SmallVec<[TMessage; 6]>, - /// Contains substreams which are being shut down. - shutdown: SmallVec<[RegisteredProtocolSubstream; 4]>, + /// Contains substreams which are being shut down. + shutdown: SmallVec<[RegisteredProtocolSubstream; 4]>, } impl PerProtocolNormalState -where TMessage: CustomMessage, TSubstream: AsyncRead + AsyncWrite { - /// Polls for things that are new. Same API constraints as `Future::poll()`. - /// Optionally returns the event to produce. - /// You must pass the `protocol_id` as we need have to inject it in the returned event. - /// API note: Ideally we wouldn't need to be passed a `ProtocolId`, and we would return a - /// different enum that doesn't contain any `protocol_id`, and the caller would inject - /// the ID itself, but that's a ton of code for not much gain. - fn poll(&mut self) -> Option> { - for n in (0..self.pending_response.len()).rev() { - let (request_id, mut substream) = self.pending_response.swap_remove(n); - match substream.poll() { - Ok(Async::Ready(Some(RegisteredProtocolEvent::Message(message)))) => { - if message.request_id() == CustomMessageId::Response(request_id) { - let event = CustomProtoHandlerOut::CustomMessage { - message - }; - self.shutdown.push(substream); - return Some(event); - } else { - self.shutdown.push(substream); - let event = CustomProtoHandlerOut::ProtocolError { - is_severe: true, - error: format!("Request ID doesn't match substream: expected {:?}, \ - got {:?}", request_id, message.request_id()).into(), - }; - return Some(event); - } - }, - Ok(Async::Ready(Some(RegisteredProtocolEvent::Clogged { .. 
}))) => - unreachable!("Cannot receive Clogged message with new protocol version; QED"), - Ok(Async::NotReady) => - self.pending_response.push((request_id, substream)), - Ok(Async::Ready(None)) => { - self.shutdown.push(substream); - let event = CustomProtoHandlerOut::ProtocolError { - is_severe: false, - error: format!("Request ID {:?} didn't receive an answer", request_id).into(), - }; - return Some(event); - } - Err(err) => { - self.shutdown.push(substream); - let event = CustomProtoHandlerOut::ProtocolError { - is_severe: false, - error: format!("Error while waiting for an answer for {:?}: {}", - request_id, err).into(), - }; - return Some(event); - } - } - } - - for n in (0..self.incoming_substreams.len()).rev() { - let mut substream = self.incoming_substreams.swap_remove(n); - match substream.poll() { - Ok(Async::Ready(Some(RegisteredProtocolEvent::Message(message)))) => { - return match message.request_id() { - CustomMessageId::Request(id) => { - self.pending_send_back.push((id, substream)); - Some(CustomProtoHandlerOut::CustomMessage { - message - }) - } - CustomMessageId::OneWay => { - self.shutdown.push(substream); - Some(CustomProtoHandlerOut::CustomMessage { - message - }) - } - _ => { - self.shutdown.push(substream); - Some(CustomProtoHandlerOut::ProtocolError { - is_severe: true, - error: format!("Received response in new substream").into(), - }) - } - } - }, - Ok(Async::Ready(Some(RegisteredProtocolEvent::Clogged { .. }))) => - unreachable!("Cannot receive Clogged message with new protocol version; QED"), - Ok(Async::NotReady) => - self.incoming_substreams.push(substream), - Ok(Async::Ready(None)) => {} - Err(err) => { - self.shutdown.push(substream); - return Some(CustomProtoHandlerOut::ProtocolError { - is_severe: false, - error: format!("Error in incoming substream: {}", err).into(), - }); - } - } - } - - shutdown_list(&mut self.shutdown); - None - } +where + TMessage: CustomMessage, + TSubstream: AsyncRead + AsyncWrite, +{ + /// Polls for things that are new. Same API constraints as `Future::poll()`. + /// Optionally returns the event to produce. + /// You must pass the `protocol_id` as we need have to inject it in the returned event. + /// API note: Ideally we wouldn't need to be passed a `ProtocolId`, and we would return a + /// different enum that doesn't contain any `protocol_id`, and the caller would inject + /// the ID itself, but that's a ton of code for not much gain. + fn poll(&mut self) -> Option> { + for n in (0..self.pending_response.len()).rev() { + let (request_id, mut substream) = self.pending_response.swap_remove(n); + match substream.poll() { + Ok(Async::Ready(Some(RegisteredProtocolEvent::Message(message)))) => { + if message.request_id() == CustomMessageId::Response(request_id) { + let event = CustomProtoHandlerOut::CustomMessage { message }; + self.shutdown.push(substream); + return Some(event); + } else { + self.shutdown.push(substream); + let event = CustomProtoHandlerOut::ProtocolError { + is_severe: true, + error: format!( + "Request ID doesn't match substream: expected {:?}, \ + got {:?}", + request_id, + message.request_id() + ) + .into(), + }; + return Some(event); + } + } + Ok(Async::Ready(Some(RegisteredProtocolEvent::Clogged { .. 
}))) => { + unreachable!("Cannot receive Clogged message with new protocol version; QED") + } + Ok(Async::NotReady) => self.pending_response.push((request_id, substream)), + Ok(Async::Ready(None)) => { + self.shutdown.push(substream); + let event = CustomProtoHandlerOut::ProtocolError { + is_severe: false, + error: format!("Request ID {:?} didn't receive an answer", request_id) + .into(), + }; + return Some(event); + } + Err(err) => { + self.shutdown.push(substream); + let event = CustomProtoHandlerOut::ProtocolError { + is_severe: false, + error: format!( + "Error while waiting for an answer for {:?}: {}", + request_id, err + ) + .into(), + }; + return Some(event); + } + } + } + + for n in (0..self.incoming_substreams.len()).rev() { + let mut substream = self.incoming_substreams.swap_remove(n); + match substream.poll() { + Ok(Async::Ready(Some(RegisteredProtocolEvent::Message(message)))) => { + return match message.request_id() { + CustomMessageId::Request(id) => { + self.pending_send_back.push((id, substream)); + Some(CustomProtoHandlerOut::CustomMessage { message }) + } + CustomMessageId::OneWay => { + self.shutdown.push(substream); + Some(CustomProtoHandlerOut::CustomMessage { message }) + } + _ => { + self.shutdown.push(substream); + Some(CustomProtoHandlerOut::ProtocolError { + is_severe: true, + error: format!("Received response in new substream").into(), + }) + } + }; + } + Ok(Async::Ready(Some(RegisteredProtocolEvent::Clogged { .. }))) => { + unreachable!("Cannot receive Clogged message with new protocol version; QED") + } + Ok(Async::NotReady) => self.incoming_substreams.push(substream), + Ok(Async::Ready(None)) => {} + Err(err) => { + self.shutdown.push(substream); + return Some(CustomProtoHandlerOut::ProtocolError { + is_severe: false, + error: format!("Error in incoming substream: {}", err).into(), + }); + } + } + } + + shutdown_list(&mut self.shutdown); + None + } } /// Event that can be received by a `CustomProtoHandler`. #[derive(Debug)] pub enum CustomProtoHandlerIn { - /// The node should start using custom protocols. Contains whether we are the dialer or the - /// listener of the connection. - Enable(Endpoint), - - /// The node should stop using custom protocols. - Disable, - - /// Sends a message through a custom protocol substream. - SendCustomMessage { - /// The message to send. - message: TMessage, - }, + /// The node should start using custom protocols. Contains whether we are the dialer or the + /// listener of the connection. + Enable(Endpoint), + + /// The node should stop using custom protocols. + Disable, + + /// Sends a message through a custom protocol substream. + SendCustomMessage { + /// The message to send. + message: TMessage, + }, } /// Event that can be emitted by a `CustomProtoHandler`. #[derive(Debug)] pub enum CustomProtoHandlerOut { - /// Opened a custom protocol with the remote. - CustomProtocolOpen { - /// Version of the protocol that has been opened. - version: u8, - }, - - /// Closed a custom protocol with the remote. - CustomProtocolClosed { - /// Reason why the substream closed. If `Ok`, then it's a graceful exit (EOF). - result: io::Result<()>, - }, - - /// Receives a message on a custom protocol substream. - CustomMessage { - /// Message that has been received. - message: TMessage, - }, - - /// A substream to the remote is clogged. The send buffer is very large, and we should print - /// a diagnostic message and/or avoid sending more data. - Clogged { - /// Copy of the messages that are within the buffer, for further diagnostic. 
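(Aside on the `Clogged` diagnostic defined here: the substream code later in this patch guards it with a "fuse" so the event fires once per clogging episode rather than on every poll. A minimal sketch of that rule, with hypothetical names and the same 2048-message threshold:)

struct Queue {
    buffered: Vec<Vec<u8>>,
    clogged_fuse: bool,
}

impl Queue {
    /// Returns `Some(queue_len)` the first time the queue crosses the
    /// threshold; stays silent until the queue has drained and re-armed.
    fn check_clogged(&mut self) -> Option<usize> {
        if self.buffered.len() >= 2048 {
            if !self.clogged_fuse {
                self.clogged_fuse = true; // report only once per episode
                return Some(self.buffered.len());
            }
        } else {
            self.clogged_fuse = false; // queue drained: re-arm the fuse
        }
        None
    }
}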
- messages: Vec, - }, - - /// An error has happened on the protocol level with this node. - ProtocolError { - /// If true the error is severe, such as a protocol violation. - is_severe: bool, - /// The error that happened. - error: Box, - }, + /// Opened a custom protocol with the remote. + CustomProtocolOpen { + /// Version of the protocol that has been opened. + version: u8, + }, + + /// Closed a custom protocol with the remote. + CustomProtocolClosed { + /// Reason why the substream closed. If `Ok`, then it's a graceful exit (EOF). + result: io::Result<()>, + }, + + /// Receives a message on a custom protocol substream. + CustomMessage { + /// Message that has been received. + message: TMessage, + }, + + /// A substream to the remote is clogged. The send buffer is very large, and we should print + /// a diagnostic message and/or avoid sending more data. + Clogged { + /// Copy of the messages that are within the buffer, for further diagnostic. + messages: Vec, + }, + + /// An error has happened on the protocol level with this node. + ProtocolError { + /// If true the error is severe, such as a protocol violation. + is_severe: bool, + /// The error that happened. + error: Box, + }, } impl CustomProtoHandler where - TSubstream: AsyncRead + AsyncWrite, - TMessage: CustomMessage, + TSubstream: AsyncRead + AsyncWrite, + TMessage: CustomMessage, { - /// Enables the handler. - fn enable(&mut self, endpoint: Endpoint) { - self.state = match mem::replace(&mut self.state, ProtocolState::Poisoned) { - ProtocolState::Poisoned => { - error!(target: "sub-libp2p", "Handler with {:?} is in poisoned state", + /// Enables the handler. + fn enable(&mut self, endpoint: Endpoint) { + self.state = match mem::replace(&mut self.state, ProtocolState::Poisoned) { + ProtocolState::Poisoned => { + error!(target: "sub-libp2p", "Handler with {:?} is in poisoned state", self.remote_peer_id); - ProtocolState::Poisoned - } - - ProtocolState::Init { substreams: incoming, .. } => { - if incoming.is_empty() { - if let Endpoint::Dialer = endpoint { - self.events_queue.push(ProtocolsHandlerEvent::OutboundSubstreamRequest { - upgrade: self.protocol.clone(), - info: (), - }); - } - ProtocolState::Opening { - deadline: Delay::new(Instant::now() + Duration::from_secs(60)) - } - - } else if incoming.iter().any(|s| s.is_multiplex()) { - let event = CustomProtoHandlerOut::CustomProtocolOpen { - version: incoming[0].protocol_version() - }; - self.events_queue.push(ProtocolsHandlerEvent::Custom(event)); - ProtocolState::Normal(PerProtocolNormalState { - outgoing_substream: None, - incoming_substreams: incoming.into_iter().collect(), - pending_response: SmallVec::new(), - pending_send_back: SmallVec::new(), - pending_messages: SmallVec::new(), - shutdown: SmallVec::new(), - }) - - } else { - let event = CustomProtoHandlerOut::CustomProtocolOpen { - version: incoming[0].protocol_version() - }; - self.events_queue.push(ProtocolsHandlerEvent::Custom(event)); - ProtocolState::BackCompat { - substream: incoming.into_iter().next() - .expect("We have a check above that incoming isn't empty; QED"), - shutdown: SmallVec::new() - } - } - } - - st @ ProtocolState::Opening { .. } => st, - st @ ProtocolState::BackCompat { .. } => st, - st @ ProtocolState::Normal { .. } => st, - ProtocolState::Disabled { shutdown, .. } => { - ProtocolState::Disabled { shutdown, reenable: true } - } - } - } - - /// Disables the handler. 
- fn disable(&mut self) { - self.state = match mem::replace(&mut self.state, ProtocolState::Poisoned) { - ProtocolState::Poisoned => { - error!(target: "sub-libp2p", "Handler with {:?} is in poisoned state", + ProtocolState::Poisoned + } + + ProtocolState::Init { + substreams: incoming, + .. + } => { + if incoming.is_empty() { + if let Endpoint::Dialer = endpoint { + self.events_queue + .push(ProtocolsHandlerEvent::OutboundSubstreamRequest { + upgrade: self.protocol.clone(), + info: (), + }); + } + ProtocolState::Opening { + deadline: Delay::new(Instant::now() + Duration::from_secs(60)), + } + } else if incoming.iter().any(|s| s.is_multiplex()) { + let event = CustomProtoHandlerOut::CustomProtocolOpen { + version: incoming[0].protocol_version(), + }; + self.events_queue.push(ProtocolsHandlerEvent::Custom(event)); + ProtocolState::Normal(PerProtocolNormalState { + outgoing_substream: None, + incoming_substreams: incoming.into_iter().collect(), + pending_response: SmallVec::new(), + pending_send_back: SmallVec::new(), + pending_messages: SmallVec::new(), + shutdown: SmallVec::new(), + }) + } else { + let event = CustomProtoHandlerOut::CustomProtocolOpen { + version: incoming[0].protocol_version(), + }; + self.events_queue.push(ProtocolsHandlerEvent::Custom(event)); + ProtocolState::BackCompat { + substream: incoming + .into_iter() + .next() + .expect("We have a check above that incoming isn't empty; QED"), + shutdown: SmallVec::new(), + } + } + } + + st @ ProtocolState::Opening { .. } => st, + st @ ProtocolState::BackCompat { .. } => st, + st @ ProtocolState::Normal { .. } => st, + ProtocolState::Disabled { shutdown, .. } => ProtocolState::Disabled { + shutdown, + reenable: true, + }, + } + } + + /// Disables the handler. + fn disable(&mut self) { + self.state = match mem::replace(&mut self.state, ProtocolState::Poisoned) { + ProtocolState::Poisoned => { + error!(target: "sub-libp2p", "Handler with {:?} is in poisoned state", self.remote_peer_id); - ProtocolState::Poisoned - } - - ProtocolState::Init { substreams: mut shutdown, .. } => { - for s in &mut shutdown { - s.shutdown(); - } - ProtocolState::Disabled { shutdown, reenable: false } - } - - ProtocolState::Opening { .. } => { - ProtocolState::Disabled { shutdown: SmallVec::new(), reenable: false } - } - - ProtocolState::BackCompat { mut substream, mut shutdown } => { - substream.shutdown(); - shutdown.push(substream); - let event = CustomProtoHandlerOut::CustomProtocolClosed { - result: Ok(()) - }; - self.events_queue.push(ProtocolsHandlerEvent::Custom(event)); - ProtocolState::Disabled { - shutdown: shutdown.into_iter().collect(), - reenable: false - } - } - - ProtocolState::Normal(state) => { - let mut out: SmallVec<[_; 6]> = SmallVec::new(); - out.extend(state.outgoing_substream.into_iter()); - out.extend(state.incoming_substreams.into_iter()); - out.extend(state.pending_response.into_iter().map(|(_, s)| s)); - out.extend(state.pending_send_back.into_iter().map(|(_, s)| s)); - for s in &mut out { - s.shutdown(); - } - out.extend(state.shutdown.into_iter()); - let event = CustomProtoHandlerOut::CustomProtocolClosed { - result: Ok(()) - }; - self.events_queue.push(ProtocolsHandlerEvent::Custom(event)); - ProtocolState::Disabled { shutdown: out, reenable: false } - } - - ProtocolState::Disabled { shutdown, .. } => - ProtocolState::Disabled { shutdown, reenable: false }, - }; - } - - /// Polls the state for events. Optionally returns an event to produce. 
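(Aside: `enable`, `disable` and `poll_state` all move out of `self.state` through `mem::replace(&mut self.state, ProtocolState::Poisoned)`, because a match cannot take ownership of a merely borrowed state. If a bug interrupts the transition, the sentinel is what remains, and the next call logs it. A minimal sketch of the idiom, with hypothetical names:)

use std::mem;

enum State { Idle, Running { ticks: u32 }, Poisoned }

struct Machine { state: State }

impl Machine {
    fn step(&mut self) {
        // Take ownership of the current state, leaving the sentinel behind.
        self.state = match mem::replace(&mut self.state, State::Poisoned) {
            State::Idle => State::Running { ticks: 0 },
            State::Running { ticks } => State::Running { ticks: ticks + 1 },
            State::Poisoned => {
                eprintln!("machine is in poisoned state");
                State::Poisoned
            }
        };
    }
}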
- #[must_use] - fn poll_state(&mut self) - -> Option, (), CustomProtoHandlerOut>> { - let return_value; - self.state = match mem::replace(&mut self.state, ProtocolState::Poisoned) { - ProtocolState::Poisoned => { - error!(target: "sub-libp2p", "Handler with {:?} is in poisoned state", + ProtocolState::Poisoned + } + + ProtocolState::Init { + substreams: mut shutdown, + .. + } => { + for s in &mut shutdown { + s.shutdown(); + } + ProtocolState::Disabled { + shutdown, + reenable: false, + } + } + + ProtocolState::Opening { .. } => ProtocolState::Disabled { + shutdown: SmallVec::new(), + reenable: false, + }, + + ProtocolState::BackCompat { + mut substream, + mut shutdown, + } => { + substream.shutdown(); + shutdown.push(substream); + let event = CustomProtoHandlerOut::CustomProtocolClosed { result: Ok(()) }; + self.events_queue.push(ProtocolsHandlerEvent::Custom(event)); + ProtocolState::Disabled { + shutdown: shutdown.into_iter().collect(), + reenable: false, + } + } + + ProtocolState::Normal(state) => { + let mut out: SmallVec<[_; 6]> = SmallVec::new(); + out.extend(state.outgoing_substream.into_iter()); + out.extend(state.incoming_substreams.into_iter()); + out.extend(state.pending_response.into_iter().map(|(_, s)| s)); + out.extend(state.pending_send_back.into_iter().map(|(_, s)| s)); + for s in &mut out { + s.shutdown(); + } + out.extend(state.shutdown.into_iter()); + let event = CustomProtoHandlerOut::CustomProtocolClosed { result: Ok(()) }; + self.events_queue.push(ProtocolsHandlerEvent::Custom(event)); + ProtocolState::Disabled { + shutdown: out, + reenable: false, + } + } + + ProtocolState::Disabled { shutdown, .. } => ProtocolState::Disabled { + shutdown, + reenable: false, + }, + }; + } + + /// Polls the state for events. Optionally returns an event to produce. 
+ #[must_use] + fn poll_state( + &mut self, + ) -> Option< + ProtocolsHandlerEvent, (), CustomProtoHandlerOut>, + > { + let return_value; + self.state = match mem::replace(&mut self.state, ProtocolState::Poisoned) { + ProtocolState::Poisoned => { + error!(target: "sub-libp2p", "Handler with {:?} is in poisoned state", self.remote_peer_id); - return_value = None; - ProtocolState::Poisoned - } - - ProtocolState::Init { substreams, mut init_deadline } => { - match init_deadline.poll() { - Ok(Async::Ready(())) => { - init_deadline.reset(Instant::now() + Duration::from_secs(60)); - error!(target: "sub-libp2p", "Handler initialization process is too long \ + return_value = None; + ProtocolState::Poisoned + } + + ProtocolState::Init { + substreams, + mut init_deadline, + } => { + match init_deadline.poll() { + Ok(Async::Ready(())) => { + init_deadline.reset(Instant::now() + Duration::from_secs(60)); + error!(target: "sub-libp2p", "Handler initialization process is too long \ with {:?}", self.remote_peer_id) - }, - Ok(Async::NotReady) => {} - Err(_) => error!(target: "sub-libp2p", "Tokio timer has errored") - } - - return_value = None; - ProtocolState::Init { substreams, init_deadline } - } - - ProtocolState::Opening { mut deadline } => { - match deadline.poll() { - Ok(Async::Ready(())) => { - deadline.reset(Instant::now() + Duration::from_secs(60)); - let event = CustomProtoHandlerOut::ProtocolError { - is_severe: false, - error: "Timeout when opening protocol".to_string().into(), - }; - return_value = Some(ProtocolsHandlerEvent::Custom(event)); - ProtocolState::Opening { deadline } - }, - Ok(Async::NotReady) => { - return_value = None; - ProtocolState::Opening { deadline } - }, - Err(_) => { - error!(target: "sub-libp2p", "Tokio timer has errored"); - deadline.reset(Instant::now() + Duration::from_secs(60)); - return_value = None; - ProtocolState::Opening { deadline } - }, - } - } - - ProtocolState::BackCompat { mut substream, shutdown } => { - match substream.poll() { - Ok(Async::Ready(Some(RegisteredProtocolEvent::Message(message)))) => { - let event = CustomProtoHandlerOut::CustomMessage { - message - }; - return_value = Some(ProtocolsHandlerEvent::Custom(event)); - ProtocolState::BackCompat { substream, shutdown } - }, - Ok(Async::Ready(Some(RegisteredProtocolEvent::Clogged { messages }))) => { - let event = CustomProtoHandlerOut::Clogged { - messages, - }; - return_value = Some(ProtocolsHandlerEvent::Custom(event)); - ProtocolState::BackCompat { substream, shutdown } - } - Ok(Async::NotReady) => { - return_value = None; - ProtocolState::BackCompat { substream, shutdown } - } - Ok(Async::Ready(None)) => { - let event = CustomProtoHandlerOut::CustomProtocolClosed { - result: Ok(()) - }; - return_value = Some(ProtocolsHandlerEvent::Custom(event)); - ProtocolState::Disabled { - shutdown: shutdown.into_iter().collect(), - reenable: false - } - } - Err(err) => { - let event = CustomProtoHandlerOut::CustomProtocolClosed { - result: Err(err), - }; - return_value = Some(ProtocolsHandlerEvent::Custom(event)); - ProtocolState::Disabled { - shutdown: shutdown.into_iter().collect(), - reenable: false - } - } - } - } - - ProtocolState::Normal(mut norm_state) => { - if let Some(event) = norm_state.poll() { - return_value = Some(ProtocolsHandlerEvent::Custom(event)); - } else { - return_value = None; - } - - ProtocolState::Normal(norm_state) - } - - ProtocolState::Disabled { mut shutdown, reenable } => { - shutdown_list(&mut shutdown); - // If `reenable` is `true`, that means we should open the substreams 
system again - // after all the substreams are closed. - if reenable && shutdown.is_empty() { - return_value = Some(ProtocolsHandlerEvent::OutboundSubstreamRequest { - upgrade: self.protocol.clone(), - info: (), - }); - ProtocolState::Opening { - deadline: Delay::new(Instant::now() + Duration::from_secs(60)) - } - } else { - return_value = None; - ProtocolState::Disabled { shutdown, reenable } - } - } - }; - - return_value - } - - /// Called by `inject_fully_negotiated_inbound` and `inject_fully_negotiated_outbound`. - fn inject_fully_negotiated( - &mut self, - mut substream: RegisteredProtocolSubstream - ) { - self.state = match mem::replace(&mut self.state, ProtocolState::Poisoned) { - ProtocolState::Poisoned => { - error!(target: "sub-libp2p", "Handler with {:?} is in poisoned state", + } + Ok(Async::NotReady) => {} + Err(_) => error!(target: "sub-libp2p", "Tokio timer has errored"), + } + + return_value = None; + ProtocolState::Init { + substreams, + init_deadline, + } + } + + ProtocolState::Opening { mut deadline } => match deadline.poll() { + Ok(Async::Ready(())) => { + deadline.reset(Instant::now() + Duration::from_secs(60)); + let event = CustomProtoHandlerOut::ProtocolError { + is_severe: false, + error: "Timeout when opening protocol".to_string().into(), + }; + return_value = Some(ProtocolsHandlerEvent::Custom(event)); + ProtocolState::Opening { deadline } + } + Ok(Async::NotReady) => { + return_value = None; + ProtocolState::Opening { deadline } + } + Err(_) => { + error!(target: "sub-libp2p", "Tokio timer has errored"); + deadline.reset(Instant::now() + Duration::from_secs(60)); + return_value = None; + ProtocolState::Opening { deadline } + } + }, + + ProtocolState::BackCompat { + mut substream, + shutdown, + } => match substream.poll() { + Ok(Async::Ready(Some(RegisteredProtocolEvent::Message(message)))) => { + let event = CustomProtoHandlerOut::CustomMessage { message }; + return_value = Some(ProtocolsHandlerEvent::Custom(event)); + ProtocolState::BackCompat { + substream, + shutdown, + } + } + Ok(Async::Ready(Some(RegisteredProtocolEvent::Clogged { messages }))) => { + let event = CustomProtoHandlerOut::Clogged { messages }; + return_value = Some(ProtocolsHandlerEvent::Custom(event)); + ProtocolState::BackCompat { + substream, + shutdown, + } + } + Ok(Async::NotReady) => { + return_value = None; + ProtocolState::BackCompat { + substream, + shutdown, + } + } + Ok(Async::Ready(None)) => { + let event = CustomProtoHandlerOut::CustomProtocolClosed { result: Ok(()) }; + return_value = Some(ProtocolsHandlerEvent::Custom(event)); + ProtocolState::Disabled { + shutdown: shutdown.into_iter().collect(), + reenable: false, + } + } + Err(err) => { + let event = CustomProtoHandlerOut::CustomProtocolClosed { result: Err(err) }; + return_value = Some(ProtocolsHandlerEvent::Custom(event)); + ProtocolState::Disabled { + shutdown: shutdown.into_iter().collect(), + reenable: false, + } + } + }, + + ProtocolState::Normal(mut norm_state) => { + if let Some(event) = norm_state.poll() { + return_value = Some(ProtocolsHandlerEvent::Custom(event)); + } else { + return_value = None; + } + + ProtocolState::Normal(norm_state) + } + + ProtocolState::Disabled { + mut shutdown, + reenable, + } => { + shutdown_list(&mut shutdown); + // If `reenable` is `true`, that means we should open the substreams system again + // after all the substreams are closed. 
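(Aside on the `init_deadline`/`deadline` handling above: a tokio-timer 0.2 `Delay` stays ready once fired, so the code resets it after each warning to make the diagnostic repeat. A self-contained sketch; `check_deadline` and the one-minute period are illustrative, and like any futures-0.1 future it must be polled from within a task:)

extern crate futures; // futures = "0.1"
extern crate tokio_timer; // tokio-timer = "0.2"

use futures::prelude::*;
use std::time::{Duration, Instant};
use tokio_timer::Delay;

fn check_deadline(deadline: &mut Delay) {
    match deadline.poll() {
        Ok(Async::Ready(())) => {
            // Re-arm, otherwise a fired Delay stays ready forever.
            deadline.reset(Instant::now() + Duration::from_secs(60));
            eprintln!("operation is taking abnormally long");
        }
        Ok(Async::NotReady) => {}
        Err(_) => eprintln!("tokio timer has errored"),
    }
}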
+ if reenable && shutdown.is_empty() { + return_value = Some(ProtocolsHandlerEvent::OutboundSubstreamRequest { + upgrade: self.protocol.clone(), + info: (), + }); + ProtocolState::Opening { + deadline: Delay::new(Instant::now() + Duration::from_secs(60)), + } + } else { + return_value = None; + ProtocolState::Disabled { shutdown, reenable } + } + } + }; + + return_value + } + + /// Called by `inject_fully_negotiated_inbound` and `inject_fully_negotiated_outbound`. + fn inject_fully_negotiated( + &mut self, + mut substream: RegisteredProtocolSubstream, + ) { + self.state = match mem::replace(&mut self.state, ProtocolState::Poisoned) { + ProtocolState::Poisoned => { + error!(target: "sub-libp2p", "Handler with {:?} is in poisoned state", self.remote_peer_id); - ProtocolState::Poisoned - } - - ProtocolState::Init { mut substreams, init_deadline } => { - if substream.endpoint() == Endpoint::Dialer { - error!(target: "sub-libp2p", "Opened dialing substream with {:?} before \ + ProtocolState::Poisoned + } + + ProtocolState::Init { + mut substreams, + init_deadline, + } => { + if substream.endpoint() == Endpoint::Dialer { + error!(target: "sub-libp2p", "Opened dialing substream with {:?} before \ initialization", self.remote_peer_id); - } - substreams.push(substream); - ProtocolState::Init { substreams, init_deadline } - } - - ProtocolState::Opening { .. } => { - let event = CustomProtoHandlerOut::CustomProtocolOpen { - version: substream.protocol_version() - }; - self.events_queue.push(ProtocolsHandlerEvent::Custom(event)); - - match (substream.endpoint(), substream.is_multiplex()) { - (Endpoint::Dialer, true) => { - ProtocolState::Normal(PerProtocolNormalState { - outgoing_substream: Some(substream), - incoming_substreams: SmallVec::new(), - pending_response: SmallVec::new(), - pending_send_back: SmallVec::new(), - pending_messages: SmallVec::new(), - shutdown: SmallVec::new(), - }) - }, - (Endpoint::Listener, true) => { - ProtocolState::Normal(PerProtocolNormalState { - outgoing_substream: None, - incoming_substreams: smallvec![substream], - pending_response: SmallVec::new(), - pending_send_back: SmallVec::new(), - pending_messages: SmallVec::new(), - shutdown: SmallVec::new(), - }) - }, - (_, false) => { - ProtocolState::BackCompat { - substream, - shutdown: SmallVec::new() - } - }, - } - } - - ProtocolState::BackCompat { substream: existing, mut shutdown } => { - warn!(target: "sub-libp2p", "Received extra substream after having already one \ + } + substreams.push(substream); + ProtocolState::Init { + substreams, + init_deadline, + } + } + + ProtocolState::Opening { .. 
} => { + let event = CustomProtoHandlerOut::CustomProtocolOpen { + version: substream.protocol_version(), + }; + self.events_queue.push(ProtocolsHandlerEvent::Custom(event)); + + match (substream.endpoint(), substream.is_multiplex()) { + (Endpoint::Dialer, true) => ProtocolState::Normal(PerProtocolNormalState { + outgoing_substream: Some(substream), + incoming_substreams: SmallVec::new(), + pending_response: SmallVec::new(), + pending_send_back: SmallVec::new(), + pending_messages: SmallVec::new(), + shutdown: SmallVec::new(), + }), + (Endpoint::Listener, true) => ProtocolState::Normal(PerProtocolNormalState { + outgoing_substream: None, + incoming_substreams: smallvec![substream], + pending_response: SmallVec::new(), + pending_send_back: SmallVec::new(), + pending_messages: SmallVec::new(), + shutdown: SmallVec::new(), + }), + (_, false) => ProtocolState::BackCompat { + substream, + shutdown: SmallVec::new(), + }, + } + } + + ProtocolState::BackCompat { + substream: existing, + mut shutdown, + } => { + warn!(target: "sub-libp2p", "Received extra substream after having already one \ open in backwards-compatibility mode with {:?}", self.remote_peer_id); - substream.shutdown(); - shutdown.push(substream); - ProtocolState::BackCompat { substream: existing, shutdown } - } - - ProtocolState::Normal(mut state) => { - if substream.endpoint() == Endpoint::Listener { - state.incoming_substreams.push(substream); - } else if !state.pending_messages.is_empty() { - let message = state.pending_messages.remove(0); - let request_id = message.request_id(); - substream.send_message(message); - if let CustomMessageId::Request(request_id) = request_id { - state.pending_response.push((request_id, substream)); - } else { - state.shutdown.push(substream); - } - } else { - debug!(target: "sub-libp2p", "Opened spurious outbound substream with {:?}", + substream.shutdown(); + shutdown.push(substream); + ProtocolState::BackCompat { + substream: existing, + shutdown, + } + } + + ProtocolState::Normal(mut state) => { + if substream.endpoint() == Endpoint::Listener { + state.incoming_substreams.push(substream); + } else if !state.pending_messages.is_empty() { + let message = state.pending_messages.remove(0); + let request_id = message.request_id(); + substream.send_message(message); + if let CustomMessageId::Request(request_id) = request_id { + state.pending_response.push((request_id, substream)); + } else { + state.shutdown.push(substream); + } + } else { + debug!(target: "sub-libp2p", "Opened spurious outbound substream with {:?}", self.remote_peer_id); - substream.shutdown(); - state.shutdown.push(substream); - } - - ProtocolState::Normal(state) - } - - ProtocolState::Disabled { mut shutdown, .. } => { - substream.shutdown(); - shutdown.push(substream); - ProtocolState::Disabled { shutdown, reenable: false } - } - }; - } - - /// Sends a message to the remote. - fn send_message(&mut self, message: TMessage) { - match self.state { - ProtocolState::BackCompat { ref mut substream, .. 
} => - substream.send_message(message), - - ProtocolState::Normal(ref mut state) => { - if let CustomMessageId::Request(request_id) = message.request_id() { - if let Some(mut outgoing_substream) = state.outgoing_substream.take() { - outgoing_substream.send_message(message); - state.pending_response.push((request_id, outgoing_substream)); - } else { - if state.pending_messages.len() >= 2048 { - let event = CustomProtoHandlerOut::Clogged { - messages: Vec::new(), - }; - self.events_queue.push(ProtocolsHandlerEvent::Custom(event)); - } - state.pending_messages.push(message); - self.events_queue.push(ProtocolsHandlerEvent::OutboundSubstreamRequest { - upgrade: self.protocol.clone(), - info: () - }); - } - } else if let CustomMessageId::Response(request_id) = message.request_id() { - if let Some(pos) = state.pending_send_back.iter().position(|(id, _)| *id == request_id) { - let (_, mut substream) = state.pending_send_back.remove(pos); - substream.send_message(message); - state.shutdown.push(substream); - } else { - warn!(target: "sub-libp2p", "Libp2p layer received response to a \ + substream.shutdown(); + state.shutdown.push(substream); + } + + ProtocolState::Normal(state) + } + + ProtocolState::Disabled { mut shutdown, .. } => { + substream.shutdown(); + shutdown.push(substream); + ProtocolState::Disabled { + shutdown, + reenable: false, + } + } + }; + } + + /// Sends a message to the remote. + fn send_message(&mut self, message: TMessage) { + match self.state { + ProtocolState::BackCompat { + ref mut substream, .. + } => substream.send_message(message), + + ProtocolState::Normal(ref mut state) => { + if let CustomMessageId::Request(request_id) = message.request_id() { + if let Some(mut outgoing_substream) = state.outgoing_substream.take() { + outgoing_substream.send_message(message); + state + .pending_response + .push((request_id, outgoing_substream)); + } else { + if state.pending_messages.len() >= 2048 { + let event = CustomProtoHandlerOut::Clogged { + messages: Vec::new(), + }; + self.events_queue.push(ProtocolsHandlerEvent::Custom(event)); + } + state.pending_messages.push(message); + self.events_queue + .push(ProtocolsHandlerEvent::OutboundSubstreamRequest { + upgrade: self.protocol.clone(), + info: (), + }); + } + } else if let CustomMessageId::Response(request_id) = message.request_id() { + if let Some(pos) = state + .pending_send_back + .iter() + .position(|(id, _)| *id == request_id) + { + let (_, mut substream) = state.pending_send_back.remove(pos); + substream.send_message(message); + state.shutdown.push(substream); + } else { + warn!(target: "sub-libp2p", "Libp2p layer received response to a \ non-existing request ID {:?} with {:?}", request_id, self.remote_peer_id); - } - } else if let Some(mut outgoing_substream) = state.outgoing_substream.take() { - outgoing_substream.send_message(message); - state.shutdown.push(outgoing_substream); - } else { - if state.pending_messages.len() >= 2048 { - let event = CustomProtoHandlerOut::Clogged { - messages: Vec::new(), - }; - self.events_queue.push(ProtocolsHandlerEvent::Custom(event)); - } - state.pending_messages.push(message); - self.events_queue.push(ProtocolsHandlerEvent::OutboundSubstreamRequest { - upgrade: self.protocol.clone(), - info: () - }); - } - } - - _ => debug!(target: "sub-libp2p", "Tried to send message over closed protocol \ - with {:?}", self.remote_peer_id) - } - } + } + } else if let Some(mut outgoing_substream) = state.outgoing_substream.take() { + outgoing_substream.send_message(message); + 
state.shutdown.push(outgoing_substream); + } else { + if state.pending_messages.len() >= 2048 { + let event = CustomProtoHandlerOut::Clogged { + messages: Vec::new(), + }; + self.events_queue.push(ProtocolsHandlerEvent::Custom(event)); + } + state.pending_messages.push(message); + self.events_queue + .push(ProtocolsHandlerEvent::OutboundSubstreamRequest { + upgrade: self.protocol.clone(), + info: (), + }); + } + } + + _ => debug!(target: "sub-libp2p", "Tried to send message over closed protocol \ + with {:?}", self.remote_peer_id), + } + } } impl ProtocolsHandler for CustomProtoHandler -where TSubstream: AsyncRead + AsyncWrite, TMessage: CustomMessage { - type InEvent = CustomProtoHandlerIn; - type OutEvent = CustomProtoHandlerOut; - type Substream = TSubstream; - type Error = Void; - type InboundProtocol = RegisteredProtocol; - type OutboundProtocol = RegisteredProtocol; - type OutboundOpenInfo = (); - - fn listen_protocol(&self) -> Self::InboundProtocol { - self.protocol.clone() - } - - fn inject_fully_negotiated_inbound( - &mut self, - proto: >::Output - ) { - self.inject_fully_negotiated(proto); - } - - fn inject_fully_negotiated_outbound( - &mut self, - proto: >::Output, - _: Self::OutboundOpenInfo - ) { - self.inject_fully_negotiated(proto); - } - - fn inject_event(&mut self, message: CustomProtoHandlerIn) { - match message { - CustomProtoHandlerIn::Disable => self.disable(), - CustomProtoHandlerIn::Enable(endpoint) => self.enable(endpoint), - CustomProtoHandlerIn::SendCustomMessage { message } => - self.send_message(message), - } - } - - #[inline] - fn inject_dial_upgrade_error(&mut self, _: (), err: ProtocolsHandlerUpgrErr) { - let is_severe = match err { - ProtocolsHandlerUpgrErr::Upgrade(_) => true, - _ => false, - }; - - self.events_queue.push(ProtocolsHandlerEvent::Custom(CustomProtoHandlerOut::ProtocolError { - is_severe, - error: Box::new(err), - })); - } - - fn connection_keep_alive(&self) -> KeepAlive { - if self.warm_up_end >= Instant::now() { - return KeepAlive::Until(self.warm_up_end) - } - - let mut keep_forever = false; - - match self.state { - ProtocolState::Init { .. } | ProtocolState::Opening { .. } => {} - ProtocolState::BackCompat { .. } | ProtocolState::Normal { .. } => - keep_forever = true, - ProtocolState::Disabled { .. } | ProtocolState::Poisoned => return KeepAlive::Now, - } - - if keep_forever { - KeepAlive::Forever - } else { - KeepAlive::Now - } - } - - fn poll( - &mut self, - ) -> Poll< - ProtocolsHandlerEvent, - Self::Error, - > { - // Flush the events queue if necessary. - if !self.events_queue.is_empty() { - let event = self.events_queue.remove(0); - return Ok(Async::Ready(event)) - } - - // Process all the substreams. 
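(Aside: the `Response` branch of `send_message` above pairs a reply with the substream that carried the matching request by scanning `pending_send_back`. The same lookup-remove-send shape in isolation, with hypothetical simplified types:)

struct Sub;
impl Sub {
    fn send_message(&mut self, _msg: &str) {}
}

fn send_response(pending_send_back: &mut Vec<(u64, Sub)>, request_id: u64, msg: &str) {
    if let Some(pos) = pending_send_back
        .iter()
        .position(|(id, _)| *id == request_id)
    {
        let (_, mut substream) = pending_send_back.remove(pos);
        substream.send_message(msg);
        // The real code now queues this substream for shutdown.
    } else {
        eprintln!("response to a non-existing request ID {:?}", request_id);
    }
}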
- if let Some(event) = self.poll_state() { - return Ok(Async::Ready(event)) - } - - Ok(Async::NotReady) - } +where + TSubstream: AsyncRead + AsyncWrite, + TMessage: CustomMessage, +{ + type InEvent = CustomProtoHandlerIn; + type OutEvent = CustomProtoHandlerOut; + type Substream = TSubstream; + type Error = Void; + type InboundProtocol = RegisteredProtocol; + type OutboundProtocol = RegisteredProtocol; + type OutboundOpenInfo = (); + + fn listen_protocol(&self) -> Self::InboundProtocol { + self.protocol.clone() + } + + fn inject_fully_negotiated_inbound( + &mut self, + proto: >::Output, + ) { + self.inject_fully_negotiated(proto); + } + + fn inject_fully_negotiated_outbound( + &mut self, + proto: >::Output, + _: Self::OutboundOpenInfo, + ) { + self.inject_fully_negotiated(proto); + } + + fn inject_event(&mut self, message: CustomProtoHandlerIn) { + match message { + CustomProtoHandlerIn::Disable => self.disable(), + CustomProtoHandlerIn::Enable(endpoint) => self.enable(endpoint), + CustomProtoHandlerIn::SendCustomMessage { message } => self.send_message(message), + } + } + + #[inline] + fn inject_dial_upgrade_error(&mut self, _: (), err: ProtocolsHandlerUpgrErr) { + let is_severe = match err { + ProtocolsHandlerUpgrErr::Upgrade(_) => true, + _ => false, + }; + + self.events_queue.push(ProtocolsHandlerEvent::Custom( + CustomProtoHandlerOut::ProtocolError { + is_severe, + error: Box::new(err), + }, + )); + } + + fn connection_keep_alive(&self) -> KeepAlive { + if self.warm_up_end >= Instant::now() { + return KeepAlive::Until(self.warm_up_end); + } + + let mut keep_forever = false; + + match self.state { + ProtocolState::Init { .. } | ProtocolState::Opening { .. } => {} + ProtocolState::BackCompat { .. } | ProtocolState::Normal { .. } => keep_forever = true, + ProtocolState::Disabled { .. } | ProtocolState::Poisoned => return KeepAlive::Now, + } + + if keep_forever { + KeepAlive::Forever + } else { + KeepAlive::Now + } + } + + fn poll( + &mut self, + ) -> Poll< + ProtocolsHandlerEvent, + Self::Error, + > { + // Flush the events queue if necessary. + if !self.events_queue.is_empty() { + let event = self.events_queue.remove(0); + return Ok(Async::Ready(event)); + } + + // Process all the substreams. + if let Some(event) = self.poll_state() { + return Ok(Async::Ready(event)); + } + + Ok(Async::NotReady) + } } impl fmt::Debug for CustomProtoHandler where - TSubstream: AsyncRead + AsyncWrite, + TSubstream: AsyncRead + AsyncWrite, { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - f.debug_struct("CustomProtoHandler") - .finish() - } + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + f.debug_struct("CustomProtoHandler").finish() + } } /// Given a list of substreams, tries to shut them down. The substreams that have been successfully /// shut down are removed from the list. 
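(Aside on the function below: it walks the list backwards so that `swap_remove` is O(1) and the element swapped into slot `n` has already been visited; only entries still shutting down are pushed back. The bare pattern, with a hypothetical `poll_done` closure in place of substream polling, e.g. `drain_finished(&mut subs, |s| poll_until_closed(s))`:)

fn drain_finished<T>(list: &mut Vec<T>, mut poll_done: impl FnMut(&mut T) -> bool) {
    for n in (0..list.len()).rev() {
        let mut item = list.swap_remove(n);
        if !poll_done(&mut item) {
            list.push(item); // still shutting down: keep it around
        }
    }
}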
-fn shutdown_list<TMessage, TSubstream>
- (list: &mut SmallVec<impl smallvec::Array<Item = RegisteredProtocolSubstream<TMessage, TSubstream>>>)
-where TSubstream: AsyncRead + AsyncWrite, TMessage: CustomMessage {
- 'outer: for n in (0..list.len()).rev() {
- let mut substream = list.swap_remove(n);
- loop {
- match substream.poll() {
- Ok(Async::Ready(Some(_))) => {}
- Ok(Async::NotReady) => break,
- Err(_) | Ok(Async::Ready(None)) => continue 'outer,
- }
- }
- list.push(substream);
- }
+fn shutdown_list<TMessage, TSubstream>(
+ list: &mut SmallVec<
+ impl smallvec::Array<Item = RegisteredProtocolSubstream<TMessage, TSubstream>>,
+ >,
+) where
+ TSubstream: AsyncRead + AsyncWrite,
+ TMessage: CustomMessage,
+{
+ 'outer: for n in (0..list.len()).rev() {
+ let mut substream = list.swap_remove(n);
+ loop {
+ match substream.poll() {
+ Ok(Async::Ready(Some(_))) => {}
+ Ok(Async::NotReady) => break,
+ Err(_) | Ok(Async::Ready(None)) => continue 'outer,
+ }
+ }
+ list.push(substream);
+ }
}
diff --git a/core/network-libp2p/src/custom_proto/upgrade.rs b/core/network-libp2p/src/custom_proto/upgrade.rs
index fc9ed5bddb..9973be8375 100644
--- a/core/network-libp2p/src/custom_proto/upgrade.rs
+++ b/core/network-libp2p/src/custom_proto/upgrade.rs
@@ -16,11 +16,13 @@ use crate::ProtocolId;
use bytes::Bytes;
-use libp2p::core::{Negotiated, Endpoint, UpgradeInfo, InboundUpgrade, OutboundUpgrade, upgrade::ProtocolName};
+use futures::{future, prelude::*, stream};
+use libp2p::core::{
+ upgrade::ProtocolName, Endpoint, InboundUpgrade, Negotiated, OutboundUpgrade, UpgradeInfo,
+};
use libp2p::tokio_codec::Framed;
use log::warn;
use std::{collections::VecDeque, io, iter, marker::PhantomData, vec::IntoIter as VecIntoIter};
-use futures::{prelude::*, future, stream};
use tokio_io::{AsyncRead, AsyncWrite};
use unsigned_varint::codec::UviBytes;
@@ -29,411 +31,420 @@ use unsigned_varint::codec::UviBytes;
/// Note that "a single protocol" here refers to `par` for example. However
/// each protocol can have multiple different versions for networking purposes.
pub struct RegisteredProtocol<TMessage> {
- /// Id of the protocol for API purposes.
- id: ProtocolId,
- /// Base name of the protocol as advertised on the network.
- /// Ends with `/` so that we can append a version number behind.
- base_name: Bytes,
- /// List of protocol versions that we support.
- /// Ordered in descending order so that the best comes first.
- supported_versions: Vec<u8>,
- /// Marker to pin the generic.
- marker: PhantomData<TMessage>,
+ /// Id of the protocol for API purposes.
+ id: ProtocolId,
+ /// Base name of the protocol as advertised on the network.
+ /// Ends with `/` so that we can append a version number behind.
+ base_name: Bytes,
+ /// List of protocol versions that we support.
+ /// Ordered in descending order so that the best comes first.
+ supported_versions: Vec<u8>,
+ /// Marker to pin the generic.
+ marker: PhantomData<TMessage>,
}

impl<TMessage> RegisteredProtocol<TMessage> {
- /// Creates a new `RegisteredProtocol`. The `custom_data` parameter will be
- /// passed inside the `RegisteredProtocolOutput`.
- pub fn new(protocol: ProtocolId, versions: &[u8])
- -> Self {
- let mut base_name = Bytes::from_static(b"/substrate/");
- base_name.extend_from_slice(&protocol);
- base_name.extend_from_slice(b"/");
-
- RegisteredProtocol {
- base_name,
- id: protocol,
- supported_versions: {
- let mut tmp = versions.to_vec();
- tmp.sort_unstable_by(|a, b| b.cmp(&a));
- tmp
- },
- marker: PhantomData,
- }
- }
-
- /// Returns the ID of the protocol.
- #[inline]
- pub fn id(&self) -> ProtocolId {
- self.id
- }
+ /// Creates a new `RegisteredProtocol`. The `custom_data` parameter will be
+ /// passed inside the `RegisteredProtocolOutput`.
+ pub fn new(protocol: ProtocolId, versions: &[u8]) -> Self { + let mut base_name = Bytes::from_static(b"/substrate/"); + base_name.extend_from_slice(&protocol); + base_name.extend_from_slice(b"/"); + + RegisteredProtocol { + base_name, + id: protocol, + supported_versions: { + let mut tmp = versions.to_vec(); + tmp.sort_unstable_by(|a, b| b.cmp(&a)); + tmp + }, + marker: PhantomData, + } + } + + /// Returns the ID of the protocol. + #[inline] + pub fn id(&self) -> ProtocolId { + self.id + } } impl Clone for RegisteredProtocol { - fn clone(&self) -> Self { - RegisteredProtocol { - id: self.id, - base_name: self.base_name.clone(), - supported_versions: self.supported_versions.clone(), - marker: PhantomData, - } - } + fn clone(&self) -> Self { + RegisteredProtocol { + id: self.id, + base_name: self.base_name.clone(), + supported_versions: self.supported_versions.clone(), + marker: PhantomData, + } + } } /// Output of a `RegisteredProtocol` upgrade. pub struct RegisteredProtocolSubstream { - /// If true, we are in the process of closing the sink. - is_closing: bool, - /// Whether the local node opened this substream (dialer), or we received this substream from - /// the remote (listener). - endpoint: Endpoint, - /// Buffer of packets to send. - send_queue: VecDeque>, - /// If true, we should call `poll_complete` on the inner sink. - requires_poll_complete: bool, - /// The underlying substream. - inner: stream::Fuse, UviBytes>>>, - /// Id of the protocol. - protocol_id: ProtocolId, - /// Version of the protocol that was negotiated. - protocol_version: u8, - /// If true, we have sent a "remote is clogged" event recently and shouldn't send another one - /// unless the buffer empties then fills itself again. - clogged_fuse: bool, - /// If true, then this substream uses the "/multi/" version of the protocol. This is a hint - /// that the handler can behave differently. - is_multiplex: bool, - /// Marker to pin the generic. - marker: PhantomData, + /// If true, we are in the process of closing the sink. + is_closing: bool, + /// Whether the local node opened this substream (dialer), or we received this substream from + /// the remote (listener). + endpoint: Endpoint, + /// Buffer of packets to send. + send_queue: VecDeque>, + /// If true, we should call `poll_complete` on the inner sink. + requires_poll_complete: bool, + /// The underlying substream. + inner: stream::Fuse, UviBytes>>>, + /// Id of the protocol. + protocol_id: ProtocolId, + /// Version of the protocol that was negotiated. + protocol_version: u8, + /// If true, we have sent a "remote is clogged" event recently and shouldn't send another one + /// unless the buffer empties then fills itself again. + clogged_fuse: bool, + /// If true, then this substream uses the "/multi/" version of the protocol. This is a hint + /// that the handler can behave differently. + is_multiplex: bool, + /// Marker to pin the generic. + marker: PhantomData, } impl RegisteredProtocolSubstream { - /// Returns the protocol id. - #[inline] - pub fn protocol_id(&self) -> ProtocolId { - self.protocol_id - } - - /// Returns the version of the protocol that was negotiated. - #[inline] - pub fn protocol_version(&self) -> u8 { - self.protocol_version - } - - /// Returns whether the local node opened this substream (dialer), or we received this - /// substream from the remote (listener). - pub fn endpoint(&self) -> Endpoint { - self.endpoint - } - - /// Returns true if we negotiated the "multiplexed" version. 
This means that the handler can - /// open multiple substreams instead of just one. - pub fn is_multiplex(&self) -> bool { - self.is_multiplex - } - - /// Starts a graceful shutdown process on this substream. - /// - /// Note that "graceful" means that we sent a closing message. We don't wait for any - /// confirmation from the remote. - /// - /// After calling this, the stream is guaranteed to finish soon-ish. - pub fn shutdown(&mut self) { - self.is_closing = true; - self.send_queue.clear(); - } - - /// Sends a message to the substream. - pub fn send_message(&mut self, data: TMessage) - where TMessage: CustomMessage { - if self.is_closing { - return - } - - self.send_queue.push_back(data.into_bytes()); - } + /// Returns the protocol id. + #[inline] + pub fn protocol_id(&self) -> ProtocolId { + self.protocol_id + } + + /// Returns the version of the protocol that was negotiated. + #[inline] + pub fn protocol_version(&self) -> u8 { + self.protocol_version + } + + /// Returns whether the local node opened this substream (dialer), or we received this + /// substream from the remote (listener). + pub fn endpoint(&self) -> Endpoint { + self.endpoint + } + + /// Returns true if we negotiated the "multiplexed" version. This means that the handler can + /// open multiple substreams instead of just one. + pub fn is_multiplex(&self) -> bool { + self.is_multiplex + } + + /// Starts a graceful shutdown process on this substream. + /// + /// Note that "graceful" means that we sent a closing message. We don't wait for any + /// confirmation from the remote. + /// + /// After calling this, the stream is guaranteed to finish soon-ish. + pub fn shutdown(&mut self) { + self.is_closing = true; + self.send_queue.clear(); + } + + /// Sends a message to the substream. + pub fn send_message(&mut self, data: TMessage) + where + TMessage: CustomMessage, + { + if self.is_closing { + return; + } + + self.send_queue.push_back(data.into_bytes()); + } } /// Implemented on messages that can be sent or received on the network. pub trait CustomMessage { - /// Turns a message into the raw bytes to send over the network. - fn into_bytes(self) -> Vec; - - /// Tries to parse `bytes` received from the network into a message. - fn from_bytes(bytes: &[u8]) -> Result - where Self: Sized; - - /// Returns a unique ID that is used to match request and responses. - /// - /// The networking layer employs multiplexing in order to have multiple parallel data streams. - /// Transmitting messages over the network uses two kinds of substreams: - /// - /// - Undirectional substreams, where we send a single message then close the substream. - /// - Bidirectional substreams, where we send a message then wait for a response. Once the - /// response has arrived, we close the substream. - /// - /// If `request_id()` returns `OneWay`, then this message will be sent or received over a - /// unidirectional substream. If instead it returns `Request` or `Response`, then we use the - /// value to match a request with its response. - fn request_id(&self) -> CustomMessageId; + /// Turns a message into the raw bytes to send over the network. + fn into_bytes(self) -> Vec; + + /// Tries to parse `bytes` received from the network into a message. + fn from_bytes(bytes: &[u8]) -> Result + where + Self: Sized; + + /// Returns a unique ID that is used to match request and responses. + /// + /// The networking layer employs multiplexing in order to have multiple parallel data streams. 
+ /// Transmitting messages over the network uses two kinds of substreams:
+ ///
+ /// - Unidirectional substreams, where we send a single message then close the substream.
+ /// - Bidirectional substreams, where we send a message then wait for a response. Once the
+ /// response has arrived, we close the substream.
+ ///
+ /// If `request_id()` returns `OneWay`, then this message will be sent or received over a
+ /// unidirectional substream. If instead it returns `Request` or `Response`, then we use the
+ /// value to match a request with its response.
+ fn request_id(&self) -> CustomMessageId;
}

/// See the documentation of `CustomMessage::request_id`.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum CustomMessageId {
- OneWay,
- Request(u64),
- Response(u64),
+ OneWay,
+ Request(u64),
+ Response(u64),
}

// These trait implementations exist mostly for testing convenience. This should eventually be
// removed.
impl CustomMessage for Vec<u8> {
- fn into_bytes(self) -> Vec<u8> {
- self
- }
+ fn into_bytes(self) -> Vec<u8> {
+ self
+ }

- fn from_bytes(bytes: &[u8]) -> Result<Self, ()> {
- Ok(bytes.to_vec())
- }
+ fn from_bytes(bytes: &[u8]) -> Result<Self, ()> {
+ Ok(bytes.to_vec())
+ }

- fn request_id(&self) -> CustomMessageId {
- CustomMessageId::OneWay
- }
+ fn request_id(&self) -> CustomMessageId {
+ CustomMessageId::OneWay
+ }
}

impl CustomMessage for (Option<u64>, Vec<u8>) {
- fn into_bytes(self) -> Vec<u8> {
- use byteorder::WriteBytesExt;
- use std::io::Write;
- let mut out = Vec::new();
- out.write_u64::<byteorder::BigEndian>(self.0.unwrap_or(u64::max_value()))
- .expect("Writing to a Vec can never fail");
- out.write_all(&self.1).expect("Writing to a Vec can never fail");
- out
- }
-
- fn from_bytes(bytes: &[u8]) -> Result<Self, ()> {
- use byteorder::ReadBytesExt;
- use std::io::Read;
- let mut rdr = std::io::Cursor::new(bytes);
- let id = rdr.read_u64::<byteorder::BigEndian>().map_err(|_| ())?;
- let mut out = Vec::new();
- rdr.read_to_end(&mut out).map_err(|_| ())?;
- let id = if id == u64::max_value() {
- None
- } else {
- Some(id)
- };
- Ok((id, out))
- }
-
- fn request_id(&self) -> CustomMessageId {
- if let Some(id) = self.0 {
- CustomMessageId::Request(id)
- } else {
- CustomMessageId::OneWay
- }
- }
+ fn into_bytes(self) -> Vec<u8> {
+ use byteorder::WriteBytesExt;
+ use std::io::Write;
+ let mut out = Vec::new();
+ out.write_u64::<byteorder::BigEndian>(self.0.unwrap_or(u64::max_value()))
+ .expect("Writing to a Vec can never fail");
+ out.write_all(&self.1)
+ .expect("Writing to a Vec can never fail");
+ out
+ }
+
+ fn from_bytes(bytes: &[u8]) -> Result<Self, ()> {
+ use byteorder::ReadBytesExt;
+ use std::io::Read;
+ let mut rdr = std::io::Cursor::new(bytes);
+ let id = rdr.read_u64::<byteorder::BigEndian>().map_err(|_| ())?;
+ let mut out = Vec::new();
+ rdr.read_to_end(&mut out).map_err(|_| ())?;
+ let id = if id == u64::max_value() {
+ None
+ } else {
+ Some(id)
+ };
+ Ok((id, out))
+ }
+
+ fn request_id(&self) -> CustomMessageId {
+ if let Some(id) = self.0 {
+ CustomMessageId::Request(id)
+ } else {
+ CustomMessageId::OneWay
+ }
+ }
}

/// Event produced by the `RegisteredProtocolSubstream`.
#[derive(Debug, Clone)]
pub enum RegisteredProtocolEvent<TMessage> {
- /// Received a message from the remote.
- Message(TMessage),
-
- /// Diagnostic event indicating that the connection is clogged and we should avoid sending too
- /// many messages to it.
- Clogged {
- /// Copy of the messages that are within the buffer, for further diagnostic.
- messages: Vec<TMessage>,
- },
+ /// Received a message from the remote.
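// [Editor's illustration, not part of the patch] A minimal sketch round-tripping
// the `(Option<u64>, Vec<u8>)` codec implemented above; the values are arbitrary
// and the crate is assumed to be in scope as `substrate_network_libp2p`.
fn _custom_message_round_trip() {
    use substrate_network_libp2p::{CustomMessage, CustomMessageId};
    let msg = (Some(7u64), vec![1u8, 2, 3]);
    assert_eq!(msg.request_id(), CustomMessageId::Request(7));
    // Eight bytes of request id are prepended to the payload.
    let bytes = msg.clone().into_bytes();
    assert_eq!(bytes.len(), 8 + 3);
    // Decoding restores the pair; an id of u64::max_value() maps back to `None`,
    // which `request_id` reports as `CustomMessageId::OneWay`.
    let decoded = <(Option<u64>, Vec<u8>)>::from_bytes(&bytes).unwrap();
    assert_eq!(decoded, msg);
}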
+ Message(TMessage), + + /// Diagnostic event indicating that the connection is clogged and we should avoid sending too + /// many messages to it. + Clogged { + /// Copy of the messages that are within the buffer, for further diagnostic. + messages: Vec, + }, } impl Stream for RegisteredProtocolSubstream -where TSubstream: AsyncRead + AsyncWrite, TMessage: CustomMessage { - type Item = RegisteredProtocolEvent; - type Error = io::Error; - - fn poll(&mut self) -> Poll, Self::Error> { - // Flushing the local queue. - while let Some(packet) = self.send_queue.pop_front() { - match self.inner.start_send(packet)? { - AsyncSink::NotReady(packet) => { - self.send_queue.push_front(packet); - break - }, - AsyncSink::Ready => self.requires_poll_complete = true, - } - } - - // If we are closing, close as soon as the Sink is closed. - if self.is_closing { - return Ok(self.inner.close()?.map(|()| None)) - } - - // Indicating that the remote is clogged if that's the case. - if self.send_queue.len() >= 2048 { - if !self.clogged_fuse { - // Note: this fuse is important not just for preventing us from flooding the logs; - // if you remove the fuse, then we will always return early from this function and - // thus never read any message from the network. - self.clogged_fuse = true; - return Ok(Async::Ready(Some(RegisteredProtocolEvent::Clogged { - messages: self.send_queue.iter() - .map(|m| CustomMessage::from_bytes(&m)) - .filter_map(Result::ok) - .collect(), - }))) - } - } else { - self.clogged_fuse = false; - } - - // Flushing if necessary. - if self.requires_poll_complete { - if let Async::Ready(()) = self.inner.poll_complete()? { - self.requires_poll_complete = false; - } - } - - // Receiving incoming packets. - // Note that `inner` is wrapped in a `Fuse`, therefore we can poll it forever. - match self.inner.poll()? { - Async::Ready(Some(data)) => { - let message = ::from_bytes(&data) +where + TSubstream: AsyncRead + AsyncWrite, + TMessage: CustomMessage, +{ + type Item = RegisteredProtocolEvent; + type Error = io::Error; + + fn poll(&mut self) -> Poll, Self::Error> { + // Flushing the local queue. + while let Some(packet) = self.send_queue.pop_front() { + match self.inner.start_send(packet)? { + AsyncSink::NotReady(packet) => { + self.send_queue.push_front(packet); + break; + } + AsyncSink::Ready => self.requires_poll_complete = true, + } + } + + // If we are closing, close as soon as the Sink is closed. + if self.is_closing { + return Ok(self.inner.close()?.map(|()| None)); + } + + // Indicating that the remote is clogged if that's the case. + if self.send_queue.len() >= 2048 { + if !self.clogged_fuse { + // Note: this fuse is important not just for preventing us from flooding the logs; + // if you remove the fuse, then we will always return early from this function and + // thus never read any message from the network. + self.clogged_fuse = true; + return Ok(Async::Ready(Some(RegisteredProtocolEvent::Clogged { + messages: self + .send_queue + .iter() + .map(|m| CustomMessage::from_bytes(&m)) + .filter_map(Result::ok) + .collect(), + }))); + } + } else { + self.clogged_fuse = false; + } + + // Flushing if necessary. + if self.requires_poll_complete { + if let Async::Ready(()) = self.inner.poll_complete()? { + self.requires_poll_complete = false; + } + } + + // Receiving incoming packets. + // Note that `inner` is wrapped in a `Fuse`, therefore we can poll it forever. + match self.inner.poll()? 
{
+ Async::Ready(Some(data)) => {
+ let message = <TMessage as CustomMessage>::from_bytes(&data)
.map_err(|()| {
warn!(target: "sub-libp2p", "Couldn't decode packet sent by the remote: {:?}", data);
io::ErrorKind::InvalidData
})?;
- Ok(Async::Ready(Some(RegisteredProtocolEvent::Message(message))))
- }
- Async::Ready(None) =>
- if !self.requires_poll_complete && self.send_queue.is_empty() {
- Ok(Async::Ready(None))
- } else {
- Ok(Async::NotReady)
- }
- Async::NotReady => Ok(Async::NotReady),
- }
- }
+ Ok(Async::Ready(Some(RegisteredProtocolEvent::Message(
+ message,
+ ))))
+ }
+ Async::Ready(None) => {
+ if !self.requires_poll_complete && self.send_queue.is_empty() {
+ Ok(Async::Ready(None))
+ } else {
+ Ok(Async::NotReady)
+ }
+ }
+ Async::NotReady => Ok(Async::NotReady),
+ }
+ }
}

impl<TMessage, TSubstream> UpgradeInfo for RegisteredProtocol<TMessage, TSubstream> {
- type Info = RegisteredProtocolName;
- type InfoIter = VecIntoIter<Self::Info>;
-
- #[inline]
- fn protocol_info(&self) -> Self::InfoIter {
- // Report each version as an individual protocol.
- self.supported_versions.iter().flat_map(|&version| {
- let num = version.to_string();
-
- // Note that `name1` is the multiplex version, as we privilege it over the old one.
- let mut name1 = self.base_name.clone();
- name1.extend_from_slice(b"multi/");
- name1.extend_from_slice(num.as_bytes());
- let proto1 = RegisteredProtocolName {
- name: name1,
- version,
- is_multiplex: true,
- };
-
- let mut name2 = self.base_name.clone();
- name2.extend_from_slice(num.as_bytes());
- let proto2 = RegisteredProtocolName {
- name: name2,
- version,
- is_multiplex: false,
- };
-
- // Important note: we prioritize the backwards compatible mode for now.
- // After some intensive testing has been done, we should switch to the new mode by
- // default.
- // Then finally we can remove the old mode after everyone has switched.
- // See https://github.com/paritytech/substrate/issues/1692
- iter::once(proto2).chain(iter::once(proto1))
- }).collect::<Vec<_>>().into_iter()
- }
+ type Info = RegisteredProtocolName;
+ type InfoIter = VecIntoIter<Self::Info>;
+
+ #[inline]
+ fn protocol_info(&self) -> Self::InfoIter {
+ // Report each version as an individual protocol.
+ self.supported_versions
+ .iter()
+ .flat_map(|&version| {
+ let num = version.to_string();
+
+ // Note that `name1` is the multiplex version, as we privilege it over the old one.
+ let mut name1 = self.base_name.clone();
+ name1.extend_from_slice(b"multi/");
+ name1.extend_from_slice(num.as_bytes());
+ let proto1 = RegisteredProtocolName {
+ name: name1,
+ version,
+ is_multiplex: true,
+ };
+
+ let mut name2 = self.base_name.clone();
+ name2.extend_from_slice(num.as_bytes());
+ let proto2 = RegisteredProtocolName {
+ name: name2,
+ version,
+ is_multiplex: false,
+ };
+
+ // Important note: we prioritize the backwards compatible mode for now.
+ // After some intensive testing has been done, we should switch to the new mode by
+ // default.
+ // Then finally we can remove the old mode after everyone has switched.
+ // See https://github.com/paritytech/substrate/issues/1692
+ iter::once(proto2).chain(iter::once(proto1))
+ })
+ .collect::<Vec<_>>()
+ .into_iter()
+ }
}

/// Implementation of `ProtocolName` for a custom protocol.
#[derive(Debug, Clone)]
pub struct RegisteredProtocolName {
- /// Protocol name, as advertised on the wire.
- name: Bytes,
- /// Version number. Stored in string form in `name`, but duplicated here for easier retrieval.
- version: u8,
- /// If true, then this version is the one with the multiplexing.
- is_multiplex: bool,
+ /// Protocol name, as advertised on the wire.
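// [Editor's illustration, not part of the patch] A sketch of the naming scheme
// produced by `protocol_info` above; the id and versions are made up. For
// `RegisteredProtocol::new(*b"dot", &[1, 2])`, versions are sorted descending
// and each yields a legacy name followed by a "/multi/" name:
fn _advertised_names() {
    let (proto, mut versions) = (*b"dot", vec![1u8, 2]);
    versions.sort_unstable_by(|a, b| b.cmp(a));
    let base = format!("/substrate/{}/", String::from_utf8_lossy(&proto));
    let names: Vec<String> = versions
        .iter()
        // Legacy name first, multiplexed second, matching `protocol_info`.
        .flat_map(|v| vec![format!("{}{}", base, v), format!("{}multi/{}", base, v)])
        .collect();
    assert_eq!(
        names,
        ["/substrate/dot/2", "/substrate/dot/multi/2", "/substrate/dot/1", "/substrate/dot/multi/1"]
    );
}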
+ name: Bytes, + /// Version number. Stored in string form in `name`, but duplicated here for easier retrieval. + version: u8, + /// If true, then this version is the one with the multiplexing. + is_multiplex: bool, } impl ProtocolName for RegisteredProtocolName { - fn protocol_name(&self) -> &[u8] { - &self.name - } + fn protocol_name(&self) -> &[u8] { + &self.name + } } impl InboundUpgrade for RegisteredProtocol -where TSubstream: AsyncRead + AsyncWrite, +where + TSubstream: AsyncRead + AsyncWrite, { - type Output = RegisteredProtocolSubstream; - type Future = future::FutureResult; - type Error = io::Error; - - fn upgrade_inbound( - self, - socket: Negotiated, - info: Self::Info, - ) -> Self::Future { - let framed = { - let mut codec = UviBytes::default(); - codec.set_max_len(16 * 1024 * 1024); // 16 MiB hard limit for packets. - Framed::new(socket, codec) - }; - - future::ok(RegisteredProtocolSubstream { - is_closing: false, - endpoint: Endpoint::Listener, - send_queue: VecDeque::new(), - requires_poll_complete: false, - inner: framed.fuse(), - protocol_id: self.id, - protocol_version: info.version, - clogged_fuse: false, - is_multiplex: info.is_multiplex, - marker: PhantomData, - }) - } + type Output = RegisteredProtocolSubstream; + type Future = future::FutureResult; + type Error = io::Error; + + fn upgrade_inbound(self, socket: Negotiated, info: Self::Info) -> Self::Future { + let framed = { + let mut codec = UviBytes::default(); + codec.set_max_len(16 * 1024 * 1024); // 16 MiB hard limit for packets. + Framed::new(socket, codec) + }; + + future::ok(RegisteredProtocolSubstream { + is_closing: false, + endpoint: Endpoint::Listener, + send_queue: VecDeque::new(), + requires_poll_complete: false, + inner: framed.fuse(), + protocol_id: self.id, + protocol_version: info.version, + clogged_fuse: false, + is_multiplex: info.is_multiplex, + marker: PhantomData, + }) + } } impl OutboundUpgrade for RegisteredProtocol -where TSubstream: AsyncRead + AsyncWrite, +where + TSubstream: AsyncRead + AsyncWrite, { - type Output = >::Output; - type Future = >::Future; - type Error = >::Error; - - fn upgrade_outbound( - self, - socket: Negotiated, - info: Self::Info, - ) -> Self::Future { - let framed = Framed::new(socket, UviBytes::default()); - - future::ok(RegisteredProtocolSubstream { - is_closing: false, - endpoint: Endpoint::Dialer, - send_queue: VecDeque::new(), - requires_poll_complete: false, - inner: framed.fuse(), - protocol_id: self.id, - protocol_version: info.version, - clogged_fuse: false, - is_multiplex: info.is_multiplex, - marker: PhantomData, - }) - } + type Output = >::Output; + type Future = >::Future; + type Error = >::Error; + + fn upgrade_outbound(self, socket: Negotiated, info: Self::Info) -> Self::Future { + let framed = Framed::new(socket, UviBytes::default()); + + future::ok(RegisteredProtocolSubstream { + is_closing: false, + endpoint: Endpoint::Dialer, + send_queue: VecDeque::new(), + requires_poll_complete: false, + inner: framed.fuse(), + protocol_id: self.id, + protocol_version: info.version, + clogged_fuse: false, + is_multiplex: info.is_multiplex, + marker: PhantomData, + }) + } } diff --git a/core/network-libp2p/src/lib.rs b/core/network-libp2p/src/lib.rs index 5b73db636b..53d0fcda98 100644 --- a/core/network-libp2p/src/lib.rs +++ b/core/network-libp2p/src/lib.rs @@ -24,41 +24,46 @@ mod transport; pub use crate::behaviour::Severity; pub use crate::config::*; +pub use crate::config::{NetworkConfiguration, NodeKeyConfig, NonReservedPeerMode, Secret}; pub use 
crate::custom_proto::{CustomMessage, CustomMessageId, RegisteredProtocol};
-pub use crate::config::{NetworkConfiguration, NodeKeyConfig, Secret, NonReservedPeerMode};
pub use crate::service_task::{start_service, Service, ServiceEvent};
-pub use libp2p::{Multiaddr, multiaddr, build_multiaddr};
-pub use libp2p::{identity, PeerId, core::PublicKey};
+pub use libp2p::{build_multiaddr, multiaddr, Multiaddr};
+pub use libp2p::{core::PublicKey, identity, PeerId};
use libp2p::core::nodes::ConnectedPoint;
use serde_derive::Serialize;
-use std::{collections::{HashMap, HashSet}, error, fmt, time::Duration};
+use std::{
+ collections::{HashMap, HashSet},
+ error, fmt,
+ time::Duration,
+};

/// Protocol / handler id
pub type ProtocolId = [u8; 3];

/// Parses a string address and returns the components, if valid.
pub fn parse_str_addr(addr_str: &str) -> Result<(PeerId, Multiaddr), ParseErr> {
- let mut addr: Multiaddr = addr_str.parse()?;
+ let mut addr: Multiaddr = addr_str.parse()?;

- let who = match addr.pop() {
- Some(multiaddr::Protocol::P2p(key)) => PeerId::from_multihash(key)
- .map_err(|_| ParseErr::InvalidPeerId)?,
- _ => return Err(ParseErr::PeerIdMissing),
- };
+ let who = match addr.pop() {
+ Some(multiaddr::Protocol::P2p(key)) => {
+ PeerId::from_multihash(key).map_err(|_| ParseErr::InvalidPeerId)?
+ }
+ _ => return Err(ParseErr::PeerIdMissing),
+ };

- Ok((who, addr))
+ Ok((who, addr))
}

/// Error that can be generated by `parse_str_addr`.
#[derive(Debug)]
pub enum ParseErr {
- /// Error while parsing the multiaddress.
- MultiaddrParse(multiaddr::Error),
- /// Multihash of the peer ID is invalid.
- InvalidPeerId,
- /// The peer ID is missing from the address.
- PeerIdMissing,
+ /// Error while parsing the multiaddress.
+ MultiaddrParse(multiaddr::Error),
+ /// Multihash of the peer ID is invalid.
+ InvalidPeerId,
+ /// The peer ID is missing from the address.
+ PeerIdMissing,
}

impl fmt::Display for ParseErr {
@@ -82,9 +87,9 @@ impl error::Error for ParseErr {
}

impl From<multiaddr::Error> for ParseErr {
- fn from(err: multiaddr::Error) -> ParseErr {
- ParseErr::MultiaddrParse(err)
- }
+ fn from(err: multiaddr::Error) -> ParseErr {
+ ParseErr::MultiaddrParse(err)
+ }
}

/// Returns general information about the networking.
@@ -95,74 +100,75 @@ impl From<multiaddr::Error> for ParseErr {
#[derive(Debug, PartialEq, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct NetworkState {
- /// PeerId of the local node.
- pub peer_id: String,
- /// List of addresses the node is currently listening on.
- pub listened_addresses: HashSet<Multiaddr>,
- /// List of addresses the node knows it can be reached as.
- pub external_addresses: HashSet<Multiaddr>,
- /// List of nodes we're connected to.
- pub connected_peers: HashMap<String, NetworkStatePeer>,
- /// List of nodes that we know of but that we're not connected to.
- pub not_connected_peers: HashMap<String, NetworkStateNotConnectedPeer>,
- /// Downloaded bytes per second averaged over the past few seconds.
- pub average_download_per_sec: u64,
- /// Uploaded bytes per second averaged over the past few seconds.
- pub average_upload_per_sec: u64,
- /// State of the peerset manager.
- pub peerset: serde_json::Value,
+ /// PeerId of the local node.
+ pub peer_id: String,
+ /// List of addresses the node is currently listening on.
+ pub listened_addresses: HashSet<Multiaddr>,
+ /// List of addresses the node knows it can be reached as.
+ pub external_addresses: HashSet<Multiaddr>,
+ /// List of nodes we're connected to.
+ pub connected_peers: HashMap<String, NetworkStatePeer>,
+ /// List of nodes that we know of but that we're not connected to.
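// [Editor's illustration, not part of the patch] Usage sketch for
// `parse_str_addr`; the trailing peer id below is a placeholder, not a real
// key hash.
fn _parse_bootnode(addr: &str) {
    match substrate_network_libp2p::parse_str_addr(addr) {
        // The trailing /p2p/<peer id> component is popped off the returned address.
        Ok((peer_id, addr)) => println!("peer {} at {}", peer_id.to_base58(), addr),
        Err(err) => println!("not a valid bootnode address: {}", err),
    }
}
// e.g. _parse_bootnode("/ip4/127.0.0.1/tcp/30333/p2p/<peer id>");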
+ pub not_connected_peers: HashMap, + /// Downloaded bytes per second averaged over the past few seconds. + pub average_download_per_sec: u64, + /// Uploaded bytes per second averaged over the past few seconds. + pub average_upload_per_sec: u64, + /// State of the peerset manager. + pub peerset: serde_json::Value, } #[derive(Debug, PartialEq, Serialize)] #[serde(rename_all = "camelCase")] pub struct NetworkStatePeer { - /// How we are connected to the node. - pub endpoint: NetworkStatePeerEndpoint, - /// Node information, as provided by the node itself. Can be empty if not known yet. - pub version_string: Option, - /// Latest ping duration with this node. - pub latest_ping_time: Option, - /// If true, the peer is "enabled", which means that we try to open Substrate-related protocols - /// with this peer. If false, we stick to Kademlia and/or other network-only protocols. - pub enabled: bool, - /// If true, the peer is "open", which means that we have a Substrate-related protocol - /// with this peer. - pub open: bool, - /// List of addresses known for this node. - pub known_addresses: HashSet, + /// How we are connected to the node. + pub endpoint: NetworkStatePeerEndpoint, + /// Node information, as provided by the node itself. Can be empty if not known yet. + pub version_string: Option, + /// Latest ping duration with this node. + pub latest_ping_time: Option, + /// If true, the peer is "enabled", which means that we try to open Substrate-related protocols + /// with this peer. If false, we stick to Kademlia and/or other network-only protocols. + pub enabled: bool, + /// If true, the peer is "open", which means that we have a Substrate-related protocol + /// with this peer. + pub open: bool, + /// List of addresses known for this node. + pub known_addresses: HashSet, } #[derive(Debug, PartialEq, Serialize)] #[serde(rename_all = "camelCase")] pub struct NetworkStateNotConnectedPeer { - /// List of addresses known for this node. - pub known_addresses: HashSet, + /// List of addresses known for this node. + pub known_addresses: HashSet, } #[derive(Debug, PartialEq, Serialize)] #[serde(rename_all = "camelCase")] pub enum NetworkStatePeerEndpoint { - /// We are dialing the given address. - Dialing(Multiaddr), - /// We are listening. - Listening { - /// Address we're listening on that received the connection. - listen_addr: Multiaddr, - /// Address data is sent back to. - send_back_addr: Multiaddr, - }, + /// We are dialing the given address. + Dialing(Multiaddr), + /// We are listening. + Listening { + /// Address we're listening on that received the connection. + listen_addr: Multiaddr, + /// Address data is sent back to. 
+ send_back_addr: Multiaddr, + }, } impl From for NetworkStatePeerEndpoint { - fn from(endpoint: ConnectedPoint) -> Self { - match endpoint { - ConnectedPoint::Dialer { address } => - NetworkStatePeerEndpoint::Dialing(address), - ConnectedPoint::Listener { listen_addr, send_back_addr } => - NetworkStatePeerEndpoint::Listening { - listen_addr: listen_addr, - send_back_addr: send_back_addr - } - } - } + fn from(endpoint: ConnectedPoint) -> Self { + match endpoint { + ConnectedPoint::Dialer { address } => NetworkStatePeerEndpoint::Dialing(address), + ConnectedPoint::Listener { + listen_addr, + send_back_addr, + } => NetworkStatePeerEndpoint::Listening { + listen_addr: listen_addr, + send_back_addr: send_back_addr, + }, + } + } } diff --git a/core/network-libp2p/src/service_task.rs b/core/network-libp2p/src/service_task.rs index 37c4b05eaa..a470cbbfe3 100644 --- a/core/network-libp2p/src/service_task.rs +++ b/core/network-libp2p/src/service_task.rs @@ -14,17 +14,17 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . +use crate::custom_proto::{CustomMessage, RegisteredProtocol}; use crate::{ - behaviour::Behaviour, behaviour::BehaviourOut, - transport, NetworkState, NetworkStatePeer, NetworkStateNotConnectedPeer + behaviour::Behaviour, behaviour::BehaviourOut, transport, NetworkState, + NetworkStateNotConnectedPeer, NetworkStatePeer, }; -use crate::custom_proto::{CustomMessage, RegisteredProtocol}; -use crate::{NetworkConfiguration, NonReservedPeerMode, parse_str_addr}; +use crate::{parse_str_addr, NetworkConfiguration, NonReservedPeerMode}; use fnv::FnvHashMap; use futures::{prelude::*, Stream}; -use libp2p::{multiaddr::Protocol, Multiaddr, core::swarm::NetworkBehaviour, PeerId}; -use libp2p::core::{Swarm, nodes::Substream, transport::boxed::Boxed, muxing::StreamMuxerBox}; use libp2p::core::nodes::ConnectedPoint; +use libp2p::core::{muxing::StreamMuxerBox, nodes::Substream, transport::boxed::Boxed, Swarm}; +use libp2p::{core::swarm::NetworkBehaviour, multiaddr::Protocol, Multiaddr, PeerId}; use log::{debug, info, warn}; use std::fs; use std::io::Error as IoError; @@ -36,356 +36,406 @@ use std::time::Duration; /// /// Returns a stream that must be polled regularly in order for the networking to function. pub fn start_service( - config: NetworkConfiguration, - registered_custom: RegisteredProtocol, + config: NetworkConfiguration, + registered_custom: RegisteredProtocol, ) -> Result<(Service, Arc), IoError> -where TMessage: CustomMessage + Send + 'static { - - if let Some(ref path) = config.net_config_path { - fs::create_dir_all(Path::new(path))?; - } - - // List of multiaddresses that we know in the network. - let mut known_addresses = Vec::new(); - let mut bootnodes = Vec::new(); - let mut reserved_nodes = Vec::new(); - - // Process the bootnodes. - for bootnode in config.boot_nodes.iter() { - match parse_str_addr(bootnode) { - Ok((peer_id, addr)) => { - bootnodes.push(peer_id.clone()); - known_addresses.push((peer_id, addr)); - }, - Err(_) => warn!(target: "sub-libp2p", "Not a valid bootnode address: {}", bootnode), - } - } - - // Initialize the reserved peers. - for reserved in config.reserved_nodes.iter() { - if let Ok((peer_id, addr)) = parse_str_addr(reserved) { - reserved_nodes.push(peer_id.clone()); - known_addresses.push((peer_id, addr)); - } else { - warn!(target: "sub-libp2p", "Not a valid reserved node address: {}", reserved); - } - } - - // Build the peerset. 
- let (peerset, peerset_receiver) = substrate_peerset::Peerset::from_config(substrate_peerset::PeersetConfig { - in_peers: config.in_peers, - out_peers: config.out_peers, - bootnodes, - reserved_only: config.non_reserved_mode == NonReservedPeerMode::Deny, - reserved_nodes, - }); - - // Private and public keys configuration. - let local_identity = config.node_key.clone().into_keypair()?; - let local_public = local_identity.public(); - let local_peer_id = local_public.clone().into_peer_id(); - - // Build the swarm. - let (mut swarm, bandwidth) = { - let user_agent = format!("{} ({})", config.client_version, config.node_name); - let behaviour = Behaviour::new(user_agent, local_public, registered_custom, known_addresses, peerset_receiver, config.enable_mdns); - let (transport, bandwidth) = transport::build_transport(local_identity); - (Swarm::new(transport, behaviour, local_peer_id.clone()), bandwidth) - }; - - // Listen on multiaddresses. - for addr in &config.listen_addresses { - match Swarm::listen_on(&mut swarm, addr.clone()) { - Ok(mut new_addr) => { - new_addr.append(Protocol::P2p(local_peer_id.clone().into())); - info!(target: "sub-libp2p", "Local node address is: {}", new_addr); - }, - Err(err) => warn!(target: "sub-libp2p", "Can't listen on {} because: {:?}", addr, err) - } - } - - // Add external addresses. - for addr in &config.public_addresses { - Swarm::add_external_address(&mut swarm, addr.clone()); - } - - let service = Service { - swarm, - bandwidth, - nodes_info: Default::default(), - injected_events: Vec::new(), - }; - - Ok((service, peerset)) +where + TMessage: CustomMessage + Send + 'static, +{ + if let Some(ref path) = config.net_config_path { + fs::create_dir_all(Path::new(path))?; + } + + // List of multiaddresses that we know in the network. + let mut known_addresses = Vec::new(); + let mut bootnodes = Vec::new(); + let mut reserved_nodes = Vec::new(); + + // Process the bootnodes. + for bootnode in config.boot_nodes.iter() { + match parse_str_addr(bootnode) { + Ok((peer_id, addr)) => { + bootnodes.push(peer_id.clone()); + known_addresses.push((peer_id, addr)); + } + Err(_) => warn!(target: "sub-libp2p", "Not a valid bootnode address: {}", bootnode), + } + } + + // Initialize the reserved peers. + for reserved in config.reserved_nodes.iter() { + if let Ok((peer_id, addr)) = parse_str_addr(reserved) { + reserved_nodes.push(peer_id.clone()); + known_addresses.push((peer_id, addr)); + } else { + warn!(target: "sub-libp2p", "Not a valid reserved node address: {}", reserved); + } + } + + // Build the peerset. + let (peerset, peerset_receiver) = + substrate_peerset::Peerset::from_config(substrate_peerset::PeersetConfig { + in_peers: config.in_peers, + out_peers: config.out_peers, + bootnodes, + reserved_only: config.non_reserved_mode == NonReservedPeerMode::Deny, + reserved_nodes, + }); + + // Private and public keys configuration. + let local_identity = config.node_key.clone().into_keypair()?; + let local_public = local_identity.public(); + let local_peer_id = local_public.clone().into_peer_id(); + + // Build the swarm. + let (mut swarm, bandwidth) = { + let user_agent = format!("{} ({})", config.client_version, config.node_name); + let behaviour = Behaviour::new( + user_agent, + local_public, + registered_custom, + known_addresses, + peerset_receiver, + config.enable_mdns, + ); + let (transport, bandwidth) = transport::build_transport(local_identity); + ( + Swarm::new(transport, behaviour, local_peer_id.clone()), + bandwidth, + ) + }; + + // Listen on multiaddresses. 
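// [Editor's illustration, not part of the patch] Caller-side sketch of the
// smallest useful `start_service` invocation, mirroring the test helper later
// in this patch; `Vec<u8>` is used as the message type.
fn _start_minimal_node() -> Result<(), std::io::Error> {
    use substrate_network_libp2p::{build_multiaddr, NetworkConfiguration, RegisteredProtocol, Service};
    let config = NetworkConfiguration {
        // Listen on an OS-assigned TCP port on localhost.
        listen_addresses: vec![build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(0u16)]],
        ..NetworkConfiguration::default()
    };
    let proto = RegisteredProtocol::new(*b"tst", &[1]);
    // `service` is a `Stream` and must be polled for networking to progress.
    let (service, _peerset): (Service<Vec<u8>>, _) =
        substrate_network_libp2p::start_service(config, proto)?;
    for addr in service.listeners() {
        println!("listening on {}", addr);
    }
    Ok(())
}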
+ for addr in &config.listen_addresses { + match Swarm::listen_on(&mut swarm, addr.clone()) { + Ok(mut new_addr) => { + new_addr.append(Protocol::P2p(local_peer_id.clone().into())); + info!(target: "sub-libp2p", "Local node address is: {}", new_addr); + } + Err(err) => warn!(target: "sub-libp2p", "Can't listen on {} because: {:?}", addr, err), + } + } + + // Add external addresses. + for addr in &config.public_addresses { + Swarm::add_external_address(&mut swarm, addr.clone()); + } + + let service = Service { + swarm, + bandwidth, + nodes_info: Default::default(), + injected_events: Vec::new(), + }; + + Ok((service, peerset)) } /// Event produced by the service. #[derive(Debug)] pub enum ServiceEvent { - /// A custom protocol substream has been opened with a node. - OpenedCustomProtocol { - /// Identity of the node. - peer_id: PeerId, - /// Version of the protocol that was opened. - version: u8, - /// Node debug info - debug_info: String, - }, - - /// A custom protocol substream has been closed. - ClosedCustomProtocol { - /// Identity of the node. - peer_id: PeerId, - /// Node debug info - debug_info: String, - }, - - /// Receives a message on a custom protocol stream. - CustomMessage { - /// Identity of the node. - peer_id: PeerId, - /// Message that has been received. - message: TMessage, - }, - - /// The substream with a node is clogged. We should avoid sending data to it if possible. - Clogged { - /// Index of the node. - peer_id: PeerId, - /// Copy of the messages that are within the buffer, for further diagnostic. - messages: Vec, - }, + /// A custom protocol substream has been opened with a node. + OpenedCustomProtocol { + /// Identity of the node. + peer_id: PeerId, + /// Version of the protocol that was opened. + version: u8, + /// Node debug info + debug_info: String, + }, + + /// A custom protocol substream has been closed. + ClosedCustomProtocol { + /// Identity of the node. + peer_id: PeerId, + /// Node debug info + debug_info: String, + }, + + /// Receives a message on a custom protocol stream. + CustomMessage { + /// Identity of the node. + peer_id: PeerId, + /// Message that has been received. + message: TMessage, + }, + + /// The substream with a node is clogged. We should avoid sending data to it if possible. + Clogged { + /// Index of the node. + peer_id: PeerId, + /// Copy of the messages that are within the buffer, for further diagnostic. + messages: Vec, + }, } /// Network service. Must be polled regularly in order for the networking to work. -pub struct Service where TMessage: CustomMessage { - /// Stream of events of the swarm. - swarm: Swarm, Behaviour>>, - - /// Bandwidth logging system. Can be queried to know the average bandwidth consumed. - bandwidth: Arc, - - /// Information about all the nodes we're connected to. - nodes_info: FnvHashMap, - - /// Events to produce on the Stream. - injected_events: Vec>, +pub struct Service +where + TMessage: CustomMessage, +{ + /// Stream of events of the swarm. + swarm: Swarm< + Boxed<(PeerId, StreamMuxerBox), IoError>, + Behaviour>, + >, + + /// Bandwidth logging system. Can be queried to know the average bandwidth consumed. + bandwidth: Arc, + + /// Information about all the nodes we're connected to. + nodes_info: FnvHashMap, + + /// Events to produce on the Stream. + injected_events: Vec>, } /// Information about a node we're connected to. #[derive(Debug)] struct NodeInfo { - /// How we're connected to the node. - endpoint: ConnectedPoint, - /// Version reported by the remote, or `None` if unknown. 
- client_version: Option, - /// Latest ping time with this node. - latest_ping: Option, + /// How we're connected to the node. + endpoint: ConnectedPoint, + /// Version reported by the remote, or `None` if unknown. + client_version: Option, + /// Latest ping time with this node. + latest_ping: Option, } impl Service -where TMessage: CustomMessage + Send + 'static { - /// Returns a struct containing tons of useful information about the network. - pub fn state(&mut self) -> NetworkState { - let connected_peers = { - let swarm = &mut self.swarm; - self.nodes_info.iter().map(move |(peer_id, info)| { - let known_addresses = NetworkBehaviour::addresses_of_peer(&mut **swarm, peer_id) - .into_iter().collect(); - - (peer_id.to_base58(), NetworkStatePeer { - endpoint: info.endpoint.clone().into(), - version_string: info.client_version.clone(), - latest_ping_time: info.latest_ping, - enabled: swarm.is_enabled(&peer_id), - open: swarm.is_open(&peer_id), - known_addresses, - }) - }).collect() - }; - - let not_connected_peers = { - let swarm = &mut self.swarm; - let nodes_info = &self.nodes_info; - let list = swarm.known_peers().filter(|p| !nodes_info.contains_key(p)) - .cloned().collect::>(); - list.into_iter().map(move |peer_id| { - (peer_id.to_base58(), NetworkStateNotConnectedPeer { - known_addresses: NetworkBehaviour::addresses_of_peer(&mut **swarm, &peer_id) - .into_iter().collect(), - }) - }).collect() - }; - - NetworkState { - peer_id: Swarm::local_peer_id(&self.swarm).to_base58(), - listened_addresses: Swarm::listeners(&self.swarm).cloned().collect(), - external_addresses: Swarm::external_addresses(&self.swarm).cloned().collect(), - average_download_per_sec: self.bandwidth.average_download_per_sec(), - average_upload_per_sec: self.bandwidth.average_upload_per_sec(), - connected_peers, - not_connected_peers, - peerset: self.swarm.peerset_debug_info(), - } - } - - /// Returns an iterator that produces the list of addresses we're listening on. - #[inline] - pub fn listeners(&self) -> impl Iterator { - Swarm::listeners(&self.swarm) - } - - /// Returns the downloaded bytes per second averaged over the past few seconds. - #[inline] - pub fn average_download_per_sec(&self) -> u64 { - self.bandwidth.average_download_per_sec() - } - - /// Returns the uploaded bytes per second averaged over the past few seconds. - #[inline] - pub fn average_upload_per_sec(&self) -> u64 { - self.bandwidth.average_upload_per_sec() - } - - /// Returns the peer id of the local node. - #[inline] - pub fn peer_id(&self) -> &PeerId { - Swarm::local_peer_id(&self.swarm) - } - - /// Returns the list of all the peers we are connected to. - #[inline] - pub fn connected_peers<'a>(&'a self) -> impl Iterator + 'a { - self.nodes_info.keys() - } - - /// Returns the way we are connected to a node. - #[inline] - pub fn node_endpoint(&self, peer_id: &PeerId) -> Option<&ConnectedPoint> { - self.nodes_info.get(peer_id).map(|info| &info.endpoint) - } - - /// Returns the client version reported by a node. - pub fn node_client_version(&self, peer_id: &PeerId) -> Option<&str> { - self.nodes_info.get(peer_id) - .and_then(|info| info.client_version.as_ref().map(|s| &s[..])) - } - - /// Sends a message to a peer using the custom protocol. - /// - /// Has no effect if the connection to the node has been closed, or if the node index is - /// invalid. - pub fn send_custom_message( - &mut self, - peer_id: &PeerId, - message: TMessage - ) { - self.swarm.send_custom_message(peer_id, message); - } - - /// Disconnects a peer. 
- /// - /// This is asynchronous and will not immediately close the peer. - /// Corresponding closing events will be generated once the closing actually happens. - pub fn drop_node(&mut self, peer_id: &PeerId) { - if let Some(info) = self.nodes_info.get(peer_id) { - debug!(target: "sub-libp2p", "Dropping {:?} on purpose ({:?}, {:?})", +where + TMessage: CustomMessage + Send + 'static, +{ + /// Returns a struct containing tons of useful information about the network. + pub fn state(&mut self) -> NetworkState { + let connected_peers = { + let swarm = &mut self.swarm; + self.nodes_info + .iter() + .map(move |(peer_id, info)| { + let known_addresses = + NetworkBehaviour::addresses_of_peer(&mut **swarm, peer_id) + .into_iter() + .collect(); + + ( + peer_id.to_base58(), + NetworkStatePeer { + endpoint: info.endpoint.clone().into(), + version_string: info.client_version.clone(), + latest_ping_time: info.latest_ping, + enabled: swarm.is_enabled(&peer_id), + open: swarm.is_open(&peer_id), + known_addresses, + }, + ) + }) + .collect() + }; + + let not_connected_peers = { + let swarm = &mut self.swarm; + let nodes_info = &self.nodes_info; + let list = swarm + .known_peers() + .filter(|p| !nodes_info.contains_key(p)) + .cloned() + .collect::>(); + list.into_iter() + .map(move |peer_id| { + ( + peer_id.to_base58(), + NetworkStateNotConnectedPeer { + known_addresses: NetworkBehaviour::addresses_of_peer( + &mut **swarm, + &peer_id, + ) + .into_iter() + .collect(), + }, + ) + }) + .collect() + }; + + NetworkState { + peer_id: Swarm::local_peer_id(&self.swarm).to_base58(), + listened_addresses: Swarm::listeners(&self.swarm).cloned().collect(), + external_addresses: Swarm::external_addresses(&self.swarm).cloned().collect(), + average_download_per_sec: self.bandwidth.average_download_per_sec(), + average_upload_per_sec: self.bandwidth.average_upload_per_sec(), + connected_peers, + not_connected_peers, + peerset: self.swarm.peerset_debug_info(), + } + } + + /// Returns an iterator that produces the list of addresses we're listening on. + #[inline] + pub fn listeners(&self) -> impl Iterator { + Swarm::listeners(&self.swarm) + } + + /// Returns the downloaded bytes per second averaged over the past few seconds. + #[inline] + pub fn average_download_per_sec(&self) -> u64 { + self.bandwidth.average_download_per_sec() + } + + /// Returns the uploaded bytes per second averaged over the past few seconds. + #[inline] + pub fn average_upload_per_sec(&self) -> u64 { + self.bandwidth.average_upload_per_sec() + } + + /// Returns the peer id of the local node. + #[inline] + pub fn peer_id(&self) -> &PeerId { + Swarm::local_peer_id(&self.swarm) + } + + /// Returns the list of all the peers we are connected to. + #[inline] + pub fn connected_peers<'a>(&'a self) -> impl Iterator + 'a { + self.nodes_info.keys() + } + + /// Returns the way we are connected to a node. + #[inline] + pub fn node_endpoint(&self, peer_id: &PeerId) -> Option<&ConnectedPoint> { + self.nodes_info.get(peer_id).map(|info| &info.endpoint) + } + + /// Returns the client version reported by a node. + pub fn node_client_version(&self, peer_id: &PeerId) -> Option<&str> { + self.nodes_info + .get(peer_id) + .and_then(|info| info.client_version.as_ref().map(|s| &s[..])) + } + + /// Sends a message to a peer using the custom protocol. + /// + /// Has no effect if the connection to the node has been closed, or if the node index is + /// invalid. 
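// [Editor's illustration, not part of the patch] Diagnostics sketch: the
// read-only accessors above combine into a quick peer dump without
// serializing the full `NetworkState`.
fn _log_peers<TMessage>(service: &substrate_network_libp2p::Service<TMessage>)
where
    TMessage: substrate_network_libp2p::CustomMessage + Send + 'static,
{
    println!("local peer id: {}", service.peer_id().to_base58());
    println!(
        "bandwidth: {} down / {} up (bytes/sec)",
        service.average_download_per_sec(),
        service.average_upload_per_sec()
    );
    for peer in service.connected_peers() {
        // `node_client_version` is `None` until the identify protocol has run.
        let version = service.node_client_version(peer).unwrap_or("<unknown>");
        println!("{}: {}", peer.to_base58(), version);
    }
}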
+ pub fn send_custom_message(&mut self, peer_id: &PeerId, message: TMessage) { + self.swarm.send_custom_message(peer_id, message); + } + + /// Disconnects a peer. + /// + /// This is asynchronous and will not immediately close the peer. + /// Corresponding closing events will be generated once the closing actually happens. + pub fn drop_node(&mut self, peer_id: &PeerId) { + if let Some(info) = self.nodes_info.get(peer_id) { + debug!(target: "sub-libp2p", "Dropping {:?} on purpose ({:?}, {:?})", peer_id, info.endpoint, info.client_version); - self.swarm.drop_node(peer_id); - } - } - - /// Adds a hard-coded address for the given peer, that never expires. - pub fn add_known_address(&mut self, peer_id: PeerId, addr: Multiaddr) { - self.swarm.add_known_address(peer_id, addr) - } - - /// Get debug info for a given peer. - pub fn peer_debug_info(&self, who: &PeerId) -> String { - if let Some(info) = self.nodes_info.get(who) { - format!("{:?} (version: {:?}) through {:?}", who, info.client_version, info.endpoint) - } else { - "unknown".to_string() - } - } - - /// Polls for what happened on the network. - fn poll_swarm(&mut self) -> Poll>, IoError> { - loop { - match self.swarm.poll() { - Ok(Async::Ready(Some(BehaviourOut::CustomProtocolOpen { peer_id, version, endpoint }))) => { - self.nodes_info.insert(peer_id.clone(), NodeInfo { - endpoint, - client_version: None, - latest_ping: None, - }); - let debug_info = self.peer_debug_info(&peer_id); - break Ok(Async::Ready(Some(ServiceEvent::OpenedCustomProtocol { - peer_id, - version, - debug_info, - }))) - } - Ok(Async::Ready(Some(BehaviourOut::CustomProtocolClosed { peer_id, .. }))) => { - let debug_info = self.peer_debug_info(&peer_id); - self.nodes_info.remove(&peer_id); - break Ok(Async::Ready(Some(ServiceEvent::ClosedCustomProtocol { - peer_id, - debug_info, - }))) - } - Ok(Async::Ready(Some(BehaviourOut::CustomMessage { peer_id, message }))) => { - break Ok(Async::Ready(Some(ServiceEvent::CustomMessage { - peer_id, - message, - }))) - } - Ok(Async::Ready(Some(BehaviourOut::Clogged { peer_id, messages }))) => { - break Ok(Async::Ready(Some(ServiceEvent::Clogged { - peer_id, - messages, - }))) - } - Ok(Async::Ready(Some(BehaviourOut::Identified { peer_id, info }))) => { - // Contrary to the other events, this one can happen even on nodes which don't - // have any open custom protocol slot. Therefore it is not necessarily in the - // list. - if let Some(n) = self.nodes_info.get_mut(&peer_id) { - n.client_version = Some(info.agent_version); - } - } - Ok(Async::Ready(Some(BehaviourOut::PingSuccess { peer_id, ping_time }))) => { - // Contrary to the other events, this one can happen even on nodes which don't - // have any open custom protocol slot. Therefore it is not necessarily in the - // list. - if let Some(n) = self.nodes_info.get_mut(&peer_id) { - n.latest_ping = Some(ping_time); - } - } - Ok(Async::NotReady) => break Ok(Async::NotReady), - Ok(Async::Ready(None)) => unreachable!("The Swarm stream never ends"), - Err(_) => unreachable!("The Swarm never errors"), - } - } - } + self.swarm.drop_node(peer_id); + } + } + + /// Adds a hard-coded address for the given peer, that never expires. + pub fn add_known_address(&mut self, peer_id: PeerId, addr: Multiaddr) { + self.swarm.add_known_address(peer_id, addr) + } + + /// Get debug info for a given peer. 
+ pub fn peer_debug_info(&self, who: &PeerId) -> String { + if let Some(info) = self.nodes_info.get(who) { + format!( + "{:?} (version: {:?}) through {:?}", + who, info.client_version, info.endpoint + ) + } else { + "unknown".to_string() + } + } + + /// Polls for what happened on the network. + fn poll_swarm(&mut self) -> Poll>, IoError> { + loop { + match self.swarm.poll() { + Ok(Async::Ready(Some(BehaviourOut::CustomProtocolOpen { + peer_id, + version, + endpoint, + }))) => { + self.nodes_info.insert( + peer_id.clone(), + NodeInfo { + endpoint, + client_version: None, + latest_ping: None, + }, + ); + let debug_info = self.peer_debug_info(&peer_id); + break Ok(Async::Ready(Some(ServiceEvent::OpenedCustomProtocol { + peer_id, + version, + debug_info, + }))); + } + Ok(Async::Ready(Some(BehaviourOut::CustomProtocolClosed { peer_id, .. }))) => { + let debug_info = self.peer_debug_info(&peer_id); + self.nodes_info.remove(&peer_id); + break Ok(Async::Ready(Some(ServiceEvent::ClosedCustomProtocol { + peer_id, + debug_info, + }))); + } + Ok(Async::Ready(Some(BehaviourOut::CustomMessage { peer_id, message }))) => { + break Ok(Async::Ready(Some(ServiceEvent::CustomMessage { + peer_id, + message, + }))); + } + Ok(Async::Ready(Some(BehaviourOut::Clogged { peer_id, messages }))) => { + break Ok(Async::Ready(Some(ServiceEvent::Clogged { + peer_id, + messages, + }))); + } + Ok(Async::Ready(Some(BehaviourOut::Identified { peer_id, info }))) => { + // Contrary to the other events, this one can happen even on nodes which don't + // have any open custom protocol slot. Therefore it is not necessarily in the + // list. + if let Some(n) = self.nodes_info.get_mut(&peer_id) { + n.client_version = Some(info.agent_version); + } + } + Ok(Async::Ready(Some(BehaviourOut::PingSuccess { peer_id, ping_time }))) => { + // Contrary to the other events, this one can happen even on nodes which don't + // have any open custom protocol slot. Therefore it is not necessarily in the + // list. + if let Some(n) = self.nodes_info.get_mut(&peer_id) { + n.latest_ping = Some(ping_time); + } + } + Ok(Async::NotReady) => break Ok(Async::NotReady), + Ok(Async::Ready(None)) => unreachable!("The Swarm stream never ends"), + Err(_) => unreachable!("The Swarm never errors"), + } + } + } } -impl Stream for Service where TMessage: CustomMessage + Send + 'static { - type Item = ServiceEvent; - type Error = IoError; - - fn poll(&mut self) -> Poll, Self::Error> { - if !self.injected_events.is_empty() { - return Ok(Async::Ready(Some(self.injected_events.remove(0)))); - } - - match self.poll_swarm()? { - Async::Ready(value) => return Ok(Async::Ready(value)), - Async::NotReady => (), - } - - // The only way we reach this is if we went through all the `NotReady` paths above, - // ensuring the current task is registered everywhere. - Ok(Async::NotReady) - } +impl Stream for Service +where + TMessage: CustomMessage + Send + 'static, +{ + type Item = ServiceEvent; + type Error = IoError; + + fn poll(&mut self) -> Poll, Self::Error> { + if !self.injected_events.is_empty() { + return Ok(Async::Ready(Some(self.injected_events.remove(0)))); + } + + match self.poll_swarm()? { + Async::Ready(value) => return Ok(Async::Ready(value)), + Async::NotReady => (), + } + + // The only way we reach this is if we went through all the `NotReady` paths above, + // ensuring the current task is registered everywhere. 
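// [Editor's illustration, not part of the patch] Event-loop sketch: since
// `Service` implements `Stream`, it can be driven with `future::poll_fn`,
// exactly as the tests later in this patch do.
fn _wait_for_first_open<TMessage>(
    mut service: substrate_network_libp2p::Service<TMessage>,
) -> impl futures::Future<Item = (), Error = std::io::Error>
where
    TMessage: substrate_network_libp2p::CustomMessage + Send + 'static,
{
    use futures::prelude::*;
    futures::future::poll_fn(move || {
        loop {
            match futures::try_ready!(service.poll()) {
                Some(substrate_network_libp2p::ServiceEvent::OpenedCustomProtocol {
                    peer_id, version, ..
                }) => {
                    println!("opened protocol v{} with {}", version, peer_id.to_base58());
                    return Ok(Async::Ready(()));
                }
                Some(_) => continue, // ignore other events in this sketch
                None => return Ok(Async::Ready(())), // service stream ended
            }
        }
    })
}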
+ Ok(Async::NotReady) + } } diff --git a/core/network-libp2p/src/transport.rs b/core/network-libp2p/src/transport.rs index 404fdb6bda..18cb4955b2 100644 --- a/core/network-libp2p/src/transport.rs +++ b/core/network-libp2p/src/transport.rs @@ -15,11 +15,11 @@ // along with Substrate. If not, see . use futures::prelude::*; +use libp2p::core::{self, muxing::StreamMuxerBox, transport::boxed::Boxed}; use libp2p::{ - InboundUpgradeExt, OutboundUpgradeExt, PeerId, Transport, - mplex, identity, secio, yamux, tcp, dns, websocket, bandwidth + bandwidth, dns, identity, mplex, secio, tcp, websocket, yamux, InboundUpgradeExt, + OutboundUpgradeExt, PeerId, Transport, }; -use libp2p::core::{self, transport::boxed::Boxed, muxing::StreamMuxerBox}; use std::{io, sync::Arc, time::Duration, usize}; pub use self::bandwidth::BandwidthSinks; @@ -29,33 +29,36 @@ pub use self::bandwidth::BandwidthSinks; /// Returns a `BandwidthSinks` object that allows querying the average bandwidth produced by all /// the connections spawned with this transport. pub fn build_transport( - keypair: identity::Keypair -) -> (Boxed<(PeerId, StreamMuxerBox), io::Error>, Arc) { - let mut mplex_config = mplex::MplexConfig::new(); - mplex_config.max_buffer_len_behaviour(mplex::MaxBufferBehaviour::Block); - mplex_config.max_buffer_len(usize::MAX); - - let transport = tcp::TcpConfig::new(); - let transport = websocket::WsConfig::new(transport.clone()).or_transport(transport); - let transport = dns::DnsConfig::new(transport); - let (transport, sinks) = bandwidth::BandwidthLogging::new(transport, Duration::from_secs(5)); - - // TODO: rework the transport creation (https://github.com/libp2p/rust-libp2p/issues/783) - let transport = transport - .with_upgrade(secio::SecioConfig::new(keypair)) - .and_then(move |out, endpoint| { - let peer_id = out.remote_key.into_peer_id(); - let peer_id2 = peer_id.clone(); - let upgrade = core::upgrade::SelectUpgrade::new(yamux::Config::default(), mplex_config) - .map_inbound(move |muxer| (peer_id, muxer)) - .map_outbound(move |muxer| (peer_id2, muxer)); - - core::upgrade::apply(out.stream, upgrade, endpoint) - .map(|(id, muxer)| (id, core::muxing::StreamMuxerBox::new(muxer))) - }) - .with_timeout(Duration::from_secs(20)) - .map_err(|err| io::Error::new(io::ErrorKind::Other, err)) - .boxed(); - - (transport, sinks) + keypair: identity::Keypair, +) -> ( + Boxed<(PeerId, StreamMuxerBox), io::Error>, + Arc, +) { + let mut mplex_config = mplex::MplexConfig::new(); + mplex_config.max_buffer_len_behaviour(mplex::MaxBufferBehaviour::Block); + mplex_config.max_buffer_len(usize::MAX); + + let transport = tcp::TcpConfig::new(); + let transport = websocket::WsConfig::new(transport.clone()).or_transport(transport); + let transport = dns::DnsConfig::new(transport); + let (transport, sinks) = bandwidth::BandwidthLogging::new(transport, Duration::from_secs(5)); + + // TODO: rework the transport creation (https://github.com/libp2p/rust-libp2p/issues/783) + let transport = transport + .with_upgrade(secio::SecioConfig::new(keypair)) + .and_then(move |out, endpoint| { + let peer_id = out.remote_key.into_peer_id(); + let peer_id2 = peer_id.clone(); + let upgrade = core::upgrade::SelectUpgrade::new(yamux::Config::default(), mplex_config) + .map_inbound(move |muxer| (peer_id, muxer)) + .map_outbound(move |muxer| (peer_id2, muxer)); + + core::upgrade::apply(out.stream, upgrade, endpoint) + .map(|(id, muxer)| (id, core::muxing::StreamMuxerBox::new(muxer))) + }) + .with_timeout(Duration::from_secs(20)) + .map_err(|err| 
io::Error::new(io::ErrorKind::Other, err)) + .boxed(); + + (transport, sinks) } diff --git a/core/network-libp2p/tests/test.rs b/core/network-libp2p/tests/test.rs index 437f651184..ec1b0620ae 100644 --- a/core/network-libp2p/tests/test.rs +++ b/core/network-libp2p/tests/test.rs @@ -14,245 +14,266 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use futures::{future, stream, prelude::*, try_ready}; +use futures::{future, prelude::*, stream, try_ready}; use rand::seq::SliceRandom; use std::io; -use substrate_network_libp2p::{CustomMessage, multiaddr::Protocol, ServiceEvent, build_multiaddr}; +use substrate_network_libp2p::{build_multiaddr, multiaddr::Protocol, CustomMessage, ServiceEvent}; /// Builds two services. The second one and further have the first one as its bootstrap node. /// This is to be used only for testing, and a panic will happen if something goes wrong. fn build_nodes(num: usize) -> Vec> - where TMsg: CustomMessage + Send + 'static +where + TMsg: CustomMessage + Send + 'static, { - let mut result: Vec> = Vec::with_capacity(num); - - for _ in 0 .. num { - let mut boot_nodes = Vec::new(); - if !result.is_empty() { - let mut bootnode = result[0].listeners().next().unwrap().clone(); - bootnode.append(Protocol::P2p(result[0].peer_id().clone().into())); - boot_nodes.push(bootnode.to_string()); - } - - let config = substrate_network_libp2p::NetworkConfiguration { - listen_addresses: vec![build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(0u16)]], - boot_nodes, - ..substrate_network_libp2p::NetworkConfiguration::default() - }; - - let proto = substrate_network_libp2p::RegisteredProtocol::new(*b"tst", &[1]); - result.push(substrate_network_libp2p::start_service(config, proto).unwrap().0); - } - - result + let mut result: Vec> = Vec::with_capacity(num); + + for _ in 0..num { + let mut boot_nodes = Vec::new(); + if !result.is_empty() { + let mut bootnode = result[0].listeners().next().unwrap().clone(); + bootnode.append(Protocol::P2p(result[0].peer_id().clone().into())); + boot_nodes.push(bootnode.to_string()); + } + + let config = substrate_network_libp2p::NetworkConfiguration { + listen_addresses: vec![build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(0u16)]], + boot_nodes, + ..substrate_network_libp2p::NetworkConfiguration::default() + }; + + let proto = substrate_network_libp2p::RegisteredProtocol::new(*b"tst", &[1]); + result.push( + substrate_network_libp2p::start_service(config, proto) + .unwrap() + .0, + ); + } + + result } #[test] fn basic_two_nodes_connectivity() { - let (mut service1, mut service2) = { - let mut l = build_nodes::>(2).into_iter(); - let a = l.next().unwrap(); - let b = l.next().unwrap(); - (a, b) - }; - - let fut1 = future::poll_fn(move || -> io::Result<_> { - match try_ready!(service1.poll()) { - Some(ServiceEvent::OpenedCustomProtocol { version, .. }) => { - assert_eq!(version, 1); - Ok(Async::Ready(())) - }, - _ => panic!(), - } - }); - - let fut2 = future::poll_fn(move || -> io::Result<_> { - match try_ready!(service2.poll()) { - Some(ServiceEvent::OpenedCustomProtocol { version, .. 
}) => { - assert_eq!(version, 1); - Ok(Async::Ready(())) - }, - _ => panic!(), - } - }); - - let combined = fut1.select(fut2).map_err(|(err, _)| err); - tokio::runtime::Runtime::new().unwrap().block_on_all(combined).unwrap(); + let (mut service1, mut service2) = { + let mut l = build_nodes::>(2).into_iter(); + let a = l.next().unwrap(); + let b = l.next().unwrap(); + (a, b) + }; + + let fut1 = future::poll_fn(move || -> io::Result<_> { + match try_ready!(service1.poll()) { + Some(ServiceEvent::OpenedCustomProtocol { version, .. }) => { + assert_eq!(version, 1); + Ok(Async::Ready(())) + } + _ => panic!(), + } + }); + + let fut2 = future::poll_fn(move || -> io::Result<_> { + match try_ready!(service2.poll()) { + Some(ServiceEvent::OpenedCustomProtocol { version, .. }) => { + assert_eq!(version, 1); + Ok(Async::Ready(())) + } + _ => panic!(), + } + }); + + let combined = fut1.select(fut2).map_err(|(err, _)| err); + tokio::runtime::Runtime::new() + .unwrap() + .block_on_all(combined) + .unwrap(); } #[test] fn two_nodes_transfer_lots_of_packets() { - // We spawn two nodes, then make the first one send lots of packets to the second one. The test - // ends when the second one has received all of them. - - // Note that if we go too high, we will reach the limit to the number of simultaneous - // substreams allowed by the multiplexer. - const NUM_PACKETS: u32 = 5000; - - let (mut service1, mut service2) = { - let mut l = build_nodes::>(2).into_iter(); - let a = l.next().unwrap(); - let b = l.next().unwrap(); - (a, b) - }; - - let fut1 = future::poll_fn(move || -> io::Result<_> { - loop { - match try_ready!(service1.poll()) { - Some(ServiceEvent::OpenedCustomProtocol { peer_id, .. }) => { - for n in 0 .. NUM_PACKETS { - service1.send_custom_message(&peer_id, vec![(n % 256) as u8]); - } - }, - _ => panic!(), - } - } - }); - - let mut packet_counter = 0u32; - let fut2 = future::poll_fn(move || -> io::Result<_> { - loop { - match try_ready!(service2.poll()) { - Some(ServiceEvent::OpenedCustomProtocol { .. }) => {}, - Some(ServiceEvent::CustomMessage { message, .. }) => { - assert_eq!(message.len(), 1); - packet_counter += 1; - if packet_counter == NUM_PACKETS { - return Ok(Async::Ready(())) - } - } - _ => panic!(), - } - } - }); - - let combined = fut1.select(fut2).map_err(|(err, _)| err); - tokio::runtime::Runtime::new().unwrap().block_on(combined).unwrap(); + // We spawn two nodes, then make the first one send lots of packets to the second one. The test + // ends when the second one has received all of them. + + // Note that if we go too high, we will reach the limit to the number of simultaneous + // substreams allowed by the multiplexer. + const NUM_PACKETS: u32 = 5000; + + let (mut service1, mut service2) = { + let mut l = build_nodes::>(2).into_iter(); + let a = l.next().unwrap(); + let b = l.next().unwrap(); + (a, b) + }; + + let fut1 = future::poll_fn(move || -> io::Result<_> { + loop { + match try_ready!(service1.poll()) { + Some(ServiceEvent::OpenedCustomProtocol { peer_id, .. }) => { + for n in 0..NUM_PACKETS { + service1.send_custom_message(&peer_id, vec![(n % 256) as u8]); + } + } + _ => panic!(), + } + } + }); + + let mut packet_counter = 0u32; + let fut2 = future::poll_fn(move || -> io::Result<_> { + loop { + match try_ready!(service2.poll()) { + Some(ServiceEvent::OpenedCustomProtocol { .. }) => {} + Some(ServiceEvent::CustomMessage { message, .. 
}) => { + assert_eq!(message.len(), 1); + packet_counter += 1; + if packet_counter == NUM_PACKETS { + return Ok(Async::Ready(())); + } + } + _ => panic!(), + } + } + }); + + let combined = fut1.select(fut2).map_err(|(err, _)| err); + tokio::runtime::Runtime::new() + .unwrap() + .block_on(combined) + .unwrap(); } #[test] fn many_nodes_connectivity() { - // Creates many nodes, then make sure that they are all connected to each other. - // Note: if you increase this number, keep in mind that there's a limit to the number of - // simultaneous connections which will make the test fail if it is reached. This can be - // increased in the `NetworkConfiguration`. - const NUM_NODES: usize = 25; - - let mut futures = build_nodes::>(NUM_NODES) - .into_iter() - .map(move |mut node| { - let mut num_connecs = 0; - stream::poll_fn(move || -> io::Result<_> { - loop { - const MAX_BANDWIDTH: u64 = NUM_NODES as u64 * 1024; // 1kiB/s/node - assert!(node.average_download_per_sec() < MAX_BANDWIDTH); - assert!(node.average_upload_per_sec() < MAX_BANDWIDTH); - - match try_ready!(node.poll()) { - Some(ServiceEvent::OpenedCustomProtocol { .. }) => { - num_connecs += 1; - assert!(num_connecs < NUM_NODES); - if num_connecs == NUM_NODES - 1 { - return Ok(Async::Ready(Some(true))) - } - } - Some(ServiceEvent::ClosedCustomProtocol { .. }) => { - let was_success = num_connecs == NUM_NODES - 1; - num_connecs -= 1; - if was_success && num_connecs < NUM_NODES - 1 { - return Ok(Async::Ready(Some(false))) - } - } - _ => panic!(), - } - } - }) - }) - .collect::>(); - - let mut successes = 0; - let combined = future::poll_fn(move || -> io::Result<_> { - for node in futures.iter_mut() { - match node.poll()? { - Async::Ready(Some(true)) => successes += 1, - Async::Ready(Some(false)) => successes -= 1, - Async::Ready(None) => unreachable!(), - Async::NotReady => () - } - } - - if successes == NUM_NODES { - Ok(Async::Ready(())) - } else { - Ok(Async::NotReady) - } - }); - - tokio::runtime::Runtime::new().unwrap().block_on(combined).unwrap(); + // Creates many nodes, then make sure that they are all connected to each other. + // Note: if you increase this number, keep in mind that there's a limit to the number of + // simultaneous connections which will make the test fail if it is reached. This can be + // increased in the `NetworkConfiguration`. + const NUM_NODES: usize = 25; + + let mut futures = build_nodes::>(NUM_NODES) + .into_iter() + .map(move |mut node| { + let mut num_connecs = 0; + stream::poll_fn(move || -> io::Result<_> { + loop { + const MAX_BANDWIDTH: u64 = NUM_NODES as u64 * 1024; // 1kiB/s/node + assert!(node.average_download_per_sec() < MAX_BANDWIDTH); + assert!(node.average_upload_per_sec() < MAX_BANDWIDTH); + + match try_ready!(node.poll()) { + Some(ServiceEvent::OpenedCustomProtocol { .. }) => { + num_connecs += 1; + assert!(num_connecs < NUM_NODES); + if num_connecs == NUM_NODES - 1 { + return Ok(Async::Ready(Some(true))); + } + } + Some(ServiceEvent::ClosedCustomProtocol { .. }) => { + let was_success = num_connecs == NUM_NODES - 1; + num_connecs -= 1; + if was_success && num_connecs < NUM_NODES - 1 { + return Ok(Async::Ready(Some(false))); + } + } + _ => panic!(), + } + } + }) + }) + .collect::>(); + + let mut successes = 0; + let combined = future::poll_fn(move || -> io::Result<_> { + for node in futures.iter_mut() { + match node.poll()? 
{ + Async::Ready(Some(true)) => successes += 1, + Async::Ready(Some(false)) => successes -= 1, + Async::Ready(None) => unreachable!(), + Async::NotReady => (), + } + } + + if successes == NUM_NODES { + Ok(Async::Ready(())) + } else { + Ok(Async::NotReady) + } + }); + + tokio::runtime::Runtime::new() + .unwrap() + .block_on(combined) + .unwrap(); } #[test] fn basic_two_nodes_requests_in_parallel() { - let (mut service1, mut service2) = { - let mut l = build_nodes::<(Option, Vec)>(2).into_iter(); - let a = l.next().unwrap(); - let b = l.next().unwrap(); - (a, b) - }; - - // Generate random messages with or without a request id. - let mut to_send = { - let mut to_send = Vec::new(); - let mut next_id = 0; - for _ in 0..200 { // Note: don't make that number too high or the CPU usage will explode. - let id = if rand::random::() % 4 != 0 { - let i = next_id; - next_id += 1; - Some(i) - } else { - None - }; - - let msg = (id, (0..10).map(|_| rand::random::()).collect::>()); - to_send.push(msg); - } - to_send - }; - - // Clone `to_send` in `to_receive`. Below we will remove from `to_receive` the messages we - // receive, until the list is empty. - let mut to_receive = to_send.clone(); - to_send.shuffle(&mut rand::thread_rng()); - - let fut1 = future::poll_fn(move || -> io::Result<_> { - loop { - match try_ready!(service1.poll()) { - Some(ServiceEvent::OpenedCustomProtocol { peer_id, .. }) => { - for msg in to_send.drain(..) { - service1.send_custom_message(&peer_id, msg); - } - }, - _ => panic!(), - } - } - }); - - let fut2 = future::poll_fn(move || -> io::Result<_> { - loop { - match try_ready!(service2.poll()) { - Some(ServiceEvent::OpenedCustomProtocol { .. }) => {}, - Some(ServiceEvent::CustomMessage { message, .. }) => { - let pos = to_receive.iter().position(|m| *m == message).unwrap(); - to_receive.remove(pos); - if to_receive.is_empty() { - return Ok(Async::Ready(())) - } - } - _ => panic!(), - } - } - }); - - let combined = fut1.select(fut2).map_err(|(err, _)| err); - tokio::runtime::Runtime::new().unwrap().block_on_all(combined).unwrap(); + let (mut service1, mut service2) = { + let mut l = build_nodes::<(Option, Vec)>(2).into_iter(); + let a = l.next().unwrap(); + let b = l.next().unwrap(); + (a, b) + }; + + // Generate random messages with or without a request id. + let mut to_send = { + let mut to_send = Vec::new(); + let mut next_id = 0; + for _ in 0..200 { + // Note: don't make that number too high or the CPU usage will explode. + let id = if rand::random::() % 4 != 0 { + let i = next_id; + next_id += 1; + Some(i) + } else { + None + }; + + let msg = ( + id, + (0..10).map(|_| rand::random::()).collect::>(), + ); + to_send.push(msg); + } + to_send + }; + + // Clone `to_send` in `to_receive`. Below we will remove from `to_receive` the messages we + // receive, until the list is empty. + let mut to_receive = to_send.clone(); + to_send.shuffle(&mut rand::thread_rng()); + + let fut1 = future::poll_fn(move || -> io::Result<_> { + loop { + match try_ready!(service1.poll()) { + Some(ServiceEvent::OpenedCustomProtocol { peer_id, .. }) => { + for msg in to_send.drain(..) { + service1.send_custom_message(&peer_id, msg); + } + } + _ => panic!(), + } + } + }); + + let fut2 = future::poll_fn(move || -> io::Result<_> { + loop { + match try_ready!(service2.poll()) { + Some(ServiceEvent::OpenedCustomProtocol { .. }) => {} + Some(ServiceEvent::CustomMessage { message, .. 
}) => {
+                    let pos = to_receive.iter().position(|m| *m == message).unwrap();
+                    to_receive.remove(pos);
+                    if to_receive.is_empty() {
+                        return Ok(Async::Ready(()));
+                    }
+                }
+                _ => panic!(),
+            }
+        }
+    });
+
+    let combined = fut1.select(fut2).map_err(|(err, _)| err);
+    tokio::runtime::Runtime::new()
+        .unwrap()
+        .block_on_all(combined)
+        .unwrap();
 }
diff --git a/core/network/src/blocks.rs b/core/network/src/blocks.rs
index 60c6886f09..bf21cd723d 100644
--- a/core/network/src/blocks.rs
+++ b/core/network/src/blocks.rs
@@ -14,274 +14,374 @@
 // You should have received a copy of the GNU General Public License
 // along with Substrate. If not, see <http://www.gnu.org/licenses/>.
 
-use std::mem;
-use std::cmp;
-use std::ops::Range;
-use std::collections::{HashMap, BTreeMap};
-use std::collections::hash_map::Entry;
+use crate::message;
 use log::trace;
 use network_libp2p::PeerId;
-use runtime_primitives::traits::{Block as BlockT, NumberFor, As};
-use crate::message;
+use runtime_primitives::traits::{As, Block as BlockT, NumberFor};
+use std::cmp;
+use std::collections::hash_map::Entry;
+use std::collections::{BTreeMap, HashMap};
+use std::mem;
+use std::ops::Range;
 
 const MAX_PARALLEL_DOWNLOADS: u32 = 1;
 
 /// Block data with origin.
 #[derive(Debug, Clone, PartialEq, Eq)]
 pub struct BlockData<B: BlockT> {
-	/// The Block Message from the wire
-	pub block: message::BlockData<B>,
-	/// The peer, we received this from
-	pub origin: Option<PeerId>,
+    /// The Block Message from the wire
+    pub block: message::BlockData<B>,
+    /// The peer, we received this from
+    pub origin: Option<PeerId>,
 }
 
 #[derive(Debug)]
 enum BlockRangeState<B: BlockT> {
-	Downloading {
-		len: NumberFor<B>,
-		downloading: u32,
-	},
-	Complete(Vec<BlockData<B>>),
+    Downloading { len: NumberFor<B>, downloading: u32 },
+    Complete(Vec<BlockData<B>>),
 }
 
 impl<B: BlockT> BlockRangeState<B> {
-	pub fn len(&self) -> NumberFor<B> {
-		match *self {
-			BlockRangeState::Downloading { len, .. } => len,
-			BlockRangeState::Complete(ref blocks) => As::sa(blocks.len() as u64),
-		}
-	}
+    pub fn len(&self) -> NumberFor<B> {
+        match *self {
+            BlockRangeState::Downloading { len, .. } => len,
+            BlockRangeState::Complete(ref blocks) => As::sa(blocks.len() as u64),
+        }
+    }
 }
 
 /// A collection of blocks being downloaded.
 #[derive(Default)]
 pub struct BlockCollection<B: BlockT> {
-	/// Downloaded blocks.
-	blocks: BTreeMap<NumberFor<B>, BlockRangeState<B>>,
-	peer_requests: HashMap<PeerId, NumberFor<B>>,
+    /// Downloaded blocks.
+    blocks: BTreeMap<NumberFor<B>, BlockRangeState<B>>,
+    peer_requests: HashMap<PeerId, NumberFor<B>>,
 }
 
 impl<B: BlockT> BlockCollection<B> {
-	/// Create a new instance.
-	pub fn new() -> Self {
-		BlockCollection {
-			blocks: BTreeMap::new(),
-			peer_requests: HashMap::new(),
-		}
-	}
-
-	/// Clear everything.
-	pub fn clear(&mut self) {
-		self.blocks.clear();
-		self.peer_requests.clear();
-	}
-
-	/// Insert a set of blocks into collection.
-	pub fn insert(&mut self, start: NumberFor<B>, blocks: Vec<message::BlockData<B>>, who: PeerId) {
-		if blocks.is_empty() {
-			return;
-		}
-
-		match self.blocks.get(&start) {
-			Some(&BlockRangeState::Downloading { .. }) => {
-				trace!(target: "sync", "Ignored block data still marked as being downloaded: {}", start);
-				debug_assert!(false);
-				return;
-			},
-			Some(&BlockRangeState::Complete(ref existing)) if existing.len() >= blocks.len() => {
-				trace!(target: "sync", "Ignored block data already downloaded: {}", start);
-				return;
-			},
-			_ => (),
-		}
-
-		self.blocks.insert(start, BlockRangeState::Complete(blocks.into_iter()
-			.map(|b| BlockData { origin: Some(who.clone()), block: b }).collect()));
-	}
-
-	/// Returns a set of block hashes that require a header download. The returned set is marked as being downloaded.
-	pub fn needed_blocks(&mut self, who: PeerId, count: usize, peer_best: NumberFor<B>, common: NumberFor<B>) -> Option<Range<NumberFor<B>>> {
-		// First block number that we need to download
-		let first_different = common + As::sa(1);
-		let count = As::sa(count as u64);
-		let (mut range, downloading) = {
-			let mut downloading_iter = self.blocks.iter().peekable();
-			let mut prev: Option<(&NumberFor<B>, &BlockRangeState<B>)> = None;
-			loop {
-				let next = downloading_iter.next();
-				break match &(prev, next) {
-					&(Some((start, &BlockRangeState::Downloading { ref len, downloading })), _) if downloading < MAX_PARALLEL_DOWNLOADS =>
-						(*start .. *start + *len, downloading),
-					&(Some((start, r)), Some((next_start, _))) if *start + r.len() < *next_start =>
-						(*start + r.len() .. cmp::min(*next_start, *start + r.len() + count), 0), // gap
-					&(Some((start, r)), None) =>
-						(*start + r.len() .. *start + r.len() + count, 0), // last range
-					&(None, None) =>
-						(first_different .. first_different + count, 0), // empty
-					&(None, Some((start, _))) if *start > first_different =>
-						(first_different .. cmp::min(first_different + count, *start), 0), // gap at the start
-					_ => {
-						prev = next;
-						continue
-					},
-				}
-			}
-		};
-		// crop to peers best
-		if range.start > peer_best {
-			trace!(target: "sync", "Out of range for peer {} ({} vs {})", who, range.start, peer_best);
-			return None;
-		}
-		range.end = cmp::min(peer_best + As::sa(1), range.end);
-		self.peer_requests.insert(who, range.start);
-		self.blocks.insert(range.start, BlockRangeState::Downloading { len: range.end - range.start, downloading: downloading + 1 });
-		if range.end <= range.start {
-			panic!("Empty range {:?}, count={}, peer_best={}, common={}, blocks={:?}", range, count, peer_best, common, self.blocks);
-		}
-		Some(range)
-	}
-
-	/// Get a valid chain of blocks ordered in descending order and ready for importing into blockchain.
-	pub fn drain(&mut self, from: NumberFor<B>) -> Vec<BlockData<B>> {
-		let mut drained = Vec::new();
-		let mut ranges = Vec::new();
-		{
-			let mut prev = from;
-			for (start, range_data) in &mut self.blocks {
-				match range_data {
-					&mut BlockRangeState::Complete(ref mut blocks) if *start <= prev => {
-						prev = *start + As::sa(blocks.len() as u64);
-						let mut blocks = mem::replace(blocks, Vec::new());
-						drained.append(&mut blocks);
-						ranges.push(*start);
-					},
-					_ => break,
-				}
-			}
-		}
-		for r in ranges {
-			self.blocks.remove(&r);
-		}
-		trace!(target: "sync", "Drained {} blocks", drained.len());
-		drained
-	}
-
-	pub fn clear_peer_download(&mut self, who: &PeerId) {
-		match self.peer_requests.entry(who.clone()) {
-			Entry::Occupied(entry) => {
-				let start = entry.remove();
-				let remove = match self.blocks.get_mut(&start) {
-					Some(&mut BlockRangeState::Downloading { ref mut downloading, .. }) if *downloading > 1 => {
-						*downloading = *downloading - 1;
-						false
-					},
-					Some(&mut BlockRangeState::Downloading { .. }) => {
-						true
-					},
-					_ => {
-						debug_assert!(false);
-						false
-					}
-				};
-				if remove {
-					self.blocks.remove(&start);
-				}
-			},
-			_ => (),
-		}
-	}
+    /// Create a new instance.
+    pub fn new() -> Self {
+        BlockCollection {
+            blocks: BTreeMap::new(),
+            peer_requests: HashMap::new(),
+        }
+    }
+
+    /// Clear everything.
+    pub fn clear(&mut self) {
+        self.blocks.clear();
+        self.peer_requests.clear();
+    }
+
+    /// Insert a set of blocks into collection.
+    pub fn insert(&mut self, start: NumberFor<B>, blocks: Vec<message::BlockData<B>>, who: PeerId) {
+        if blocks.is_empty() {
+            return;
+        }
+
+        match self.blocks.get(&start) {
+            Some(&BlockRangeState::Downloading { .. }) => {
+                trace!(target: "sync", "Ignored block data still marked as being downloaded: {}", start);
+                debug_assert!(false);
+                return;
+            }
+            Some(&BlockRangeState::Complete(ref existing)) if existing.len() >= blocks.len() => {
+                trace!(target: "sync", "Ignored block data already downloaded: {}", start);
+                return;
+            }
+            _ => (),
+        }
+
+        self.blocks.insert(
+            start,
+            BlockRangeState::Complete(
+                blocks
+                    .into_iter()
+                    .map(|b| BlockData {
+                        origin: Some(who.clone()),
+                        block: b,
+                    })
+                    .collect(),
+            ),
+        );
+    }
+
+    /// Returns a set of block hashes that require a header download. The returned set is marked as being downloaded.
+    pub fn needed_blocks(
+        &mut self,
+        who: PeerId,
+        count: usize,
+        peer_best: NumberFor<B>,
+        common: NumberFor<B>,
+    ) -> Option<Range<NumberFor<B>>> {
+        // First block number that we need to download
+        let first_different = common + As::sa(1);
+        let count = As::sa(count as u64);
+        let (mut range, downloading) = {
+            let mut downloading_iter = self.blocks.iter().peekable();
+            let mut prev: Option<(&NumberFor<B>, &BlockRangeState<B>)> = None;
+            loop {
+                let next = downloading_iter.next();
+                break match &(prev, next) {
+                    &(
+                        Some((
+                            start,
+                            &BlockRangeState::Downloading {
+                                ref len,
+                                downloading,
+                            },
+                        )),
+                        _,
+                    ) if downloading < MAX_PARALLEL_DOWNLOADS => {
+                        (*start..*start + *len, downloading)
+                    }
+                    &(Some((start, r)), Some((next_start, _)))
+                        if *start + r.len() < *next_start =>
+                    {
+                        (
+                            *start + r.len()..cmp::min(*next_start, *start + r.len() + count),
+                            0,
+                        )
+                    } // gap
+                    &(Some((start, r)), None) => (*start + r.len()..*start + r.len() + count, 0), // last range
+                    &(None, None) => (first_different..first_different + count, 0), // empty
+                    &(None, Some((start, _))) if *start > first_different => (
+                        first_different..cmp::min(first_different + count, *start),
+                        0,
+                    ), // gap at the start
+                    _ => {
+                        prev = next;
+                        continue;
+                    }
+                };
+            }
+        };
+        // crop to peers best
+        if range.start > peer_best {
+            trace!(target: "sync", "Out of range for peer {} ({} vs {})", who, range.start, peer_best);
+            return None;
+        }
+        range.end = cmp::min(peer_best + As::sa(1), range.end);
+        self.peer_requests.insert(who, range.start);
+        self.blocks.insert(
+            range.start,
+            BlockRangeState::Downloading {
+                len: range.end - range.start,
+                downloading: downloading + 1,
+            },
+        );
+        if range.end <= range.start {
+            panic!(
+                "Empty range {:?}, count={}, peer_best={}, common={}, blocks={:?}",
+                range, count, peer_best, common, self.blocks
+            );
+        }
+        Some(range)
+    }
+
+    /// Get a valid chain of blocks ordered in descending order and ready for importing into blockchain.
+    pub fn drain(&mut self, from: NumberFor<B>) -> Vec<BlockData<B>> {
+        let mut drained = Vec::new();
+        let mut ranges = Vec::new();
+        {
+            let mut prev = from;
+            for (start, range_data) in &mut self.blocks {
+                match range_data {
+                    &mut BlockRangeState::Complete(ref mut blocks) if *start <= prev => {
+                        prev = *start + As::sa(blocks.len() as u64);
+                        let mut blocks = mem::replace(blocks, Vec::new());
+                        drained.append(&mut blocks);
+                        ranges.push(*start);
+                    }
+                    _ => break,
+                }
+            }
+        }
+        for r in ranges {
+            self.blocks.remove(&r);
+        }
+        trace!(target: "sync", "Drained {} blocks", drained.len());
+        drained
+    }
+
+    pub fn clear_peer_download(&mut self, who: &PeerId) {
+        match self.peer_requests.entry(who.clone()) {
+            Entry::Occupied(entry) => {
+                let start = entry.remove();
+                let remove = match self.blocks.get_mut(&start) {
+                    Some(&mut BlockRangeState::Downloading {
+                        ref mut downloading,
+                        ..
+ }) if *downloading > 1 => { + *downloading = *downloading - 1; + false + } + Some(&mut BlockRangeState::Downloading { .. }) => true, + _ => { + debug_assert!(false); + false + } + }; + if remove { + self.blocks.remove(&start); + } + } + _ => (), + } + } } #[cfg(test)] mod test { - use super::{BlockCollection, BlockData, BlockRangeState}; - use crate::{message, PeerId}; - use runtime_primitives::testing::{Block as RawBlock, ExtrinsicWrapper}; - use primitives::H256; - - type Block = RawBlock>; - - fn is_empty(bc: &BlockCollection) -> bool { - bc.blocks.is_empty() && - bc.peer_requests.is_empty() - } - - fn generate_blocks(n: usize) -> Vec> { - (0 .. n).map(|_| message::generic::BlockData { - hash: H256::random(), - header: None, - body: None, - message_queue: None, - receipt: None, - justification: None, - }).collect() - } - - #[test] - fn create_clear() { - let mut bc = BlockCollection::new(); - assert!(is_empty(&bc)); - bc.insert(1, generate_blocks(100), PeerId::random()); - assert!(!is_empty(&bc)); - bc.clear(); - assert!(is_empty(&bc)); - } - - #[test] - fn insert_blocks() { - let mut bc = BlockCollection::new(); - assert!(is_empty(&bc)); - let peer0 = PeerId::random(); - let peer1 = PeerId::random(); - let peer2 = PeerId::random(); - - let blocks = generate_blocks(150); - assert_eq!(bc.needed_blocks(peer0.clone(), 40, 150, 0), Some(1 .. 41)); - assert_eq!(bc.needed_blocks(peer1.clone(), 40, 150, 0), Some(41 .. 81)); - assert_eq!(bc.needed_blocks(peer2.clone(), 40, 150, 0), Some(81 .. 121)); - - bc.clear_peer_download(&peer1); - bc.insert(41, blocks[41..81].to_vec(), peer1.clone()); - assert_eq!(bc.drain(1), vec![]); - assert_eq!(bc.needed_blocks(peer1.clone(), 40, 150, 0), Some(121 .. 151)); - bc.clear_peer_download(&peer0); - bc.insert(1, blocks[1..11].to_vec(), peer0.clone()); - - assert_eq!(bc.needed_blocks(peer0.clone(), 40, 150, 0), Some(11 .. 41)); - assert_eq!(bc.drain(1), blocks[1..11].iter().map(|b| BlockData { block: b.clone(), origin: Some(peer0.clone()) }).collect::>()); - - bc.clear_peer_download(&peer0); - bc.insert(11, blocks[11..41].to_vec(), peer0.clone()); - - let drained = bc.drain(12); - assert_eq!(drained[..30], blocks[11..41].iter().map(|b| BlockData { block: b.clone(), origin: Some(peer0.clone()) }).collect::>()[..]); - assert_eq!(drained[30..], blocks[41..81].iter().map(|b| BlockData { block: b.clone(), origin: Some(peer1.clone()) }).collect::>()[..]); - - bc.clear_peer_download(&peer2); - assert_eq!(bc.needed_blocks(peer2.clone(), 40, 150, 80), Some(81 .. 121)); - bc.clear_peer_download(&peer2); - bc.insert(81, blocks[81..121].to_vec(), peer2.clone()); - bc.clear_peer_download(&peer1); - bc.insert(121, blocks[121..150].to_vec(), peer1.clone()); - - assert_eq!(bc.drain(80), vec![]); - let drained = bc.drain(81); - assert_eq!(drained[..40], blocks[81..121].iter().map(|b| BlockData { block: b.clone(), origin: Some(peer2.clone()) }).collect::>()[..]); - assert_eq!(drained[40..], blocks[121..150].iter().map(|b| BlockData { block: b.clone(), origin: Some(peer1.clone()) }).collect::>()[..]); - } - - #[test] - fn large_gap() { - let mut bc: BlockCollection = BlockCollection::new(); - bc.blocks.insert(100, BlockRangeState::Downloading { - len: 128, - downloading: 1, - }); - let blocks = generate_blocks(10).into_iter().map(|b| BlockData { block: b, origin: None }).collect(); - bc.blocks.insert(114305, BlockRangeState::Complete(blocks)); - - let peer0 = PeerId::random(); - assert_eq!(bc.needed_blocks(peer0.clone(), 128, 10000, 000), Some(1 .. 
100)); - assert_eq!(bc.needed_blocks(peer0.clone(), 128, 10000, 600), Some(100 + 128 .. 100 + 128 + 128)); - } + use super::{BlockCollection, BlockData, BlockRangeState}; + use crate::{message, PeerId}; + use primitives::H256; + use runtime_primitives::testing::{Block as RawBlock, ExtrinsicWrapper}; + + type Block = RawBlock>; + + fn is_empty(bc: &BlockCollection) -> bool { + bc.blocks.is_empty() && bc.peer_requests.is_empty() + } + + fn generate_blocks(n: usize) -> Vec> { + (0..n) + .map(|_| message::generic::BlockData { + hash: H256::random(), + header: None, + body: None, + message_queue: None, + receipt: None, + justification: None, + }) + .collect() + } + + #[test] + fn create_clear() { + let mut bc = BlockCollection::new(); + assert!(is_empty(&bc)); + bc.insert(1, generate_blocks(100), PeerId::random()); + assert!(!is_empty(&bc)); + bc.clear(); + assert!(is_empty(&bc)); + } + + #[test] + fn insert_blocks() { + let mut bc = BlockCollection::new(); + assert!(is_empty(&bc)); + let peer0 = PeerId::random(); + let peer1 = PeerId::random(); + let peer2 = PeerId::random(); + + let blocks = generate_blocks(150); + assert_eq!(bc.needed_blocks(peer0.clone(), 40, 150, 0), Some(1..41)); + assert_eq!(bc.needed_blocks(peer1.clone(), 40, 150, 0), Some(41..81)); + assert_eq!(bc.needed_blocks(peer2.clone(), 40, 150, 0), Some(81..121)); + + bc.clear_peer_download(&peer1); + bc.insert(41, blocks[41..81].to_vec(), peer1.clone()); + assert_eq!(bc.drain(1), vec![]); + assert_eq!(bc.needed_blocks(peer1.clone(), 40, 150, 0), Some(121..151)); + bc.clear_peer_download(&peer0); + bc.insert(1, blocks[1..11].to_vec(), peer0.clone()); + + assert_eq!(bc.needed_blocks(peer0.clone(), 40, 150, 0), Some(11..41)); + assert_eq!( + bc.drain(1), + blocks[1..11] + .iter() + .map(|b| BlockData { + block: b.clone(), + origin: Some(peer0.clone()) + }) + .collect::>() + ); + + bc.clear_peer_download(&peer0); + bc.insert(11, blocks[11..41].to_vec(), peer0.clone()); + + let drained = bc.drain(12); + assert_eq!( + drained[..30], + blocks[11..41] + .iter() + .map(|b| BlockData { + block: b.clone(), + origin: Some(peer0.clone()) + }) + .collect::>()[..] + ); + assert_eq!( + drained[30..], + blocks[41..81] + .iter() + .map(|b| BlockData { + block: b.clone(), + origin: Some(peer1.clone()) + }) + .collect::>()[..] + ); + + bc.clear_peer_download(&peer2); + assert_eq!(bc.needed_blocks(peer2.clone(), 40, 150, 80), Some(81..121)); + bc.clear_peer_download(&peer2); + bc.insert(81, blocks[81..121].to_vec(), peer2.clone()); + bc.clear_peer_download(&peer1); + bc.insert(121, blocks[121..150].to_vec(), peer1.clone()); + + assert_eq!(bc.drain(80), vec![]); + let drained = bc.drain(81); + assert_eq!( + drained[..40], + blocks[81..121] + .iter() + .map(|b| BlockData { + block: b.clone(), + origin: Some(peer2.clone()) + }) + .collect::>()[..] + ); + assert_eq!( + drained[40..], + blocks[121..150] + .iter() + .map(|b| BlockData { + block: b.clone(), + origin: Some(peer1.clone()) + }) + .collect::>()[..] 
+        );
+    }
+
+    #[test]
+    fn large_gap() {
+        let mut bc: BlockCollection<Block> = BlockCollection::new();
+        bc.blocks.insert(
+            100,
+            BlockRangeState::Downloading {
+                len: 128,
+                downloading: 1,
+            },
+        );
+        let blocks = generate_blocks(10)
+            .into_iter()
+            .map(|b| BlockData {
+                block: b,
+                origin: None,
+            })
+            .collect();
+        bc.blocks.insert(114305, BlockRangeState::Complete(blocks));
+
+        let peer0 = PeerId::random();
+        assert_eq!(
+            bc.needed_blocks(peer0.clone(), 128, 10000, 000),
+            Some(1..100)
+        );
+        assert_eq!(
+            bc.needed_blocks(peer0.clone(), 128, 10000, 600),
+            Some(100 + 128..100 + 128 + 128)
+        );
+    }
 }
diff --git a/core/network/src/chain.rs b/core/network/src/chain.rs
index 92236e7c63..224dab0875 100644
--- a/core/network/src/chain.rs
+++ b/core/network/src/chain.rs
@@ -16,123 +16,150 @@
 
 //! Blockchain access trait
 
-use client::{self, Client as SubstrateClient, ClientInfo, BlockStatus, CallExecutor};
 use client::error::Error;
 use client::light::fetcher::ChangesProof;
+use client::{self, BlockStatus, CallExecutor, Client as SubstrateClient, ClientInfo};
 use consensus::{BlockImport, Error as ConsensusError};
+use primitives::{storage::StorageKey, Blake2Hasher, H256};
+use runtime_primitives::generic::BlockId;
 use runtime_primitives::traits::{Block as BlockT, Header as HeaderT};
-use runtime_primitives::generic::{BlockId};
 use runtime_primitives::Justification;
-use primitives::{H256, Blake2Hasher, storage::StorageKey};
 
 /// Local client abstraction for the network.
 pub trait Client<Block: BlockT>: Send + Sync {
-	/// Get blockchain info.
-	fn info(&self) -> Result<ClientInfo<Block>, Error>;
-
-	/// Get block status.
-	fn block_status(&self, id: &BlockId<Block>) -> Result<BlockStatus, Error>;
-
-	/// Get block hash by number.
-	fn block_hash(&self, block_number: <Block::Header as HeaderT>::Number) -> Result<Option<Block::Hash>, Error>;
-
-	/// Get block header.
-	fn header(&self, id: &BlockId<Block>) -> Result<Option<Block::Header>, Error>;
-
-	/// Get block body.
-	fn body(&self, id: &BlockId<Block>) -> Result<Option<Vec<Block::Extrinsic>>, Error>;
-
-	/// Get block justification.
-	fn justification(&self, id: &BlockId<Block>) -> Result<Option<Justification>, Error>;
-
-	/// Get block header proof.
-	fn header_proof(&self, block_number: <Block::Header as HeaderT>::Number) -> Result<(Block::Header, Vec<Vec<u8>>), Error>;
-
-	/// Get storage read execution proof.
-	fn read_proof(&self, block: &Block::Hash, key: &[u8]) -> Result<Vec<Vec<u8>>, Error>;
-
-	/// Get method execution proof.
-	fn execution_proof(&self, block: &Block::Hash, method: &str, data: &[u8]) -> Result<(Vec<u8>, Vec<Vec<u8>>), Error>;
-
-	/// Get key changes proof.
-	fn key_changes_proof(
-		&self,
-		first: Block::Hash,
-		last: Block::Hash,
-		min: Block::Hash,
-		max: Block::Hash,
-		key: &StorageKey
-	) -> Result<ChangesProof<Block::Header>, Error>;
-
-	/// Returns `true` if the given `block` is a descendent of `base`.
-	fn is_descendent_of(&self, base: &Block::Hash, block: &Block::Hash) -> Result<bool, Error>;
+    /// Get blockchain info.
+    fn info(&self) -> Result<ClientInfo<Block>, Error>;
+
+    /// Get block status.
+    fn block_status(&self, id: &BlockId<Block>) -> Result<BlockStatus, Error>;
+
+    /// Get block hash by number.
+    fn block_hash(
+        &self,
+        block_number: <Block::Header as HeaderT>::Number,
+    ) -> Result<Option<Block::Hash>, Error>;
+
+    /// Get block header.
+    fn header(&self, id: &BlockId<Block>) -> Result<Option<Block::Header>, Error>;
+
+    /// Get block body.
+    fn body(&self, id: &BlockId<Block>) -> Result<Option<Vec<Block::Extrinsic>>, Error>;
+
+    /// Get block justification.
+    fn justification(&self, id: &BlockId<Block>) -> Result<Option<Justification>, Error>;
+
+    /// Get block header proof.
+    fn header_proof(
+        &self,
+        block_number: <Block::Header as HeaderT>::Number,
+    ) -> Result<(Block::Header, Vec<Vec<u8>>), Error>;
+
+    /// Get storage read execution proof.
+    fn read_proof(&self, block: &Block::Hash, key: &[u8]) -> Result<Vec<Vec<u8>>, Error>;
+
+    /// Get method execution proof.
+    fn execution_proof(
+        &self,
+        block: &Block::Hash,
+        method: &str,
+        data: &[u8],
+    ) -> Result<(Vec<u8>, Vec<Vec<u8>>), Error>;
+
+    /// Get key changes proof.
+    fn key_changes_proof(
+        &self,
+        first: Block::Hash,
+        last: Block::Hash,
+        min: Block::Hash,
+        max: Block::Hash,
+        key: &StorageKey,
+    ) -> Result<ChangesProof<Block::Header>, Error>;
+
+    /// Returns `true` if the given `block` is a descendent of `base`.
+    fn is_descendent_of(&self, base: &Block::Hash, block: &Block::Hash) -> Result<bool, Error>;
 }
 
-impl<B, E, Block, RA> Client<Block> for SubstrateClient<B, E, Block, RA> where
-	B: client::backend::Backend<Block, Blake2Hasher> + Send + Sync + 'static,
-	E: CallExecutor<Block, Blake2Hasher> + Send + Sync + 'static,
-	Self: BlockImport<Block, Error = ConsensusError>,
-	Block: BlockT<Hash = H256>,
-	RA: Send + Sync
+impl<B, E, Block, RA> Client<Block> for SubstrateClient<B, E, Block, RA>
+where
+    B: client::backend::Backend<Block, Blake2Hasher> + Send + Sync + 'static,
+    E: CallExecutor<Block, Blake2Hasher> + Send + Sync + 'static,
+    Self: BlockImport<Block, Error = ConsensusError>,
+    Block: BlockT<Hash = H256>,
+    RA: Send + Sync,
 {
-	fn info(&self) -> Result<ClientInfo<Block>, Error> {
-		(self as &SubstrateClient<B, E, Block, RA>).info()
-	}
-
-	fn block_status(&self, id: &BlockId<Block>) -> Result<BlockStatus, Error> {
-		(self as &SubstrateClient<B, E, Block, RA>).block_status(id)
-	}
-
-	fn block_hash(&self, block_number: <Block::Header as HeaderT>::Number) -> Result<Option<Block::Hash>, Error> {
-		(self as &SubstrateClient<B, E, Block, RA>).block_hash(block_number)
-	}
-
-	fn header(&self, id: &BlockId<Block>) -> Result<Option<Block::Header>, Error> {
-		(self as &SubstrateClient<B, E, Block, RA>).header(id)
-	}
-
-	fn body(&self, id: &BlockId<Block>) -> Result<Option<Vec<Block::Extrinsic>>, Error> {
-		(self as &SubstrateClient<B, E, Block, RA>).body(id)
-	}
-
-	fn justification(&self, id: &BlockId<Block>) -> Result<Option<Justification>, Error> {
-		(self as &SubstrateClient<B, E, Block, RA>).justification(id)
-	}
-
-	fn header_proof(&self, block_number: <Block::Header as HeaderT>::Number) -> Result<(Block::Header, Vec<Vec<u8>>), Error> {
-		(self as &SubstrateClient<B, E, Block, RA>).header_proof(&BlockId::Number(block_number))
-	}
-
-	fn read_proof(&self, block: &Block::Hash, key: &[u8]) -> Result<Vec<Vec<u8>>, Error> {
-		(self as &SubstrateClient<B, E, Block, RA>).read_proof(&BlockId::Hash(block.clone()), key)
-	}
-
-	fn execution_proof(&self, block: &Block::Hash, method: &str, data: &[u8]) -> Result<(Vec<u8>, Vec<Vec<u8>>), Error> {
-		(self as &SubstrateClient<B, E, Block, RA>).execution_proof(&BlockId::Hash(block.clone()), method, data)
-	}
-
-	fn key_changes_proof(
-		&self,
-		first: Block::Hash,
-		last: Block::Hash,
-		min: Block::Hash,
-		max: Block::Hash,
-		key: &StorageKey
-	) -> Result<ChangesProof<Block::Header>, Error> {
-		(self as &SubstrateClient<B, E, Block, RA>).key_changes_proof(first, last, min, max, key)
-	}
-
-	fn is_descendent_of(&self, base: &Block::Hash, block: &Block::Hash) -> Result<bool, Error> {
-		if base == block {
-			return Ok(false);
-		}
-
-		let tree_route = ::client::blockchain::tree_route(
-			self.backend().blockchain(),
-			BlockId::Hash(*block),
-			BlockId::Hash(*base),
-		)?;
-
-		Ok(tree_route.common_block().hash == *base)
-	}
+    fn info(&self) -> Result<ClientInfo<Block>, Error> {
+        (self as &SubstrateClient<B, E, Block, RA>).info()
+    }
+
+    fn block_status(&self, id: &BlockId<Block>) -> Result<BlockStatus, Error> {
+        (self as &SubstrateClient<B, E, Block, RA>).block_status(id)
+    }
+
+    fn block_hash(
+        &self,
+        block_number: <Block::Header as HeaderT>::Number,
+    ) -> Result<Option<Block::Hash>, Error> {
+        (self as &SubstrateClient<B, E, Block, RA>).block_hash(block_number)
+    }
+
+    fn header(&self, id: &BlockId<Block>) -> Result<Option<Block::Header>, Error> {
+        (self as &SubstrateClient<B, E, Block, RA>).header(id)
+    }
+
+    fn body(&self, id: &BlockId<Block>) -> Result<Option<Vec<Block::Extrinsic>>, Error> {
+        (self as &SubstrateClient<B, E, Block, RA>).body(id)
+    }
+
+    fn justification(&self, id: &BlockId<Block>) -> Result<Option<Justification>, Error> {
+        (self as &SubstrateClient<B, E, Block, RA>).justification(id)
+    }
+
+    fn header_proof(
+        &self,
+        block_number: <Block::Header as HeaderT>::Number,
+    ) -> Result<(Block::Header, Vec<Vec<u8>>), Error> {
+        (self as &SubstrateClient<B, E, Block, RA>).header_proof(&BlockId::Number(block_number))
+    }
+
+    fn read_proof(&self, block: &Block::Hash, key: &[u8]) -> Result<Vec<Vec<u8>>, Error> {
+        (self as &SubstrateClient<B, E, Block, RA>).read_proof(&BlockId::Hash(block.clone()), key)
+    }
+
+    fn execution_proof(
+        &self,
+        block: &Block::Hash,
+        method: &str,
+        data: &[u8],
+    ) -> Result<(Vec<u8>, Vec<Vec<u8>>), Error> {
+        (self as &SubstrateClient<B, E, Block, RA>).execution_proof(
+            &BlockId::Hash(block.clone()),
+            method,
+            data,
+        )
+    }
+
+    fn key_changes_proof(
+        &self,
+        first: Block::Hash,
+        last: Block::Hash,
+        min: Block::Hash,
+        max: Block::Hash,
+        key: &StorageKey,
+    ) -> Result<ChangesProof<Block::Header>, Error> {
+        (self as &SubstrateClient<B, E, Block, RA>).key_changes_proof(first, last, min, max, key)
+    }
+
+    fn is_descendent_of(&self, base: &Block::Hash, block: &Block::Hash) -> Result<bool, Error> {
+        if base == block {
+            return Ok(false);
+        }
+
+        let tree_route = ::client::blockchain::tree_route(
+            self.backend().blockchain(),
+            BlockId::Hash(*block),
+            BlockId::Hash(*base),
+        )?;
+
+        Ok(tree_route.common_block().hash == *base)
+    }
 }
diff --git a/core/network/src/config.rs b/core/network/src/config.rs
index 2491fc21c4..e96d68c2a6 100644
--- a/core/network/src/config.rs
+++ b/core/network/src/config.rs
@@ -16,69 +16,67 @@
 
 //! Configuration for the networking layer of Substrate.
 
-pub use network_libp2p::{NonReservedPeerMode, NetworkConfiguration, NodeKeyConfig, Secret};
+pub use network_libp2p::{NetworkConfiguration, NodeKeyConfig, NonReservedPeerMode, Secret};
 
-use bitflags::bitflags;
 use crate::chain::Client;
-use parity_codec;
 use crate::on_demand::OnDemandService;
-use runtime_primitives::traits::{Block as BlockT};
 use crate::service::{ExHashT, TransactionPool};
+use bitflags::bitflags;
+use parity_codec;
+use runtime_primitives::traits::Block as BlockT;
 use std::sync::Arc;
 
 /// Service initialization parameters.
 pub struct Params<B: BlockT, S, H: ExHashT> {
-	/// Configuration.
-	pub config: ProtocolConfig,
-	/// Network layer configuration.
-	pub network_config: NetworkConfiguration,
-	/// Substrate relay chain access point.
-	pub chain: Arc<Client<B>>,
-	/// On-demand service reference.
-	pub on_demand: Option<Arc<OnDemandService<B>>>,
-	/// Transaction pool.
-	pub transaction_pool: Arc<TransactionPool<H, B>>,
-	/// Protocol specialization.
-	pub specialization: S,
+    /// Configuration.
+    pub config: ProtocolConfig,
+    /// Network layer configuration.
+    pub network_config: NetworkConfiguration,
+    /// Substrate relay chain access point.
+    pub chain: Arc<Client<B>>,
+    /// On-demand service reference.
+    pub on_demand: Option<Arc<OnDemandService<B>>>,
+    /// Transaction pool.
+    pub transaction_pool: Arc<TransactionPool<H, B>>,
+    /// Protocol specialization.
+    pub specialization: S,
 }
 
 /// Configuration for the Substrate-specific part of the networking layer.
 #[derive(Clone)]
 pub struct ProtocolConfig {
-	/// Assigned roles.
-	pub roles: Roles,
+    /// Assigned roles.
+    pub roles: Roles,
 }
 
 impl Default for ProtocolConfig {
-	fn default() -> ProtocolConfig {
-		ProtocolConfig {
-			roles: Roles::FULL,
-		}
-	}
+    fn default() -> ProtocolConfig {
+        ProtocolConfig { roles: Roles::FULL }
+    }
 }
 
 bitflags! {
-	/// Bitmask of the roles that a node fulfills.
-	pub struct Roles: u8 {
-		/// No network.
-		const NONE = 0b00000000;
-		/// Full node, does not participate in consensus.
-		const FULL = 0b00000001;
-		/// Light client node.
-		const LIGHT = 0b00000010;
-		/// Act as an authority
-		const AUTHORITY = 0b00000100;
-	}
+    /// Bitmask of the roles that a node fulfills.
+    pub struct Roles: u8 {
+        /// No network.
+        const NONE = 0b00000000;
+        /// Full node, does not participate in consensus.
+        const FULL = 0b00000001;
+        /// Light client node.
+        const LIGHT = 0b00000010;
+        /// Act as an authority
+        const AUTHORITY = 0b00000100;
+    }
 }
 
 impl parity_codec::Encode for Roles {
-	fn encode_to<T: parity_codec::Output>(&self, dest: &mut T) {
-		dest.push_byte(self.bits())
-	}
+    fn encode_to<T: parity_codec::Output>(&self, dest: &mut T) {
+        dest.push_byte(self.bits())
+    }
 }
 
 impl parity_codec::Decode for Roles {
-	fn decode<I: parity_codec::Input>(input: &mut I) -> Option<Self> {
-		Self::from_bits(input.read_byte()?)
-	}
+    fn decode<I: parity_codec::Input>(input: &mut I) -> Option<Self> {
+        Self::from_bits(input.read_byte()?)
+    }
 }
diff --git a/core/network/src/consensus_gossip.rs b/core/network/src/consensus_gossip.rs
index 433dd5b795..1a49d3622e 100644
--- a/core/network/src/consensus_gossip.rs
+++ b/core/network/src/consensus_gossip.rs
@@ -17,512 +17,597 @@
 
 //! Utility for gossip of network messages between authorities.
 //! Handles chain-specific and standard BFT messages.
 
-use std::collections::{HashMap, HashSet};
-use std::sync::Arc;
-use log::{trace, debug};
+use crate::config::Roles;
+pub use crate::message::generic::{ConsensusMessage, Message};
+use crate::protocol::Context;
 use futures::sync::mpsc;
-use rand::{self, seq::SliceRandom};
+use log::{debug, trace};
 use lru_cache::LruCache;
-use network_libp2p::{Severity, PeerId};
+use network_libp2p::{PeerId, Severity};
+use rand::{self, seq::SliceRandom};
 use runtime_primitives::traits::{Block as BlockT, Hash, HashFor};
 use runtime_primitives::ConsensusEngineId;
-pub use crate::message::generic::{Message, ConsensusMessage};
-use crate::protocol::Context;
-use crate::config::Roles;
+use std::collections::{HashMap, HashSet};
+use std::sync::Arc;
 
 // FIXME: Add additional spam/DoS attack protection: https://github.com/paritytech/substrate/issues/1115
 const KNOWN_MESSAGES_CACHE_SIZE: usize = 4096;
 
 struct PeerConsensus<H> {
-	known_messages: HashSet<H>,
-	is_authority: bool,
+    known_messages: HashSet<H>,
+    is_authority: bool,
 }
 
 #[derive(Clone, Copy)]
 enum Status {
-	Live,
-	Future,
+    Live,
+    Future,
 }
 
 struct MessageEntry<B: BlockT> {
-	message_hash: B::Hash,
-	topic: B::Hash,
-	message: ConsensusMessage,
-	status: Status,
+    message_hash: B::Hash,
+    topic: B::Hash,
+    message: ConsensusMessage,
+    status: Status,
 }
 
 /// Message validation result.
 pub enum ValidationResult<H> {
-	/// Message is valid with this topic.
-	Valid(H),
-	/// Message is future with this topic.
-	Future(H),
-	/// Invalid message.
-	Invalid,
-	/// Obsolete message.
-	Expired,
+    /// Message is valid with this topic.
+    Valid(H),
+    /// Message is future with this topic.
+    Future(H),
+    /// Invalid message.
+    Invalid,
+    /// Obsolete message.
+    Expired,
 }
 
 /// Validates consensus messages.
 pub trait Validator<B: BlockT> {
-	/// Validate consensus message.
-	fn validate(&self, data: &[u8]) -> ValidationResult<B::Hash>;
-
-	/// Produce a closure for validating messages on a given topic.
-	fn message_expired<'a>(&'a self) -> Box<FnMut(B::Hash, &[u8]) -> bool + 'a> {
-		Box::new(move |_topic, data| match self.validate(data) {
-			ValidationResult::Valid(_) | ValidationResult::Future(_) => false,
-			ValidationResult::Invalid | ValidationResult::Expired => true,
-		})
-	}
+    /// Validate consensus message.
+    fn validate(&self, data: &[u8]) -> ValidationResult<B::Hash>;
+
+    /// Produce a closure for validating messages on a given topic.
+    fn message_expired<'a>(&'a self) -> Box<FnMut(B::Hash, &[u8]) -> bool + 'a> {
+        Box::new(move |_topic, data| match self.validate(data) {
+            ValidationResult::Valid(_) | ValidationResult::Future(_) => false,
+            ValidationResult::Invalid | ValidationResult::Expired => true,
+        })
+    }
 }
 
 /// Consensus network protocol handler. Manages statements and candidate requests.
 pub struct ConsensusGossip<B: BlockT> {
-	peers: HashMap<PeerId, PeerConsensus<B::Hash>>,
-	live_message_sinks: HashMap<(ConsensusEngineId, B::Hash), Vec<mpsc::UnboundedSender<Vec<u8>>>>,
-	messages: Vec<MessageEntry<B>>,
-	known_messages: LruCache<B::Hash, ()>,
-	validators: HashMap<ConsensusEngineId, Arc<Validator<B>>>,
+    peers: HashMap<PeerId, PeerConsensus<B::Hash>>,
+    live_message_sinks: HashMap<(ConsensusEngineId, B::Hash), Vec<mpsc::UnboundedSender<Vec<u8>>>>,
+    messages: Vec<MessageEntry<B>>,
+    known_messages: LruCache<B::Hash, ()>,
+    validators: HashMap<ConsensusEngineId, Arc<Validator<B>>>,
 }
 
 impl<B: BlockT> ConsensusGossip<B> {
-	/// Create a new instance.
-	pub fn new() -> Self {
-		ConsensusGossip {
-			peers: HashMap::new(),
-			live_message_sinks: HashMap::new(),
-			messages: Default::default(),
-			known_messages: LruCache::new(KNOWN_MESSAGES_CACHE_SIZE),
-			validators: Default::default(),
-		}
-	}
-
-	/// Closes all notification streams.
-	pub fn abort(&mut self) {
-		self.live_message_sinks.clear();
-	}
-
-	/// Register message validator for a message type.
-	pub fn register_validator(&mut self, engine_id: ConsensusEngineId, validator: Arc<Validator<B>>) {
-		self.validators.insert(engine_id, validator);
-	}
-
-	/// Handle new connected peer.
-	pub fn new_peer(&mut self, protocol: &mut Context<B>, who: PeerId, roles: Roles) {
-		if roles.intersects(Roles::AUTHORITY) {
-			trace!(target:"gossip", "Registering {:?} {}", roles, who);
-			// Send out all known messages to authorities.
-			let mut known_messages = HashSet::new();
-			for entry in self.messages.iter() {
-				if let Status::Future = entry.status { continue }
-
-				known_messages.insert(entry.message_hash);
-				protocol.send_message(who.clone(), Message::Consensus(entry.message.clone()));
-			}
-			self.peers.insert(who, PeerConsensus {
-				known_messages,
-				is_authority: true,
-			});
-		}
-		else if roles.intersects(Roles::FULL) {
-			self.peers.insert(who, PeerConsensus {
-				known_messages: HashSet::new(),
-				is_authority: false,
-			});
-		}
-	}
-
-	fn propagate<F>(
-		&mut self,
-		protocol: &mut Context<B>,
-		message_hash: B::Hash,
-		get_message: F,
-		force: bool,
-	)
-		where F: Fn() -> ConsensusMessage,
-	{
-		let mut non_authorities: Vec<_> = self.peers.iter()
-			.filter_map(|(id, ref peer)|
-				if !peer.is_authority && (!peer.known_messages.contains(&message_hash) || force) {
-					Some(id.clone())
-				} else {
-					None
-				}
-			)
-			.collect();
-
-		non_authorities.shuffle(&mut rand::thread_rng());
-		let non_authorities: HashSet<_> = if non_authorities.is_empty() {
-			HashSet::new()
-		} else {
-			non_authorities[0..non_authorities.len().min(((non_authorities.len() as f64).sqrt() as usize).max(3))].iter().collect()
-		};
-
-		for (id, ref mut peer) in self.peers.iter_mut() {
-			if peer.is_authority {
-				if peer.known_messages.insert(message_hash.clone()) || force {
-					let message = get_message();
-					trace!(target:"gossip", "Propagating to authority {}: {:?}", id, message);
-					protocol.send_message(id.clone(), Message::Consensus(message));
-				}
-			} else if non_authorities.contains(&id) {
-				let message = get_message();
-				trace!(target:"gossip", "Propagating to {}: {:?}", id, message);
-				protocol.send_message(id.clone(), Message::Consensus(message));
-			}
-		}
-	}
-
-	fn register_message<F>(
-		&mut self,
-		message_hash: B::Hash,
-		topic: B::Hash,
-		status: Status,
-		get_message: F,
-	)
-		where F: Fn() -> ConsensusMessage
-	{
-		if self.known_messages.insert(message_hash, ()).is_none() {
-			self.messages.push(MessageEntry {
-				topic,
-				message_hash,
-				message: get_message(),
-				status,
-			});
-		}
-	}
-
-	/// Call when a peer has been disconnected to stop tracking gossip status.
-	pub fn peer_disconnected(&mut self, _protocol: &mut Context<B>, who: PeerId) {
-		self.peers.remove(&who);
-	}
-
-	/// Prune old or no longer relevant consensus messages.
Provide a predicate - /// for pruning, which returns `false` when the items with a given topic should be pruned. - pub fn collect_garbage(&mut self) { - use std::collections::hash_map::Entry; - - self.live_message_sinks.retain(|_, sinks| { - sinks.retain(|sink| !sink.is_closed()); - !sinks.is_empty() - }); - - let known_messages = &mut self.known_messages; - let before = self.messages.len(); - let validators = &self.validators; - - let mut check_fns = HashMap::new(); - let mut message_expired = move |entry: &MessageEntry| { - let engine_id = entry.message.engine_id; - let check_fn = match check_fns.entry(engine_id) { - Entry::Occupied(entry) => entry.into_mut(), - Entry::Vacant(vacant) => match validators.get(&engine_id) { - None => return true, // treat all messages with no validator as expired - Some(validator) => vacant.insert(validator.message_expired()), - } - }; - - (check_fn)(entry.topic, &entry.message.data) - }; - - self.messages.retain(|entry| !message_expired(entry)); - - trace!(target: "gossip", "Cleaned up {} stale messages, {} left ({} known)", - before - self.messages.len(), - self.messages.len(), - known_messages.len(), - ); - - for (_, ref mut peer) in self.peers.iter_mut() { - peer.known_messages.retain(|h| known_messages.contains_key(h)); - } - } - - /// Get data of valid, incoming messages for a topic (but might have expired meanwhile) - pub fn messages_for(&mut self, engine_id: ConsensusEngineId, topic: B::Hash) - -> mpsc::UnboundedReceiver> - { - let (tx, rx) = mpsc::unbounded(); - - let validator = match self.validators.get(&engine_id) { - None => { - self.live_message_sinks.entry((engine_id, topic)).or_default().push(tx); - return rx; - } - Some(v) => v, - }; - - for entry in self.messages.iter_mut() - .filter(|e| e.topic == topic && e.message.engine_id == engine_id) - { - let live = match entry.status { - Status::Live => true, - Status::Future => match validator.validate(&entry.message.data) { - ValidationResult::Valid(_) => { - entry.status = Status::Live; - true - } - _ => { - // don't send messages considered to be future still. - // if messages are considered expired they'll be cleaned up when we - // collect garbage. - false - } - } - }; - - if live { - entry.status = Status::Live; - tx.unbounded_send(entry.message.data.clone()) - .expect("receiver known to be live; qed"); - } - } - - self.live_message_sinks.entry((engine_id, topic)).or_default().push(tx); - - rx - } - - /// Handle an incoming ConsensusMessage for topic by who via protocol. Discard message if topic - /// already known, the message is old, its source peers isn't a registered peer or the connection - /// to them is broken. Return `Some(topic, message)` if it was added to the internal queue, `None` - /// in all other cases. 
- pub fn on_incoming( - &mut self, - protocol: &mut Context, - who: PeerId, - message: ConsensusMessage, - ) -> Option<(B::Hash, ConsensusMessage)> { - let message_hash = HashFor::::hash(&message.data[..]); - - if self.known_messages.contains_key(&message_hash) { - trace!(target:"gossip", "Ignored already known message from {}", who); - return None; - } - - if let Some(ref mut peer) = self.peers.get_mut(&who) { - use std::collections::hash_map::Entry; - - let engine_id = message.engine_id; - // validate the message - let (topic, status) = match self.validators.get(&engine_id) - .map(|v| v.validate(&message.data)) - { - Some(ValidationResult::Valid(topic)) => (topic, Status::Live), - Some(ValidationResult::Future(topic)) => (topic, Status::Future), - Some(ValidationResult::Invalid) => { - trace!(target:"gossip", "Invalid message from {}", who); - protocol.report_peer( - who, - Severity::Bad(format!("Sent invalid consensus message")), - ); - return None; - }, - Some(ValidationResult::Expired) => { - trace!(target:"gossip", "Ignored expired message from {}", who); - return None; - }, - None => { - protocol.report_peer( - who.clone(), - Severity::Useless(format!("Sent unknown consensus engine id")), - ); - trace!(target:"gossip", "Unknown message engine id {:?} from {}", + /// Create a new instance. + pub fn new() -> Self { + ConsensusGossip { + peers: HashMap::new(), + live_message_sinks: HashMap::new(), + messages: Default::default(), + known_messages: LruCache::new(KNOWN_MESSAGES_CACHE_SIZE), + validators: Default::default(), + } + } + + /// Closes all notification streams. + pub fn abort(&mut self) { + self.live_message_sinks.clear(); + } + + /// Register message validator for a message type. + pub fn register_validator( + &mut self, + engine_id: ConsensusEngineId, + validator: Arc>, + ) { + self.validators.insert(engine_id, validator); + } + + /// Handle new connected peer. + pub fn new_peer(&mut self, protocol: &mut Context, who: PeerId, roles: Roles) { + if roles.intersects(Roles::AUTHORITY) { + trace!(target:"gossip", "Registering {:?} {}", roles, who); + // Send out all known messages to authorities. 
+ let mut known_messages = HashSet::new(); + for entry in self.messages.iter() { + if let Status::Future = entry.status { + continue; + } + + known_messages.insert(entry.message_hash); + protocol.send_message(who.clone(), Message::Consensus(entry.message.clone())); + } + self.peers.insert( + who, + PeerConsensus { + known_messages, + is_authority: true, + }, + ); + } else if roles.intersects(Roles::FULL) { + self.peers.insert( + who, + PeerConsensus { + known_messages: HashSet::new(), + is_authority: false, + }, + ); + } + } + + fn propagate( + &mut self, + protocol: &mut Context, + message_hash: B::Hash, + get_message: F, + force: bool, + ) where + F: Fn() -> ConsensusMessage, + { + let mut non_authorities: Vec<_> = self + .peers + .iter() + .filter_map(|(id, ref peer)| { + if !peer.is_authority && (!peer.known_messages.contains(&message_hash) || force) { + Some(id.clone()) + } else { + None + } + }) + .collect(); + + non_authorities.shuffle(&mut rand::thread_rng()); + let non_authorities: HashSet<_> = if non_authorities.is_empty() { + HashSet::new() + } else { + non_authorities[0..non_authorities + .len() + .min(((non_authorities.len() as f64).sqrt() as usize).max(3))] + .iter() + .collect() + }; + + for (id, ref mut peer) in self.peers.iter_mut() { + if peer.is_authority { + if peer.known_messages.insert(message_hash.clone()) || force { + let message = get_message(); + trace!(target:"gossip", "Propagating to authority {}: {:?}", id, message); + protocol.send_message(id.clone(), Message::Consensus(message)); + } + } else if non_authorities.contains(&id) { + let message = get_message(); + trace!(target:"gossip", "Propagating to {}: {:?}", id, message); + protocol.send_message(id.clone(), Message::Consensus(message)); + } + } + } + + fn register_message( + &mut self, + message_hash: B::Hash, + topic: B::Hash, + status: Status, + get_message: F, + ) where + F: Fn() -> ConsensusMessage, + { + if self.known_messages.insert(message_hash, ()).is_none() { + self.messages.push(MessageEntry { + topic, + message_hash, + message: get_message(), + status, + }); + } + } + + /// Call when a peer has been disconnected to stop tracking gossip status. + pub fn peer_disconnected(&mut self, _protocol: &mut Context, who: PeerId) { + self.peers.remove(&who); + } + + /// Prune old or no longer relevant consensus messages. Provide a predicate + /// for pruning, which returns `false` when the items with a given topic should be pruned. 
+ pub fn collect_garbage(&mut self) { + use std::collections::hash_map::Entry; + + self.live_message_sinks.retain(|_, sinks| { + sinks.retain(|sink| !sink.is_closed()); + !sinks.is_empty() + }); + + let known_messages = &mut self.known_messages; + let before = self.messages.len(); + let validators = &self.validators; + + let mut check_fns = HashMap::new(); + let mut message_expired = move |entry: &MessageEntry| { + let engine_id = entry.message.engine_id; + let check_fn = match check_fns.entry(engine_id) { + Entry::Occupied(entry) => entry.into_mut(), + Entry::Vacant(vacant) => match validators.get(&engine_id) { + None => return true, // treat all messages with no validator as expired + Some(validator) => vacant.insert(validator.message_expired()), + }, + }; + + (check_fn)(entry.topic, &entry.message.data) + }; + + self.messages.retain(|entry| !message_expired(entry)); + + trace!(target: "gossip", "Cleaned up {} stale messages, {} left ({} known)", + before - self.messages.len(), + self.messages.len(), + known_messages.len(), + ); + + for (_, ref mut peer) in self.peers.iter_mut() { + peer.known_messages + .retain(|h| known_messages.contains_key(h)); + } + } + + /// Get data of valid, incoming messages for a topic (but might have expired meanwhile) + pub fn messages_for( + &mut self, + engine_id: ConsensusEngineId, + topic: B::Hash, + ) -> mpsc::UnboundedReceiver> { + let (tx, rx) = mpsc::unbounded(); + + let validator = match self.validators.get(&engine_id) { + None => { + self.live_message_sinks + .entry((engine_id, topic)) + .or_default() + .push(tx); + return rx; + } + Some(v) => v, + }; + + for entry in self + .messages + .iter_mut() + .filter(|e| e.topic == topic && e.message.engine_id == engine_id) + { + let live = match entry.status { + Status::Live => true, + Status::Future => match validator.validate(&entry.message.data) { + ValidationResult::Valid(_) => { + entry.status = Status::Live; + true + } + _ => { + // don't send messages considered to be future still. + // if messages are considered expired they'll be cleaned up when we + // collect garbage. + false + } + }, + }; + + if live { + entry.status = Status::Live; + tx.unbounded_send(entry.message.data.clone()) + .expect("receiver known to be live; qed"); + } + } + + self.live_message_sinks + .entry((engine_id, topic)) + .or_default() + .push(tx); + + rx + } + + /// Handle an incoming ConsensusMessage for topic by who via protocol. Discard message if topic + /// already known, the message is old, its source peers isn't a registered peer or the connection + /// to them is broken. Return `Some(topic, message)` if it was added to the internal queue, `None` + /// in all other cases. 
+ pub fn on_incoming( + &mut self, + protocol: &mut Context, + who: PeerId, + message: ConsensusMessage, + ) -> Option<(B::Hash, ConsensusMessage)> { + let message_hash = HashFor::::hash(&message.data[..]); + + if self.known_messages.contains_key(&message_hash) { + trace!(target:"gossip", "Ignored already known message from {}", who); + return None; + } + + if let Some(ref mut peer) = self.peers.get_mut(&who) { + use std::collections::hash_map::Entry; + + let engine_id = message.engine_id; + // validate the message + let (topic, status) = match self + .validators + .get(&engine_id) + .map(|v| v.validate(&message.data)) + { + Some(ValidationResult::Valid(topic)) => (topic, Status::Live), + Some(ValidationResult::Future(topic)) => (topic, Status::Future), + Some(ValidationResult::Invalid) => { + trace!(target:"gossip", "Invalid message from {}", who); + protocol.report_peer( + who, + Severity::Bad(format!("Sent invalid consensus message")), + ); + return None; + } + Some(ValidationResult::Expired) => { + trace!(target:"gossip", "Ignored expired message from {}", who); + return None; + } + None => { + protocol.report_peer( + who.clone(), + Severity::Useless(format!("Sent unknown consensus engine id")), + ); + trace!(target:"gossip", "Unknown message engine id {:?} from {}", engine_id, who); - return None; - }, - }; - - peer.known_messages.insert(message_hash); - if let Entry::Occupied(mut entry) = self.live_message_sinks.entry((engine_id, topic)) { - debug!(target: "gossip", "Pushing consensus message to sinks for {}.", topic); - entry.get_mut().retain(|sink| { - if let Err(e) = sink.unbounded_send(message.data.clone()) { - trace!(target:"gossip", "Error broadcasting message notification: {:?}", e); - } - !sink.is_closed() - }); - if entry.get().is_empty() { - entry.remove_entry(); - } - } - self.multicast_inner(protocol, message_hash, topic, status, || message.clone(), false); - Some((topic, message)) - } else { - trace!(target:"gossip", "Ignored statement from unregistered peer {}", who); - None - } - } - - /// Multicast a message to all peers. - pub fn multicast( - &mut self, - protocol: &mut Context, - topic: B::Hash, - message: ConsensusMessage, - force: bool, - ) { - let message_hash = HashFor::::hash(&message.data); - self.multicast_inner(protocol, message_hash, topic, Status::Live, || message.clone(), force); - } - - fn multicast_inner( - &mut self, - protocol: &mut Context, - message_hash: B::Hash, - topic: B::Hash, - status: Status, - get_message: F, - force: bool, - ) - where F: Fn() -> ConsensusMessage - { - self.register_message(message_hash, topic, status, &get_message); - if let Status::Live = status { - self.propagate(protocol, message_hash, get_message, force); - } - } + return None; + } + }; + + peer.known_messages.insert(message_hash); + if let Entry::Occupied(mut entry) = self.live_message_sinks.entry((engine_id, topic)) { + debug!(target: "gossip", "Pushing consensus message to sinks for {}.", topic); + entry.get_mut().retain(|sink| { + if let Err(e) = sink.unbounded_send(message.data.clone()) { + trace!(target:"gossip", "Error broadcasting message notification: {:?}", e); + } + !sink.is_closed() + }); + if entry.get().is_empty() { + entry.remove_entry(); + } + } + self.multicast_inner( + protocol, + message_hash, + topic, + status, + || message.clone(), + false, + ); + Some((topic, message)) + } else { + trace!(target:"gossip", "Ignored statement from unregistered peer {}", who); + None + } + } + + /// Multicast a message to all peers. 
+ pub fn multicast( + &mut self, + protocol: &mut Context, + topic: B::Hash, + message: ConsensusMessage, + force: bool, + ) { + let message_hash = HashFor::::hash(&message.data); + self.multicast_inner( + protocol, + message_hash, + topic, + Status::Live, + || message.clone(), + force, + ); + } + + fn multicast_inner( + &mut self, + protocol: &mut Context, + message_hash: B::Hash, + topic: B::Hash, + status: Status, + get_message: F, + force: bool, + ) where + F: Fn() -> ConsensusMessage, + { + self.register_message(message_hash, topic, status, &get_message); + if let Status::Live = status { + self.propagate(protocol, message_hash, get_message, force); + } + } } #[cfg(test)] mod tests { - use runtime_primitives::testing::{H256, Block as RawBlock, ExtrinsicWrapper}; - use futures::Stream; - - use super::*; - - type Block = RawBlock>; - - macro_rules! push_msg { - ($consensus:expr, $topic:expr, $hash: expr, $m:expr) => { - if $consensus.known_messages.insert($hash, ()).is_none() { - $consensus.messages.push(MessageEntry { - topic: $topic, - message_hash: $hash, - message: ConsensusMessage { data: $m, engine_id: [0, 0, 0, 0] }, - status: Status::Live, - }); - } - } - } - - struct AllowAll; - impl Validator for AllowAll { - fn validate(&self, _data: &[u8]) -> ValidationResult { - ValidationResult::Valid(H256::default()) - } - } - - #[test] - fn collects_garbage() { - struct AllowOne; - impl Validator for AllowOne { - fn validate(&self, data: &[u8]) -> ValidationResult { - if data[0] == 1 { - ValidationResult::Valid(H256::default()) - } else { - ValidationResult::Expired - } - } - } - - let prev_hash = H256::random(); - let best_hash = H256::random(); - let mut consensus = ConsensusGossip::::new(); - let m1_hash = H256::random(); - let m2_hash = H256::random(); - let m1 = vec![1, 2, 3]; - let m2 = vec![4, 5, 6]; - - push_msg!(consensus, prev_hash, m1_hash, m1); - push_msg!(consensus, best_hash, m2_hash, m2.clone()); - consensus.known_messages.insert(m1_hash, ()); - consensus.known_messages.insert(m2_hash, ()); - - let test_engine_id = Default::default(); - consensus.register_validator(test_engine_id, Arc::new(AllowAll)); - consensus.collect_garbage(); - assert_eq!(consensus.messages.len(), 2); - assert_eq!(consensus.known_messages.len(), 2); - - consensus.register_validator(test_engine_id, Arc::new(AllowOne)); - - // m2 is expired - consensus.collect_garbage(); - assert_eq!(consensus.messages.len(), 1); - // known messages are only pruned based on size. 
- assert_eq!(consensus.known_messages.len(), 2); - assert!(consensus.known_messages.contains_key(&m2_hash)); - } - - #[test] - fn message_stream_include_those_sent_before_asking_for_stream() { - use futures::Stream; - - let mut consensus = ConsensusGossip::::new(); - consensus.register_validator([0, 0, 0, 0], Arc::new(AllowAll)); - - let message = ConsensusMessage { data: vec![4, 5, 6], engine_id: [0, 0, 0, 0] }; - - let message_hash = HashFor::::hash(&message.data); - let topic = HashFor::::hash(&[1,2,3]); - - consensus.register_message(message_hash, topic, Status::Live, || message.clone()); - let stream = consensus.messages_for([0, 0, 0, 0], topic); - - assert_eq!(stream.wait().next(), Some(Ok(message.data))); - } - - #[test] - fn can_keep_multiple_messages_per_topic() { - let mut consensus = ConsensusGossip::::new(); - - let topic = [1; 32].into(); - let msg_a = ConsensusMessage { data: vec![1, 2, 3], engine_id: [0, 0, 0, 0] }; - let msg_b = ConsensusMessage { data: vec![4, 5, 6], engine_id: [0, 0, 0, 0] }; - - consensus.register_message(HashFor::::hash(&msg_a.data), topic, Status::Live, || msg_a.clone()); - consensus.register_message(HashFor::::hash(&msg_b.data), topic, Status::Live, || msg_b.clone()); - - assert_eq!(consensus.messages.len(), 2); - } - - #[test] - fn can_keep_multiple_subscribers_per_topic() { - let mut consensus = ConsensusGossip::::new(); - consensus.register_validator([0, 0, 0, 0], Arc::new(AllowAll)); - - let message = ConsensusMessage { data: vec![4, 5, 6], engine_id: [0, 0, 0, 0] }; - - let message_hash = HashFor::::hash(&message.data); - let topic = HashFor::::hash(&[1,2,3]); - - consensus.register_message(message_hash, topic, Status::Live, || message.clone()); - - let stream1 = consensus.messages_for([0, 0, 0, 0], topic); - let stream2 = consensus.messages_for([0, 0, 0, 0], topic); - - assert_eq!(stream1.wait().next(), Some(Ok(message.data.clone()))); - assert_eq!(stream2.wait().next(), Some(Ok(message.data))); - } - - #[test] - fn topics_are_localized_to_engine_id() { - let mut consensus = ConsensusGossip::::new(); - consensus.register_validator([0, 0, 0, 0], Arc::new(AllowAll)); - - let topic = [1; 32].into(); - let msg_a = ConsensusMessage { data: vec![1, 2, 3], engine_id: [0, 0, 0, 0] }; - let msg_b = ConsensusMessage { data: vec![4, 5, 6], engine_id: [0, 0, 0, 1] }; - - consensus.register_message(HashFor::::hash(&msg_a.data), topic, Status::Live, || msg_a.clone()); - consensus.register_message(HashFor::::hash(&msg_b.data), topic, Status::Live, || msg_b.clone()); - - let mut stream = consensus.messages_for([0, 0, 0, 0], topic).wait(); - - assert_eq!(stream.next(), Some(Ok(vec![1, 2, 3]))); - let _ = consensus.live_message_sinks.remove(&([0, 0, 0, 0], topic)); - assert_eq!(stream.next(), None); - } + use futures::Stream; + use runtime_primitives::testing::{Block as RawBlock, ExtrinsicWrapper, H256}; + + use super::*; + + type Block = RawBlock>; + + macro_rules! 
push_msg { + ($consensus:expr, $topic:expr, $hash: expr, $m:expr) => { + if $consensus.known_messages.insert($hash, ()).is_none() { + $consensus.messages.push(MessageEntry { + topic: $topic, + message_hash: $hash, + message: ConsensusMessage { + data: $m, + engine_id: [0, 0, 0, 0], + }, + status: Status::Live, + }); + } + }; + } + + struct AllowAll; + impl Validator for AllowAll { + fn validate(&self, _data: &[u8]) -> ValidationResult { + ValidationResult::Valid(H256::default()) + } + } + + #[test] + fn collects_garbage() { + struct AllowOne; + impl Validator for AllowOne { + fn validate(&self, data: &[u8]) -> ValidationResult { + if data[0] == 1 { + ValidationResult::Valid(H256::default()) + } else { + ValidationResult::Expired + } + } + } + + let prev_hash = H256::random(); + let best_hash = H256::random(); + let mut consensus = ConsensusGossip::::new(); + let m1_hash = H256::random(); + let m2_hash = H256::random(); + let m1 = vec![1, 2, 3]; + let m2 = vec![4, 5, 6]; + + push_msg!(consensus, prev_hash, m1_hash, m1); + push_msg!(consensus, best_hash, m2_hash, m2.clone()); + consensus.known_messages.insert(m1_hash, ()); + consensus.known_messages.insert(m2_hash, ()); + + let test_engine_id = Default::default(); + consensus.register_validator(test_engine_id, Arc::new(AllowAll)); + consensus.collect_garbage(); + assert_eq!(consensus.messages.len(), 2); + assert_eq!(consensus.known_messages.len(), 2); + + consensus.register_validator(test_engine_id, Arc::new(AllowOne)); + + // m2 is expired + consensus.collect_garbage(); + assert_eq!(consensus.messages.len(), 1); + // known messages are only pruned based on size. + assert_eq!(consensus.known_messages.len(), 2); + assert!(consensus.known_messages.contains_key(&m2_hash)); + } + + #[test] + fn message_stream_include_those_sent_before_asking_for_stream() { + use futures::Stream; + + let mut consensus = ConsensusGossip::::new(); + consensus.register_validator([0, 0, 0, 0], Arc::new(AllowAll)); + + let message = ConsensusMessage { + data: vec![4, 5, 6], + engine_id: [0, 0, 0, 0], + }; + + let message_hash = HashFor::::hash(&message.data); + let topic = HashFor::::hash(&[1, 2, 3]); + + consensus.register_message(message_hash, topic, Status::Live, || message.clone()); + let stream = consensus.messages_for([0, 0, 0, 0], topic); + + assert_eq!(stream.wait().next(), Some(Ok(message.data))); + } + + #[test] + fn can_keep_multiple_messages_per_topic() { + let mut consensus = ConsensusGossip::::new(); + + let topic = [1; 32].into(); + let msg_a = ConsensusMessage { + data: vec![1, 2, 3], + engine_id: [0, 0, 0, 0], + }; + let msg_b = ConsensusMessage { + data: vec![4, 5, 6], + engine_id: [0, 0, 0, 0], + }; + + consensus.register_message( + HashFor::::hash(&msg_a.data), + topic, + Status::Live, + || msg_a.clone(), + ); + consensus.register_message( + HashFor::::hash(&msg_b.data), + topic, + Status::Live, + || msg_b.clone(), + ); + + assert_eq!(consensus.messages.len(), 2); + } + + #[test] + fn can_keep_multiple_subscribers_per_topic() { + let mut consensus = ConsensusGossip::::new(); + consensus.register_validator([0, 0, 0, 0], Arc::new(AllowAll)); + + let message = ConsensusMessage { + data: vec![4, 5, 6], + engine_id: [0, 0, 0, 0], + }; + + let message_hash = HashFor::::hash(&message.data); + let topic = HashFor::::hash(&[1, 2, 3]); + + consensus.register_message(message_hash, topic, Status::Live, || message.clone()); + + let stream1 = consensus.messages_for([0, 0, 0, 0], topic); + let stream2 = consensus.messages_for([0, 0, 0, 0], topic); + + 
assert_eq!(stream1.wait().next(), Some(Ok(message.data.clone()))); + assert_eq!(stream2.wait().next(), Some(Ok(message.data))); + } + + #[test] + fn topics_are_localized_to_engine_id() { + let mut consensus = ConsensusGossip::::new(); + consensus.register_validator([0, 0, 0, 0], Arc::new(AllowAll)); + + let topic = [1; 32].into(); + let msg_a = ConsensusMessage { + data: vec![1, 2, 3], + engine_id: [0, 0, 0, 0], + }; + let msg_b = ConsensusMessage { + data: vec![4, 5, 6], + engine_id: [0, 0, 0, 1], + }; + + consensus.register_message( + HashFor::::hash(&msg_a.data), + topic, + Status::Live, + || msg_a.clone(), + ); + consensus.register_message( + HashFor::::hash(&msg_b.data), + topic, + Status::Live, + || msg_b.clone(), + ); + + let mut stream = consensus.messages_for([0, 0, 0, 0], topic).wait(); + + assert_eq!(stream.next(), Some(Ok(vec![1, 2, 3]))); + let _ = consensus.live_message_sinks.remove(&([0, 0, 0, 0], topic)); + assert_eq!(stream.next(), None); + } } diff --git a/core/network/src/error.rs b/core/network/src/error.rs index bf687f9969..c40d4993a5 100644 --- a/core/network/src/error.rs +++ b/core/network/src/error.rs @@ -20,19 +20,19 @@ // https://github.com/paritytech/substrate/issues/1547 #![allow(deprecated)] +use client; use error_chain::*; use std::io::Error as IoError; -use client; error_chain! { - foreign_links { - Io(IoError) #[doc = "IO error."]; - } + foreign_links { + Io(IoError) #[doc = "IO error."]; + } - links { - Client(client::error::Error, client::error::ErrorKind) #[doc="Client error"]; - } + links { + Client(client::error::Error, client::error::ErrorKind) #[doc="Client error"]; + } - errors { - } + errors { + } } diff --git a/core/network/src/lib.rs b/core/network/src/lib.rs index 0200494517..9a7e194139 100644 --- a/core/network/src/lib.rs +++ b/core/network/src/lib.rs @@ -24,32 +24,32 @@ mod service; mod sync; #[macro_use] mod protocol; -mod chain; mod blocks; -mod on_demand; -mod util; +mod chain; pub mod config; pub mod consensus_gossip; pub mod error; pub mod message; +mod on_demand; pub mod specialization; +mod util; #[cfg(any(test, feature = "test-helpers"))] pub mod test; pub use chain::Client as ClientHandle; -pub use service::{Service, FetchFuture, TransactionPool, ManageNetwork, NetworkMsg, SyncProvider, ExHashT}; -pub use protocol::{ProtocolStatus, PeerInfo, Context}; -pub use sync::{Status as SyncStatus, SyncState}; +pub use error::Error; +pub use message::{generic as generic_message, RequestId, Status as StatusMessage}; pub use network_libp2p::{ - identity, multiaddr, - ProtocolId, Severity, Multiaddr, - NetworkState, NetworkStatePeer, NetworkStateNotConnectedPeer, NetworkStatePeerEndpoint, - NodeKeyConfig, Secret, Secp256k1Secret, Ed25519Secret, - build_multiaddr, PeerId, PublicKey + build_multiaddr, identity, multiaddr, Ed25519Secret, Multiaddr, NetworkState, + NetworkStateNotConnectedPeer, NetworkStatePeer, NetworkStatePeerEndpoint, NodeKeyConfig, + PeerId, ProtocolId, PublicKey, Secp256k1Secret, Secret, Severity, }; -pub use message::{generic as generic_message, RequestId, Status as StatusMessage}; -pub use error::Error; pub use on_demand::{OnDemand, OnDemandService, RemoteResponse}; +pub use protocol::{Context, PeerInfo, ProtocolStatus}; #[doc(hidden)] pub use runtime_primitives::traits::Block as BlockT; +pub use service::{ + ExHashT, FetchFuture, ManageNetwork, NetworkMsg, Service, SyncProvider, TransactionPool, +}; +pub use sync::{Status as SyncStatus, SyncState}; diff --git a/core/network/src/message.rs b/core/network/src/message.rs index 
d0f697b8ab..8310590d99 100644 --- a/core/network/src/message.rs +++ b/core/network/src/message.rs @@ -16,14 +16,15 @@ //! Network packet message types. These get serialized and put into the lower level protocol payload. -use bitflags::bitflags; -use runtime_primitives::{ConsensusEngineId, traits::{Block as BlockT, Header as HeaderT}}; -use parity_codec::{Encode, Decode, Input, Output}; pub use self::generic::{ - BlockAnnounce, RemoteCallRequest, RemoteReadRequest, - RemoteHeaderRequest, RemoteHeaderResponse, - RemoteChangesRequest, RemoteChangesResponse, - FromBlock + BlockAnnounce, FromBlock, RemoteCallRequest, RemoteChangesRequest, RemoteChangesResponse, + RemoteHeaderRequest, RemoteHeaderResponse, RemoteReadRequest, +}; +use bitflags::bitflags; +use parity_codec::{Decode, Encode, Input, Output}; +use runtime_primitives::{ + traits::{Block as BlockT, Header as HeaderT}, + ConsensusEngineId, }; /// A unique ID of a request. @@ -31,337 +32,328 @@ pub type RequestId = u64; /// Type alias for using the message type using block type parameters. pub type Message = generic::Message< - ::Header, - ::Hash, - <::Header as HeaderT>::Number, - ::Extrinsic, + ::Header, + ::Hash, + <::Header as HeaderT>::Number, + ::Extrinsic, >; /// Type alias for using the status type using block type parameters. -pub type Status = generic::Status< - ::Hash, - <::Header as HeaderT>::Number, ->; +pub type Status = + generic::Status<::Hash, <::Header as HeaderT>::Number>; /// Type alias for using the block request type using block type parameters. -pub type BlockRequest = generic::BlockRequest< - ::Hash, - <::Header as HeaderT>::Number, ->; +pub type BlockRequest = + generic::BlockRequest<::Hash, <::Header as HeaderT>::Number>; /// Type alias for using the BlockData type using block type parameters. -pub type BlockData = generic::BlockData< - ::Header, - ::Hash, - ::Extrinsic, ->; +pub type BlockData = + generic::BlockData<::Header, ::Hash, ::Extrinsic>; /// Type alias for using the BlockResponse type using block type parameters. -pub type BlockResponse = generic::BlockResponse< - ::Header, - ::Hash, - ::Extrinsic, ->; +pub type BlockResponse = + generic::BlockResponse<::Header, ::Hash, ::Extrinsic>; /// A set of transactions. pub type Transactions = Vec; /// Bits of block data and associated artifacts to request. bitflags! { - /// Node roles bitmask. - pub struct BlockAttributes: u8 { - /// Include block header. - const HEADER = 0b00000001; - /// Include block body. - const BODY = 0b00000010; - /// Include block receipt. - const RECEIPT = 0b00000100; - /// Include block message queue. - const MESSAGE_QUEUE = 0b00001000; - /// Include a justification for the block. - const JUSTIFICATION = 0b00010000; - } + /// Node roles bitmask. + pub struct BlockAttributes: u8 { + /// Include block header. + const HEADER = 0b00000001; + /// Include block body. + const BODY = 0b00000010; + /// Include block receipt. + const RECEIPT = 0b00000100; + /// Include block message queue. + const MESSAGE_QUEUE = 0b00001000; + /// Include a justification for the block. + const JUSTIFICATION = 0b00010000; + } } impl Encode for BlockAttributes { - fn encode_to(&self, dest: &mut T) { - dest.push_byte(self.bits()) - } + fn encode_to(&self, dest: &mut T) { + dest.push_byte(self.bits()) + } } impl Decode for BlockAttributes { - fn decode(input: &mut I) -> Option { - Self::from_bits(input.read_byte()?) - } + fn decode(input: &mut I) -> Option { + Self::from_bits(input.read_byte()?) 
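// Editor's note (illustrative, not part of the original patch): the two
// impls above serialize `BlockAttributes` as exactly one byte, so a request
// for header + body + justification is the byte 0b0001_0011. A sketch of
// the round-trip, assuming the `Encode`/`Decode` impls shown here:
//
//     let attrs = BlockAttributes::HEADER | BlockAttributes::BODY | BlockAttributes::JUSTIFICATION;
//     let bytes = attrs.encode();
//     assert_eq!(bytes, vec![0b0001_0011]);
//     assert_eq!(BlockAttributes::decode(&mut &bytes[..]), Some(attrs));
//
// `from_bits` returns `None` on unknown bits, so decoding rejects bytes with
// bits outside the mask rather than silently truncating them.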
+    }
}

#[derive(Debug, PartialEq, Eq, Clone, Copy, Encode, Decode)]
/// Block enumeration direction.
pub enum Direction {
-    /// Enumerate in ascending order (from child to parent).
-    Ascending = 0,
-    /// Enumerate in descending order (from parent to canonical child).
-    Descending = 1,
+    /// Enumerate in ascending order (from child to parent).
+    Ascending = 0,
+    /// Enumerate in descending order (from parent to canonical child).
+    Descending = 1,
}

/// Remote call response.
#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)]
pub struct RemoteCallResponse {
-    /// Id of a request this response was made for.
-    pub id: RequestId,
-    /// Execution proof.
-    pub proof: Vec<Vec<u8>>,
+    /// Id of a request this response was made for.
+    pub id: RequestId,
+    /// Execution proof.
+    pub proof: Vec<Vec<u8>>,
}

#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)]
/// Remote read response.
pub struct RemoteReadResponse {
-    /// Id of a request this response was made for.
-    pub id: RequestId,
-    /// Read proof.
-    pub proof: Vec<Vec<u8>>,
+    /// Id of a request this response was made for.
+    pub id: RequestId,
+    /// Read proof.
+    pub proof: Vec<Vec<u8>>,
}

/// Generic types.
pub mod generic {
-    use parity_codec::{Encode, Decode};
-    use network_libp2p::{CustomMessage, CustomMessageId};
-    use runtime_primitives::Justification;
-    use crate::config::Roles;
-    use super::{
-        BlockAttributes, RemoteCallResponse, RemoteReadResponse,
-        RequestId, Transactions, Direction, ConsensusEngineId,
-    };
-    /// Consensus is mostly opaque to us
-    #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)]
-    pub struct ConsensusMessage {
-        /// Identifies consensus engine.
-        pub engine_id: ConsensusEngineId,
-        /// Message payload.
-        pub data: Vec<u8>,
-    }
-
-    /// Block data sent in the response.
-    #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)]
-    pub struct BlockData<Header, Hash, Extrinsic> {
-        /// Block header hash.
-        pub hash: Hash,
-        /// Block header if requested.
-        pub header: Option<Header>,
-        /// Block body if requested.
-        pub body: Option<Vec<Extrinsic>>,
-        /// Block receipt if requested.
-        pub receipt: Option<Vec<u8>>,
-        /// Block message queue if requested.
-        pub message_queue: Option<Vec<u8>>,
-        /// Justification if requested.
-        pub justification: Option<Justification>,
-    }
-
-    /// Identifies starting point of a block sequence.
-    #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)]
-    pub enum FromBlock<Hash, Number> {
-        /// Start with given hash.
-        Hash(Hash),
-        /// Start with given block number.
-        Number(Number),
-    }
-
-    /// A network message.
-    #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)]
-    pub enum Message<Header, Hash, Number, Extrinsic> {
-        /// Status packet.
-        Status(Status<Hash, Number>),
-        /// Block request.
-        BlockRequest(BlockRequest<Hash, Number>),
-        /// Block response.
-        BlockResponse(BlockResponse<Header, Hash, Extrinsic>),
-        /// Block announce.
-        BlockAnnounce(BlockAnnounce<Header>),
-        /// Transactions.
-        Transactions(Transactions<Extrinsic>),
-        /// Consensus protocol message.
-        Consensus(ConsensusMessage),
-        /// Remote method call request.
-        RemoteCallRequest(RemoteCallRequest<Hash>),
-        /// Remote method call response.
-        RemoteCallResponse(RemoteCallResponse),
-        /// Remote storage read request.
-        RemoteReadRequest(RemoteReadRequest<Hash>),
-        /// Remote storage read response.
-        RemoteReadResponse(RemoteReadResponse),
-        /// Remote header request.
-        RemoteHeaderRequest(RemoteHeaderRequest<Number>),
-        /// Remote header response.
-        RemoteHeaderResponse(RemoteHeaderResponse<Header>
), - /// Remote changes request. - RemoteChangesRequest(RemoteChangesRequest), - /// Remote changes reponse. - RemoteChangesResponse(RemoteChangesResponse), - /// Chain-specific message - #[codec(index = "255")] - ChainSpecific(Vec), - } - - impl CustomMessage for Message - where Self: Decode + Encode - { - fn into_bytes(self) -> Vec { - self.encode() - } - - fn from_bytes(bytes: &[u8]) -> Result { - Decode::decode(&mut &bytes[..]).ok_or(()) - } - - fn request_id(&self) -> CustomMessageId { - match *self { - Message::Status(_) => CustomMessageId::OneWay, - Message::BlockRequest(ref req) => CustomMessageId::Request(req.id), - Message::BlockResponse(ref resp) => CustomMessageId::Response(resp.id), - Message::BlockAnnounce(_) => CustomMessageId::OneWay, - Message::Transactions(_) => CustomMessageId::OneWay, - Message::Consensus(_) => CustomMessageId::OneWay, - Message::RemoteCallRequest(ref req) => CustomMessageId::Request(req.id), - Message::RemoteCallResponse(ref resp) => CustomMessageId::Response(resp.id), - Message::RemoteReadRequest(ref req) => CustomMessageId::Request(req.id), - Message::RemoteReadResponse(ref resp) => CustomMessageId::Response(resp.id), - Message::RemoteHeaderRequest(ref req) => CustomMessageId::Request(req.id), - Message::RemoteHeaderResponse(ref resp) => CustomMessageId::Response(resp.id), - Message::RemoteChangesRequest(ref req) => CustomMessageId::Request(req.id), - Message::RemoteChangesResponse(ref resp) => CustomMessageId::Response(resp.id), - Message::ChainSpecific(_) => CustomMessageId::OneWay, - } - } - } - - /// Status sent on connection. - #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] - pub struct Status { - /// Protocol version. - pub version: u32, - /// Minimum supported version. - pub min_supported_version: u32, - /// Supported roles. - pub roles: Roles, - /// Best block number. - pub best_number: Number, - /// Best block hash. - pub best_hash: Hash, - /// Genesis block hash. - pub genesis_hash: Hash, - /// Chain-specific status. - pub chain_status: Vec, - } - - /// Request block data from a peer. - #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] - pub struct BlockRequest { - /// Unique request id. - pub id: RequestId, - /// Bits of block data to request. - pub fields: BlockAttributes, - /// Start from this block. - pub from: FromBlock, - /// End at this block. An implementation defined maximum is used when unspecified. - pub to: Option, - /// Sequence direction. - pub direction: Direction, - /// Maximum number of blocks to return. An implementation defined maximum is used when unspecified. - pub max: Option, - } - - /// Response to `BlockRequest` - #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] - pub struct BlockResponse { - /// Id of a request this response was made for. - pub id: RequestId, - /// Block data for the requested sequence. - pub blocks: Vec>, - } - - /// Announce a new complete relay chain block on the network. - #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] - pub struct BlockAnnounce { - /// New block header. - pub header: H, - } - - #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] - /// Remote call request. - pub struct RemoteCallRequest { - /// Unique request id. - pub id: RequestId, - /// Block at which to perform call. - pub block: H, - /// Method name. - pub method: String, - /// Call data. - pub data: Vec, - } - - #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] - /// Remote storage read request. - pub struct RemoteReadRequest { - /// Unique request id. 
-        pub id: RequestId,
-        /// Block at which to perform call.
-        pub block: H,
-        /// Storage key.
-        pub key: Vec<u8>,
-    }
-
-    #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)]
-    /// Remote header request.
-    pub struct RemoteHeaderRequest<N> {
-        /// Unique request id.
-        pub id: RequestId,
-        /// Block number to request header for.
-        pub block: N,
-    }
-
-    #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)]
-    /// Remote header response.
-    pub struct RemoteHeaderResponse<Header> {
-        /// Id of a request this response was made for.
-        pub id: RequestId,
-        /// Header. None if proof generation has failed (e.g. header is unknown).
-        pub header: Option<Header>,
-        /// Header proof.
-        pub proof: Vec<Vec<u8>>,
-    }
-
-    #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)]
-    /// Remote changes request.
-    pub struct RemoteChangesRequest<H> {
-        /// Unique request id.
-        pub id: RequestId,
-        /// Hash of the first block of the range (including first) where changes are requested.
-        pub first: H,
-        /// Hash of the last block of the range (including last) where changes are requested.
-        pub last: H,
-        /// Hash of the first block for which the requester has the changes trie root. All other
-        /// affected roots must be proved.
-        pub min: H,
-        /// Hash of the last block that we can use when querying changes.
-        pub max: H,
-        /// Storage key which changes are requested.
-        pub key: Vec<u8>,
-    }
-
-    #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)]
-    /// Remote changes response.
-    pub struct RemoteChangesResponse<N, H> {
-        /// Id of a request this response was made for.
-        pub id: RequestId,
-        /// Proof has been generated using block with this number as a max block. Should be
-        /// less than or equal to the RemoteChangesRequest::max block number.
-        pub max: N,
-        /// Changes proof.
-        pub proof: Vec<Vec<u8>>,
-        /// Changes tries roots missing on the requester's node.
-        pub roots: Vec<(N, H)>,
-        /// Missing changes tries roots proof.
-        pub roots_proof: Vec<Vec<u8>>,
-    }
+    use super::{
+        BlockAttributes, ConsensusEngineId, Direction, RemoteCallResponse, RemoteReadResponse,
+        RequestId, Transactions,
+    };
+    use crate::config::Roles;
+    use network_libp2p::{CustomMessage, CustomMessageId};
+    use parity_codec::{Decode, Encode};
+    use runtime_primitives::Justification;
+    /// Consensus is mostly opaque to us
+    #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)]
+    pub struct ConsensusMessage {
+        /// Identifies consensus engine.
+        pub engine_id: ConsensusEngineId,
+        /// Message payload.
+        pub data: Vec<u8>,
+    }
+
+    /// Block data sent in the response.
+    #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)]
+    pub struct BlockData<Header, Hash, Extrinsic> {
+        /// Block header hash.
+        pub hash: Hash,
+        /// Block header if requested.
+        pub header: Option<Header>,
+        /// Block body if requested.
+        pub body: Option<Vec<Extrinsic>>,
+        /// Block receipt if requested.
+        pub receipt: Option<Vec<u8>>,
+        /// Block message queue if requested.
+        pub message_queue: Option<Vec<u8>>,
+        /// Justification if requested.
+        pub justification: Option<Justification>,
+    }
+
+    /// Identifies starting point of a block sequence.
+    #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)]
+    pub enum FromBlock<Hash, Number> {
+        /// Start with given hash.
+        Hash(Hash),
+        /// Start with given block number.
+        Number(Number),
+    }
+
+    /// A network message.
+    #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)]
+    pub enum Message<Header, Hash, Number, Extrinsic> {
+        /// Status packet.
+        Status(Status<Hash, Number>),
+        /// Block request.
+        BlockRequest(BlockRequest<Hash, Number>),
+        /// Block response.
+        BlockResponse(BlockResponse<Header, Hash, Extrinsic>),
+        /// Block announce.
+        BlockAnnounce(BlockAnnounce<Header>),
+        /// Transactions.
+        Transactions(Transactions<Extrinsic>),
+        /// Consensus protocol message.
+        Consensus(ConsensusMessage),
+        /// Remote method call request.
+        RemoteCallRequest(RemoteCallRequest<Hash>),
+        /// Remote method call response.
+        RemoteCallResponse(RemoteCallResponse),
+        /// Remote storage read request.
+        RemoteReadRequest(RemoteReadRequest<Hash>),
+        /// Remote storage read response.
+        RemoteReadResponse(RemoteReadResponse),
+        /// Remote header request.
+        RemoteHeaderRequest(RemoteHeaderRequest<Number>),
+        /// Remote header response.
+        RemoteHeaderResponse(RemoteHeaderResponse<Header>
), + /// Remote changes request. + RemoteChangesRequest(RemoteChangesRequest), + /// Remote changes reponse. + RemoteChangesResponse(RemoteChangesResponse), + /// Chain-specific message + #[codec(index = "255")] + ChainSpecific(Vec), + } + + impl CustomMessage for Message + where + Self: Decode + Encode, + { + fn into_bytes(self) -> Vec { + self.encode() + } + + fn from_bytes(bytes: &[u8]) -> Result { + Decode::decode(&mut &bytes[..]).ok_or(()) + } + + fn request_id(&self) -> CustomMessageId { + match *self { + Message::Status(_) => CustomMessageId::OneWay, + Message::BlockRequest(ref req) => CustomMessageId::Request(req.id), + Message::BlockResponse(ref resp) => CustomMessageId::Response(resp.id), + Message::BlockAnnounce(_) => CustomMessageId::OneWay, + Message::Transactions(_) => CustomMessageId::OneWay, + Message::Consensus(_) => CustomMessageId::OneWay, + Message::RemoteCallRequest(ref req) => CustomMessageId::Request(req.id), + Message::RemoteCallResponse(ref resp) => CustomMessageId::Response(resp.id), + Message::RemoteReadRequest(ref req) => CustomMessageId::Request(req.id), + Message::RemoteReadResponse(ref resp) => CustomMessageId::Response(resp.id), + Message::RemoteHeaderRequest(ref req) => CustomMessageId::Request(req.id), + Message::RemoteHeaderResponse(ref resp) => CustomMessageId::Response(resp.id), + Message::RemoteChangesRequest(ref req) => CustomMessageId::Request(req.id), + Message::RemoteChangesResponse(ref resp) => CustomMessageId::Response(resp.id), + Message::ChainSpecific(_) => CustomMessageId::OneWay, + } + } + } + + /// Status sent on connection. + #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] + pub struct Status { + /// Protocol version. + pub version: u32, + /// Minimum supported version. + pub min_supported_version: u32, + /// Supported roles. + pub roles: Roles, + /// Best block number. + pub best_number: Number, + /// Best block hash. + pub best_hash: Hash, + /// Genesis block hash. + pub genesis_hash: Hash, + /// Chain-specific status. + pub chain_status: Vec, + } + + /// Request block data from a peer. + #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] + pub struct BlockRequest { + /// Unique request id. + pub id: RequestId, + /// Bits of block data to request. + pub fields: BlockAttributes, + /// Start from this block. + pub from: FromBlock, + /// End at this block. An implementation defined maximum is used when unspecified. + pub to: Option, + /// Sequence direction. + pub direction: Direction, + /// Maximum number of blocks to return. An implementation defined maximum is used when unspecified. + pub max: Option, + } + + /// Response to `BlockRequest` + #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] + pub struct BlockResponse { + /// Id of a request this response was made for. + pub id: RequestId, + /// Block data for the requested sequence. + pub blocks: Vec>, + } + + /// Announce a new complete relay chain block on the network. + #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] + pub struct BlockAnnounce { + /// New block header. + pub header: H, + } + + #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] + /// Remote call request. + pub struct RemoteCallRequest { + /// Unique request id. + pub id: RequestId, + /// Block at which to perform call. + pub block: H, + /// Method name. + pub method: String, + /// Call data. + pub data: Vec, + } + + #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] + /// Remote storage read request. + pub struct RemoteReadRequest { + /// Unique request id. 
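// Editor's note (illustrative, not part of the original patch): the
// `request_id` method above is what lets the lower network layer pair
// requests with responses. For example, a `BlockRequest { id: 7, .. }` maps
// to `CustomMessageId::Request(7)` and the matching
// `BlockResponse { id: 7, .. }` to `CustomMessageId::Response(7)`, while
// gossip-style packets (status, transactions, consensus) are `OneWay` and
// expect no reply.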
+        pub id: RequestId,
+        /// Block at which to perform call.
+        pub block: H,
+        /// Storage key.
+        pub key: Vec<u8>,
+    }
+
+    #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)]
+    /// Remote header request.
+    pub struct RemoteHeaderRequest<N> {
+        /// Unique request id.
+        pub id: RequestId,
+        /// Block number to request header for.
+        pub block: N,
+    }
+
+    #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)]
+    /// Remote header response.
+    pub struct RemoteHeaderResponse<Header> {
+        /// Id of a request this response was made for.
+        pub id: RequestId,
+        /// Header. None if proof generation has failed (e.g. header is unknown).
+        pub header: Option<Header>,
+        /// Header proof.
+        pub proof: Vec<Vec<u8>>,
+    }
+
+    #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)]
+    /// Remote changes request.
+    pub struct RemoteChangesRequest<H> {
+        /// Unique request id.
+        pub id: RequestId,
+        /// Hash of the first block of the range (including first) where changes are requested.
+        pub first: H,
+        /// Hash of the last block of the range (including last) where changes are requested.
+        pub last: H,
+        /// Hash of the first block for which the requester has the changes trie root. All other
+        /// affected roots must be proved.
+        pub min: H,
+        /// Hash of the last block that we can use when querying changes.
+        pub max: H,
+        /// Storage key which changes are requested.
+        pub key: Vec<u8>,
+    }
+
+    #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)]
+    /// Remote changes response.
+    pub struct RemoteChangesResponse<N, H> {
+        /// Id of a request this response was made for.
+        pub id: RequestId,
+        /// Proof has been generated using block with this number as a max block. Should be
+        /// less than or equal to the RemoteChangesRequest::max block number.
+        pub max: N,
+        /// Changes proof.
+        pub proof: Vec<Vec<u8>>,
+        /// Changes tries roots missing on the requester's node.
+        pub roots: Vec<(N, H)>,
+        /// Missing changes tries roots proof.
+        pub roots_proof: Vec<Vec<u8>>,
+    }
}
diff --git a/core/network/src/on_demand.rs b/core/network/src/on_demand.rs
index 6a89014465..94842c6adf 100644
--- a/core/network/src/on_demand.rs
+++ b/core/network/src/on_demand.rs
@@ -16,23 +16,25 @@
//! On-demand requests service.

-use std::collections::{HashMap, VecDeque};
-use std::sync::Arc;
-use std::time::{Instant, Duration};
-use log::trace;
-use futures::{Async, Future, Poll};
+use crate::config::Roles;
+use crate::message;
+use crate::service::{NetworkChan, NetworkMsg};
+use client::error::{Error as ClientError, ErrorKind as ClientErrorKind};
+use client::light::fetcher::{
+    ChangesProof, FetchChecker, Fetcher, RemoteCallRequest, RemoteChangesRequest,
+    RemoteHeaderRequest, RemoteReadRequest,
+};
use futures::sync::oneshot::{channel, Receiver, Sender as OneShotSender};
-use linked_hash_map::LinkedHashMap;
+use futures::{Async, Future, Poll};
use linked_hash_map::Entry;
+use linked_hash_map::LinkedHashMap;
+use log::trace;
+use network_libp2p::{PeerId, Severity};
use parking_lot::Mutex;
-use client::{error::{Error as ClientError, ErrorKind as ClientErrorKind}};
-use client::light::fetcher::{Fetcher, FetchChecker, RemoteHeaderRequest,
-    RemoteCallRequest, RemoteReadRequest, RemoteChangesRequest, ChangesProof};
-use crate::message;
-use network_libp2p::{Severity, PeerId};
-use crate::config::Roles;
-use crate::service::{NetworkChan, NetworkMsg};
use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, NumberFor};
+use std::collections::{HashMap, VecDeque};
+use std::sync::Arc;
+use std::time::{Duration, Instant};

/// Remote request timeout.
const REQUEST_TIMEOUT: Duration = Duration::from_secs(15);
@@ -41,981 +43,1208 @@ const RETRY_COUNT: usize = 1;

/// On-demand service API.
pub trait OnDemandService<Block: BlockT>: Send + Sync {
-    /// When new node is connected.
-    fn on_connect(&self, peer: PeerId, role: Roles, best_number: NumberFor<Block>);
+    /// When new node is connected.
+    fn on_connect(&self, peer: PeerId, role: Roles, best_number: NumberFor<Block>);

-    /// When block is announced by the peer.
-    fn on_block_announce(&self, peer: PeerId, best_number: NumberFor<Block>);
+    /// When block is announced by the peer.
+    fn on_block_announce(&self, peer: PeerId, best_number: NumberFor<Block>);

-    /// When node is disconnected.
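// Editor's note (illustrative, not part of the original patch): the two
// constants above drive the retry machinery that follows. Every dispatched
// request records a `timestamp`; `maintain_peers` treats a request as dead
// once `now - timestamp >= REQUEST_TIMEOUT` (15s), reports the peer with
// `Severity::Timeout`, and requeues the request. `RETRY_COUNT` (1) is the
// default number of times a failed request is re-dispatched to another peer
// before the caller receives `RemoteFetchFailed`; see
// `receives_remote_failure_after_retry_count_failures` in the tests below.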
- fn on_disconnect(&self, peer: PeerId); + /// When node is disconnected. + fn on_disconnect(&self, peer: PeerId); - /// Maintain peers requests. - fn maintain_peers(&self); + /// Maintain peers requests. + fn maintain_peers(&self); - /// When header response is received from remote node. - fn on_remote_header_response( - &self, - peer: PeerId, - response: message::RemoteHeaderResponse - ); + /// When header response is received from remote node. + fn on_remote_header_response( + &self, + peer: PeerId, + response: message::RemoteHeaderResponse, + ); - /// When read response is received from remote node. - fn on_remote_read_response(&self, peer: PeerId, response: message::RemoteReadResponse); + /// When read response is received from remote node. + fn on_remote_read_response(&self, peer: PeerId, response: message::RemoteReadResponse); - /// When call response is received from remote node. - fn on_remote_call_response(&self, peer: PeerId, response: message::RemoteCallResponse); + /// When call response is received from remote node. + fn on_remote_call_response(&self, peer: PeerId, response: message::RemoteCallResponse); - /// When changes response is received from remote node. - fn on_remote_changes_response( - &self, - peer: PeerId, - response: message::RemoteChangesResponse, Block::Hash> - ); + /// When changes response is received from remote node. + fn on_remote_changes_response( + &self, + peer: PeerId, + response: message::RemoteChangesResponse, Block::Hash>, + ); } /// On-demand requests service. Dispatches requests to appropriate peers. pub struct OnDemand { - core: Mutex>, - checker: Arc>, - network_sender: Mutex>>, + core: Mutex>, + checker: Arc>, + network_sender: Mutex>>, } /// On-demand remote call response. pub struct RemoteResponse { - receiver: Receiver>, + receiver: Receiver>, } #[derive(Default)] struct OnDemandCore { - next_request_id: u64, - pending_requests: VecDeque>, - active_peers: LinkedHashMap>, - idle_peers: VecDeque, - best_blocks: HashMap>, + next_request_id: u64, + pending_requests: VecDeque>, + active_peers: LinkedHashMap>, + idle_peers: VecDeque, + best_blocks: HashMap>, } struct Request { - id: u64, - timestamp: Instant, - retry_count: usize, - data: RequestData, + id: u64, + timestamp: Instant, + retry_count: usize, + data: RequestData, } enum RequestData { - RemoteHeader(RemoteHeaderRequest, OneShotSender>), - RemoteRead(RemoteReadRequest, OneShotSender>, ClientError>>), - RemoteCall(RemoteCallRequest, OneShotSender, ClientError>>), - RemoteChanges(RemoteChangesRequest, OneShotSender, u32)>, ClientError>>), + RemoteHeader( + RemoteHeaderRequest, + OneShotSender>, + ), + RemoteRead( + RemoteReadRequest, + OneShotSender>, ClientError>>, + ), + RemoteCall( + RemoteCallRequest, + OneShotSender, ClientError>>, + ), + RemoteChanges( + RemoteChangesRequest, + OneShotSender, u32)>, ClientError>>, + ), } enum Accept { - Ok, - CheckFailed(ClientError, RequestData), - Unexpected(RequestData), + Ok, + CheckFailed(ClientError, RequestData), + Unexpected(RequestData), } impl Future for RemoteResponse { - type Item = T; - type Error = ClientError; - - fn poll(&mut self) -> Poll { - self.receiver.poll() - .map_err(|_| ClientErrorKind::RemoteFetchCancelled.into()) - .and_then(|r| match r { - Async::Ready(Ok(ready)) => Ok(Async::Ready(ready)), - Async::Ready(Err(error)) => Err(error), - Async::NotReady => Ok(Async::NotReady), - }) - } + type Item = T; + type Error = ClientError; + + fn poll(&mut self) -> Poll { + self.receiver + .poll() + .map_err(|_| 
ClientErrorKind::RemoteFetchCancelled.into()) + .and_then(|r| match r { + Async::Ready(Ok(ready)) => Ok(Async::Ready(ready)), + Async::Ready(Err(error)) => Err(error), + Async::NotReady => Ok(Async::NotReady), + }) + } } -impl OnDemand where - B::Header: HeaderT, +impl OnDemand +where + B::Header: HeaderT, { - /// Creates new on-demand service. - pub fn new(checker: Arc>) -> Self { - OnDemand { - checker, - network_sender: Mutex::new(None), - core: Mutex::new(OnDemandCore { - next_request_id: 0, - pending_requests: VecDeque::new(), - active_peers: LinkedHashMap::new(), - idle_peers: VecDeque::new(), - best_blocks: HashMap::new(), - }) - } - } - - /// Sets weak reference to network service. - pub fn set_network_sender(&self, network_sender: NetworkChan) { - self.network_sender.lock().replace(network_sender); - } - - fn send(&self, msg: NetworkMsg) { - let _ = self.network_sender + /// Creates new on-demand service. + pub fn new(checker: Arc>) -> Self { + OnDemand { + checker, + network_sender: Mutex::new(None), + core: Mutex::new(OnDemandCore { + next_request_id: 0, + pending_requests: VecDeque::new(), + active_peers: LinkedHashMap::new(), + idle_peers: VecDeque::new(), + best_blocks: HashMap::new(), + }), + } + } + + /// Sets weak reference to network service. + pub fn set_network_sender(&self, network_sender: NetworkChan) { + self.network_sender.lock().replace(network_sender); + } + + fn send(&self, msg: NetworkMsg) { + let _ = self.network_sender .lock() .as_ref() .expect("1. OnDemand is passed a network sender upon initialization of the service, 2. it should bet set by now") .send(msg); - } - - /// Schedule && dispatch all scheduled requests. - fn schedule_request(&self, retry_count: Option, data: RequestData, result: R) -> R { - let mut core = self.core.lock(); - core.insert(retry_count.unwrap_or(RETRY_COUNT), data); - core.dispatch(self); - result - } - - /// Try to accept response from given peer. - fn accept_response) -> Accept>(&self, rtype: &str, peer: PeerId, request_id: u64, try_accept: F) { - let mut core = self.core.lock(); - let request = match core.remove(peer.clone(), request_id) { - Some(request) => request, - None => { - let reason = format!("Invalid remote {} response from peer", rtype); - self.send(NetworkMsg::ReportPeer(peer.clone(), Severity::Bad(reason))); - core.remove_peer(peer); - return; - }, - }; - - let retry_count = request.retry_count; - let (retry_count, retry_request_data) = match try_accept(request) { - Accept::Ok => (retry_count, None), - Accept::CheckFailed(error, retry_request_data) => { - let reason = format!("Failed to check remote {} response from peer: {}", rtype, error); - self.send(NetworkMsg::ReportPeer(peer.clone(), Severity::Bad(reason))); - core.remove_peer(peer); - - if retry_count > 0 { - (retry_count - 1, Some(retry_request_data)) - } else { - trace!(target: "sync", "Failed to get remote {} response for given number of retries", rtype); - retry_request_data.fail(ClientErrorKind::RemoteFetchFailed.into()); - (0, None) - } - }, - Accept::Unexpected(retry_request_data) => { - let reason = format!("Unexpected response to remote {} from peer", rtype); - self.send(NetworkMsg::ReportPeer(peer.clone(), Severity::Bad(reason))); - core.remove_peer(peer); - - (retry_count, Some(retry_request_data)) - }, - }; - - if let Some(request_data) = retry_request_data { - core.insert(retry_count, request_data); - } - - core.dispatch(self); - } + } + + /// Schedule && dispatch all scheduled requests. 
+ fn schedule_request( + &self, + retry_count: Option, + data: RequestData, + result: R, + ) -> R { + let mut core = self.core.lock(); + core.insert(retry_count.unwrap_or(RETRY_COUNT), data); + core.dispatch(self); + result + } + + /// Try to accept response from given peer. + fn accept_response) -> Accept>( + &self, + rtype: &str, + peer: PeerId, + request_id: u64, + try_accept: F, + ) { + let mut core = self.core.lock(); + let request = match core.remove(peer.clone(), request_id) { + Some(request) => request, + None => { + let reason = format!("Invalid remote {} response from peer", rtype); + self.send(NetworkMsg::ReportPeer(peer.clone(), Severity::Bad(reason))); + core.remove_peer(peer); + return; + } + }; + + let retry_count = request.retry_count; + let (retry_count, retry_request_data) = match try_accept(request) { + Accept::Ok => (retry_count, None), + Accept::CheckFailed(error, retry_request_data) => { + let reason = format!( + "Failed to check remote {} response from peer: {}", + rtype, error + ); + self.send(NetworkMsg::ReportPeer(peer.clone(), Severity::Bad(reason))); + core.remove_peer(peer); + + if retry_count > 0 { + (retry_count - 1, Some(retry_request_data)) + } else { + trace!(target: "sync", "Failed to get remote {} response for given number of retries", rtype); + retry_request_data.fail(ClientErrorKind::RemoteFetchFailed.into()); + (0, None) + } + } + Accept::Unexpected(retry_request_data) => { + let reason = format!("Unexpected response to remote {} from peer", rtype); + self.send(NetworkMsg::ReportPeer(peer.clone(), Severity::Bad(reason))); + core.remove_peer(peer); + + (retry_count, Some(retry_request_data)) + } + }; + + if let Some(request_data) = retry_request_data { + core.insert(retry_count, request_data); + } + + core.dispatch(self); + } } -impl OnDemandService for OnDemand where - B: BlockT, - B::Header: HeaderT, +impl OnDemandService for OnDemand +where + B: BlockT, + B::Header: HeaderT, { - fn on_connect(&self, peer: PeerId, role: Roles, best_number: NumberFor) { - if !role.intersects(Roles::FULL | Roles::AUTHORITY) { - return; - } - - let mut core = self.core.lock(); - core.add_peer(peer, best_number); - core.dispatch(self); - } - - fn on_block_announce(&self, peer: PeerId, best_number: NumberFor) { - let mut core = self.core.lock(); - core.update_peer(peer, best_number); - core.dispatch(self); - } - - fn on_disconnect(&self, peer: PeerId) { - let mut core = self.core.lock(); - core.remove_peer(peer); - core.dispatch(self); - } - - fn maintain_peers(&self) { - let mut core = self.core.lock(); - for bad_peer in core.maintain_peers() { - self.send(NetworkMsg::ReportPeer(bad_peer, Severity::Timeout)); - } - core.dispatch(self); - } - - fn on_remote_header_response(&self, peer: PeerId, response: message::RemoteHeaderResponse) { - self.accept_response("header", peer, response.id, |request| match request.data { - RequestData::RemoteHeader(request, sender) => match self.checker.check_header_proof(&request, response.header, response.proof) { - Ok(response) => { - // we do not bother if receiver has been dropped already - let _ = sender.send(Ok(response)); - Accept::Ok - }, - Err(error) => Accept::CheckFailed(error, RequestData::RemoteHeader(request, sender)), - }, - data @ _ => Accept::Unexpected(data), - }) - } - - fn on_remote_read_response(&self, peer: PeerId, response: message::RemoteReadResponse) { - self.accept_response("read", peer, response.id, |request| match request.data { - RequestData::RemoteRead(request, sender) => match 
self.checker.check_read_proof(&request, response.proof) { - Ok(response) => { - // we do not bother if receiver has been dropped already - let _ = sender.send(Ok(response)); - Accept::Ok - }, - Err(error) => Accept::CheckFailed(error, RequestData::RemoteRead(request, sender)), - }, - data @ _ => Accept::Unexpected(data), - }) - } - - fn on_remote_call_response(&self, peer: PeerId, response: message::RemoteCallResponse) { - self.accept_response("call", peer, response.id, |request| match request.data { - RequestData::RemoteCall(request, sender) => match self.checker.check_execution_proof(&request, response.proof) { - Ok(response) => { - // we do not bother if receiver has been dropped already - let _ = sender.send(Ok(response)); - Accept::Ok - }, - Err(error) => Accept::CheckFailed(error, RequestData::RemoteCall(request, sender)), - }, - data @ _ => Accept::Unexpected(data), - }) - } - - fn on_remote_changes_response(&self, peer: PeerId, response: message::RemoteChangesResponse, B::Hash>) { - self.accept_response("changes", peer, response.id, |request| match request.data { - RequestData::RemoteChanges(request, sender) => match self.checker.check_changes_proof( - &request, ChangesProof { - max_block: response.max, - proof: response.proof, - roots: response.roots.into_iter().collect(), - roots_proof: response.roots_proof, - }) { - Ok(response) => { - // we do not bother if receiver has been dropped already - let _ = sender.send(Ok(response)); - Accept::Ok - }, - Err(error) => Accept::CheckFailed(error, RequestData::RemoteChanges(request, sender)), - }, - data @ _ => Accept::Unexpected(data), - }) - } + fn on_connect(&self, peer: PeerId, role: Roles, best_number: NumberFor) { + if !role.intersects(Roles::FULL | Roles::AUTHORITY) { + return; + } + + let mut core = self.core.lock(); + core.add_peer(peer, best_number); + core.dispatch(self); + } + + fn on_block_announce(&self, peer: PeerId, best_number: NumberFor) { + let mut core = self.core.lock(); + core.update_peer(peer, best_number); + core.dispatch(self); + } + + fn on_disconnect(&self, peer: PeerId) { + let mut core = self.core.lock(); + core.remove_peer(peer); + core.dispatch(self); + } + + fn maintain_peers(&self) { + let mut core = self.core.lock(); + for bad_peer in core.maintain_peers() { + self.send(NetworkMsg::ReportPeer(bad_peer, Severity::Timeout)); + } + core.dispatch(self); + } + + fn on_remote_header_response( + &self, + peer: PeerId, + response: message::RemoteHeaderResponse, + ) { + self.accept_response("header", peer, response.id, |request| match request.data { + RequestData::RemoteHeader(request, sender) => { + match self + .checker + .check_header_proof(&request, response.header, response.proof) + { + Ok(response) => { + // we do not bother if receiver has been dropped already + let _ = sender.send(Ok(response)); + Accept::Ok + } + Err(error) => { + Accept::CheckFailed(error, RequestData::RemoteHeader(request, sender)) + } + } + } + data @ _ => Accept::Unexpected(data), + }) + } + + fn on_remote_read_response(&self, peer: PeerId, response: message::RemoteReadResponse) { + self.accept_response("read", peer, response.id, |request| match request.data { + RequestData::RemoteRead(request, sender) => match self + .checker + .check_read_proof(&request, response.proof) + { + Ok(response) => { + // we do not bother if receiver has been dropped already + let _ = sender.send(Ok(response)); + Accept::Ok + } + Err(error) => Accept::CheckFailed(error, RequestData::RemoteRead(request, sender)), + }, + data @ _ => Accept::Unexpected(data), 
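// Editor's note (illustrative sketch, not part of the original patch): from
// the light-client side all of this is hidden behind `Fetcher`; each
// `remote_*` method returns a `RemoteResponse` future that resolves once
// some peer's proof passes the corresponding `FetchChecker` call above.
// A usage sketch, reusing the request shape from the tests at the bottom of
// this file:
//
//     let response = on_demand.remote_call(RemoteCallRequest {
//         block: Default::default(),
//         header: dummy_header(),
//         method: "test".into(),
//         call_data: vec![],
//         retry_count: None,
//     });
//     let result = response.wait()?; // Ok(Vec<u8>) once a valid proof arrives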
+ }) + } + + fn on_remote_call_response(&self, peer: PeerId, response: message::RemoteCallResponse) { + self.accept_response("call", peer, response.id, |request| match request.data { + RequestData::RemoteCall(request, sender) => match self + .checker + .check_execution_proof(&request, response.proof) + { + Ok(response) => { + // we do not bother if receiver has been dropped already + let _ = sender.send(Ok(response)); + Accept::Ok + } + Err(error) => Accept::CheckFailed(error, RequestData::RemoteCall(request, sender)), + }, + data @ _ => Accept::Unexpected(data), + }) + } + + fn on_remote_changes_response( + &self, + peer: PeerId, + response: message::RemoteChangesResponse, B::Hash>, + ) { + self.accept_response("changes", peer, response.id, |request| match request.data { + RequestData::RemoteChanges(request, sender) => match self.checker.check_changes_proof( + &request, + ChangesProof { + max_block: response.max, + proof: response.proof, + roots: response.roots.into_iter().collect(), + roots_proof: response.roots_proof, + }, + ) { + Ok(response) => { + // we do not bother if receiver has been dropped already + let _ = sender.send(Ok(response)); + Accept::Ok + } + Err(error) => { + Accept::CheckFailed(error, RequestData::RemoteChanges(request, sender)) + } + }, + data @ _ => Accept::Unexpected(data), + }) + } } -impl Fetcher for OnDemand where - B: BlockT, - B::Header: HeaderT, +impl Fetcher for OnDemand +where + B: BlockT, + B::Header: HeaderT, { - type RemoteHeaderResult = RemoteResponse; - type RemoteReadResult = RemoteResponse>>; - type RemoteCallResult = RemoteResponse>; - type RemoteChangesResult = RemoteResponse, u32)>>; - - fn remote_header(&self, request: RemoteHeaderRequest) -> Self::RemoteHeaderResult { - let (sender, receiver) = channel(); - self.schedule_request(request.retry_count.clone(), RequestData::RemoteHeader(request, sender), - RemoteResponse { receiver }) - } - - fn remote_read(&self, request: RemoteReadRequest) -> Self::RemoteReadResult { - let (sender, receiver) = channel(); - self.schedule_request(request.retry_count.clone(), RequestData::RemoteRead(request, sender), - RemoteResponse { receiver }) - } - - fn remote_call(&self, request: RemoteCallRequest) -> Self::RemoteCallResult { - let (sender, receiver) = channel(); - self.schedule_request(request.retry_count.clone(), RequestData::RemoteCall(request, sender), - RemoteResponse { receiver }) - } - - fn remote_changes(&self, request: RemoteChangesRequest) -> Self::RemoteChangesResult { - let (sender, receiver) = channel(); - self.schedule_request(request.retry_count.clone(), RequestData::RemoteChanges(request, sender), - RemoteResponse { receiver }) - } + type RemoteHeaderResult = RemoteResponse; + type RemoteReadResult = RemoteResponse>>; + type RemoteCallResult = RemoteResponse>; + type RemoteChangesResult = RemoteResponse, u32)>>; + + fn remote_header(&self, request: RemoteHeaderRequest) -> Self::RemoteHeaderResult { + let (sender, receiver) = channel(); + self.schedule_request( + request.retry_count.clone(), + RequestData::RemoteHeader(request, sender), + RemoteResponse { receiver }, + ) + } + + fn remote_read(&self, request: RemoteReadRequest) -> Self::RemoteReadResult { + let (sender, receiver) = channel(); + self.schedule_request( + request.retry_count.clone(), + RequestData::RemoteRead(request, sender), + RemoteResponse { receiver }, + ) + } + + fn remote_call(&self, request: RemoteCallRequest) -> Self::RemoteCallResult { + let (sender, receiver) = channel(); + self.schedule_request( + 
request.retry_count.clone(), + RequestData::RemoteCall(request, sender), + RemoteResponse { receiver }, + ) + } + + fn remote_changes( + &self, + request: RemoteChangesRequest, + ) -> Self::RemoteChangesResult { + let (sender, receiver) = channel(); + self.schedule_request( + request.retry_count.clone(), + RequestData::RemoteChanges(request, sender), + RemoteResponse { receiver }, + ) + } } -impl OnDemandCore where - B: BlockT, - B::Header: HeaderT, +impl OnDemandCore +where + B: BlockT, + B::Header: HeaderT, { - pub fn add_peer(&mut self, peer: PeerId, best_number: NumberFor) { - self.idle_peers.push_back(peer.clone()); - self.best_blocks.insert(peer, best_number); - } - - pub fn update_peer(&mut self, peer: PeerId, best_number: NumberFor) { - self.best_blocks.insert(peer, best_number); - } - - pub fn remove_peer(&mut self, peer: PeerId) { - self.best_blocks.remove(&peer); - - if let Some(request) = self.active_peers.remove(&peer) { - self.pending_requests.push_front(request); - return; - } - - if let Some(idle_index) = self.idle_peers.iter().position(|i| *i == peer) { - self.idle_peers.swap_remove_back(idle_index); - } - } - - pub fn maintain_peers(&mut self) -> Vec { - let now = Instant::now(); - let mut bad_peers = Vec::new(); - loop { - match self.active_peers.front() { - Some((_, request)) if now - request.timestamp >= REQUEST_TIMEOUT => (), - _ => return bad_peers, - } - - let (bad_peer, request) = self.active_peers.pop_front().expect("front() is Some as checked above"); - self.pending_requests.push_front(request); - bad_peers.push(bad_peer); - } - } - - pub fn insert(&mut self, retry_count: usize, data: RequestData) { - let request_id = self.next_request_id; - self.next_request_id += 1; - - self.pending_requests.push_back(Request { - id: request_id, - timestamp: Instant::now(), - retry_count, - data, - }); - } - - pub fn remove(&mut self, peer: PeerId, id: u64) -> Option> { - match self.active_peers.entry(peer.clone()) { - Entry::Occupied(entry) => match entry.get().id == id { - true => { - self.idle_peers.push_back(peer); - Some(entry.remove()) - }, - false => None, - }, - Entry::Vacant(_) => None, - } - } - - pub fn dispatch(&mut self, on_demand: &OnDemand) { - - let mut last_peer = self.idle_peers.back().cloned(); - let mut unhandled_requests = VecDeque::new(); - - loop { - let peer = match self.idle_peers.pop_front() { - Some(peer) => peer, - None => break, - }; - - // check if request can (optimistically) be processed by the peer - let can_be_processed_by_peer = { - let request = match self.pending_requests.front() { - Some(r) => r, - None => { - self.idle_peers.push_front(peer); - break; - }, - }; - let peer_best_block = self.best_blocks.get(&peer) - .expect("entries are inserted into best_blocks when peer is connected; + pub fn add_peer(&mut self, peer: PeerId, best_number: NumberFor) { + self.idle_peers.push_back(peer.clone()); + self.best_blocks.insert(peer, best_number); + } + + pub fn update_peer(&mut self, peer: PeerId, best_number: NumberFor) { + self.best_blocks.insert(peer, best_number); + } + + pub fn remove_peer(&mut self, peer: PeerId) { + self.best_blocks.remove(&peer); + + if let Some(request) = self.active_peers.remove(&peer) { + self.pending_requests.push_front(request); + return; + } + + if let Some(idle_index) = self.idle_peers.iter().position(|i| *i == peer) { + self.idle_peers.swap_remove_back(idle_index); + } + } + + pub fn maintain_peers(&mut self) -> Vec { + let now = Instant::now(); + let mut bad_peers = Vec::new(); + loop { + match 
self.active_peers.front() { + Some((_, request)) if now - request.timestamp >= REQUEST_TIMEOUT => (), + _ => return bad_peers, + } + + let (bad_peer, request) = self + .active_peers + .pop_front() + .expect("front() is Some as checked above"); + self.pending_requests.push_front(request); + bad_peers.push(bad_peer); + } + } + + pub fn insert(&mut self, retry_count: usize, data: RequestData) { + let request_id = self.next_request_id; + self.next_request_id += 1; + + self.pending_requests.push_back(Request { + id: request_id, + timestamp: Instant::now(), + retry_count, + data, + }); + } + + pub fn remove(&mut self, peer: PeerId, id: u64) -> Option> { + match self.active_peers.entry(peer.clone()) { + Entry::Occupied(entry) => match entry.get().id == id { + true => { + self.idle_peers.push_back(peer); + Some(entry.remove()) + } + false => None, + }, + Entry::Vacant(_) => None, + } + } + + pub fn dispatch(&mut self, on_demand: &OnDemand) { + let mut last_peer = self.idle_peers.back().cloned(); + let mut unhandled_requests = VecDeque::new(); + + loop { + let peer = match self.idle_peers.pop_front() { + Some(peer) => peer, + None => break, + }; + + // check if request can (optimistically) be processed by the peer + let can_be_processed_by_peer = { + let request = match self.pending_requests.front() { + Some(r) => r, + None => { + self.idle_peers.push_front(peer); + break; + } + }; + let peer_best_block = self.best_blocks.get(&peer).expect( + "entries are inserted into best_blocks when peer is connected; entries are removed from best_blocks when peer is disconnected; - peer is in idle_peers and thus connected; qed"); - request.required_block() <= *peer_best_block - }; - - if !can_be_processed_by_peer { - // return peer to the back of the queue - self.idle_peers.push_back(peer.clone()); - - // we have enumerated all peers and noone can handle request - if Some(peer) == last_peer { - let request = self.pending_requests.pop_front().expect("checked in loop condition; qed"); - unhandled_requests.push_back(request); - last_peer = self.idle_peers.back().cloned(); - } - - continue; - } - - last_peer = self.idle_peers.back().cloned(); - - let mut request = self.pending_requests.pop_front().expect("checked in loop condition; qed"); - request.timestamp = Instant::now(); - trace!(target: "sync", "Dispatching remote request {} to peer {}", request.id, peer); - on_demand.send(NetworkMsg::Outgoing(peer.clone(), request.message())); - self.active_peers.insert(peer, request); - } - - self.pending_requests.append(&mut unhandled_requests); - } + peer is in idle_peers and thus connected; qed", + ); + request.required_block() <= *peer_best_block + }; + + if !can_be_processed_by_peer { + // return peer to the back of the queue + self.idle_peers.push_back(peer.clone()); + + // we have enumerated all peers and noone can handle request + if Some(peer) == last_peer { + let request = self + .pending_requests + .pop_front() + .expect("checked in loop condition; qed"); + unhandled_requests.push_back(request); + last_peer = self.idle_peers.back().cloned(); + } + + continue; + } + + last_peer = self.idle_peers.back().cloned(); + + let mut request = self + .pending_requests + .pop_front() + .expect("checked in loop condition; qed"); + request.timestamp = Instant::now(); + trace!(target: "sync", "Dispatching remote request {} to peer {}", request.id, peer); + on_demand.send(NetworkMsg::Outgoing(peer.clone(), request.message())); + self.active_peers.insert(peer, request); + } + + self.pending_requests.append(&mut 
unhandled_requests); + } } impl Request { - pub fn required_block(&self) -> NumberFor { - match self.data { - RequestData::RemoteHeader(ref data, _) => data.block, - RequestData::RemoteRead(ref data, _) => *data.header.number(), - RequestData::RemoteCall(ref data, _) => *data.header.number(), - RequestData::RemoteChanges(ref data, _) => data.max_block.0, - } - } - - pub fn message(&self) -> message::Message { - match self.data { - RequestData::RemoteHeader(ref data, _) => - message::generic::Message::RemoteHeaderRequest(message::RemoteHeaderRequest { - id: self.id, - block: data.block, - }), - RequestData::RemoteRead(ref data, _) => - message::generic::Message::RemoteReadRequest(message::RemoteReadRequest { - id: self.id, - block: data.block, - key: data.key.clone(), - }), - RequestData::RemoteCall(ref data, _) => - message::generic::Message::RemoteCallRequest(message::RemoteCallRequest { - id: self.id, - block: data.block, - method: data.method.clone(), - data: data.call_data.clone(), - }), - RequestData::RemoteChanges(ref data, _) => - message::generic::Message::RemoteChangesRequest(message::RemoteChangesRequest { - id: self.id, - first: data.first_block.1.clone(), - last: data.last_block.1.clone(), - min: data.tries_roots.1.clone(), - max: data.max_block.1.clone(), - key: data.key.clone(), - }), - } - } + pub fn required_block(&self) -> NumberFor { + match self.data { + RequestData::RemoteHeader(ref data, _) => data.block, + RequestData::RemoteRead(ref data, _) => *data.header.number(), + RequestData::RemoteCall(ref data, _) => *data.header.number(), + RequestData::RemoteChanges(ref data, _) => data.max_block.0, + } + } + + pub fn message(&self) -> message::Message { + match self.data { + RequestData::RemoteHeader(ref data, _) => { + message::generic::Message::RemoteHeaderRequest(message::RemoteHeaderRequest { + id: self.id, + block: data.block, + }) + } + RequestData::RemoteRead(ref data, _) => { + message::generic::Message::RemoteReadRequest(message::RemoteReadRequest { + id: self.id, + block: data.block, + key: data.key.clone(), + }) + } + RequestData::RemoteCall(ref data, _) => { + message::generic::Message::RemoteCallRequest(message::RemoteCallRequest { + id: self.id, + block: data.block, + method: data.method.clone(), + data: data.call_data.clone(), + }) + } + RequestData::RemoteChanges(ref data, _) => { + message::generic::Message::RemoteChangesRequest(message::RemoteChangesRequest { + id: self.id, + first: data.first_block.1.clone(), + last: data.last_block.1.clone(), + min: data.tries_roots.1.clone(), + max: data.max_block.1.clone(), + key: data.key.clone(), + }) + } + } + } } impl RequestData { - pub fn fail(self, error: ClientError) { - // don't care if anyone is listening - match self { - RequestData::RemoteHeader(_, sender) => { let _ = sender.send(Err(error)); }, - RequestData::RemoteCall(_, sender) => { let _ = sender.send(Err(error)); }, - RequestData::RemoteRead(_, sender) => { let _ = sender.send(Err(error)); }, - RequestData::RemoteChanges(_, sender) => { let _ = sender.send(Err(error)); }, - } - } + pub fn fail(self, error: ClientError) { + // don't care if anyone is listening + match self { + RequestData::RemoteHeader(_, sender) => { + let _ = sender.send(Err(error)); + } + RequestData::RemoteCall(_, sender) => { + let _ = sender.send(Err(error)); + } + RequestData::RemoteRead(_, sender) => { + let _ = sender.send(Err(error)); + } + RequestData::RemoteChanges(_, sender) => { + let _ = sender.send(Err(error)); + } + } + } } #[cfg(test)] pub mod tests { - use 
std::sync::Arc; - use std::time::Instant; - use futures::Future; - use runtime_primitives::traits::NumberFor; - use client::{error::{ErrorKind as ClientErrorKind, Result as ClientResult}}; - use client::light::fetcher::{Fetcher, FetchChecker, RemoteHeaderRequest, - RemoteCallRequest, RemoteReadRequest, RemoteChangesRequest, ChangesProof}; - use crate::config::Roles; - use crate::message; - use network_libp2p::{PeerId, Severity}; - use crate::service::{network_channel, NetworkPort, NetworkMsg}; - use super::{REQUEST_TIMEOUT, OnDemand, OnDemandService}; - use test_client::runtime::{changes_trie_config, Block, Header}; - - pub struct DummyExecutor; - struct DummyFetchChecker { ok: bool } - - impl FetchChecker for DummyFetchChecker { - fn check_header_proof( - &self, - _request: &RemoteHeaderRequest
<Header::Number>,
-        header: Option<Header>,
-        _remote_proof: Vec<Vec<u8>>
-    ) -> ClientResult<Header> {
-        match self.ok {
-            true if header.is_some() => Ok(header.unwrap()),
-            _ => Err(ClientErrorKind::Backend("Test error".into()).into()),
-        }
-    }
-
-    fn check_read_proof(&self, _: &RemoteReadRequest<Header>, _: Vec<Vec<u8>>) -> ClientResult<Option<Vec<u8>>> {
-        match self.ok {
-            true => Ok(Some(vec![42])),
-            false => Err(ClientErrorKind::Backend("Test error".into()).into()),
-        }
-    }
-
-    fn check_execution_proof(&self, _: &RemoteCallRequest<Header>, _: Vec<Vec<u8>>) -> ClientResult<Vec<u8>> {
-        match self.ok {
-            true => Ok(vec![42]),
-            false => Err(ClientErrorKind::Backend("Test error".into()).into()),
-        }
-    }
-
-    fn check_changes_proof(&self, _: &RemoteChangesRequest<Header>, _: ChangesProof<Header>
) -> ClientResult, u32)>> { - match self.ok { - true => Ok(vec![(100, 2)]), - false => Err(ClientErrorKind::Backend("Test error".into()).into()), - } - } - } - - fn dummy(ok: bool) -> (Arc, Arc>) { - let executor = Arc::new(DummyExecutor); - let service = Arc::new(OnDemand::new(Arc::new(DummyFetchChecker { ok }))); - (executor, service) - } - - fn total_peers(on_demand: &OnDemand) -> usize { - let core = on_demand.core.lock(); - core.idle_peers.len() + core.active_peers.len() - } - - fn receive_call_response(on_demand: &OnDemand, peer: PeerId, id: message::RequestId) { - on_demand.on_remote_call_response(peer, message::RemoteCallResponse { - id: id, - proof: vec![vec![2]], - }); - } - - fn dummy_header() -> Header { - Header { - parent_hash: Default::default(), - number: 0, - state_root: Default::default(), - extrinsics_root: Default::default(), - digest: Default::default(), - } - } - - fn assert_disconnected_peer(network_port: NetworkPort, expected_severity: Severity) { - let mut disconnect_count = 0; - while let Ok(msg) = network_port.receiver().try_recv() { - match msg { - NetworkMsg::ReportPeer(_, severity) => { - if severity == expected_severity { - disconnect_count = disconnect_count + 1; - } - }, - _ => {}, - } - } - assert_eq!(disconnect_count, 1); - } - - #[test] - fn knows_about_peers_roles() { - let (_, on_demand) = dummy(true); - let peer0 = PeerId::random(); - let peer1 = PeerId::random(); - let peer2 = PeerId::random(); - on_demand.on_connect(peer0, Roles::LIGHT, 1000); - on_demand.on_connect(peer1.clone(), Roles::FULL, 2000); - on_demand.on_connect(peer2.clone(), Roles::AUTHORITY, 3000); - assert_eq!(vec![peer1.clone(), peer2.clone()], on_demand.core.lock().idle_peers.iter().cloned().collect::>()); - assert_eq!(on_demand.core.lock().best_blocks.get(&peer1), Some(&2000)); - assert_eq!(on_demand.core.lock().best_blocks.get(&peer2), Some(&3000)); - } - - #[test] - fn disconnects_from_idle_peer() { - let peer0 = PeerId::random(); - - let (_, on_demand) = dummy(true); - on_demand.on_connect(peer0.clone(), Roles::FULL, 100); - assert_eq!(1, total_peers(&*on_demand)); - assert!(!on_demand.core.lock().best_blocks.is_empty()); - - on_demand.on_disconnect(peer0); - assert_eq!(0, total_peers(&*on_demand)); - assert!(on_demand.core.lock().best_blocks.is_empty()); - } - - #[test] - fn disconnects_from_timeouted_peer() { - let (_x, on_demand) = dummy(true); - let (network_sender, network_port) = network_channel(); - let peer0 = PeerId::random(); - let peer1 = PeerId::random(); - on_demand.set_network_sender(network_sender.clone()); - on_demand.on_connect(peer0.clone(), Roles::FULL, 1000); - on_demand.on_connect(peer1.clone(), Roles::FULL, 1000); - assert_eq!(vec![peer0.clone(), peer1.clone()], on_demand.core.lock().idle_peers.iter().cloned().collect::>()); - assert!(on_demand.core.lock().active_peers.is_empty()); - - on_demand.remote_call(RemoteCallRequest { - block: Default::default(), - header: dummy_header(), - method: "test".into(), - call_data: vec![], - retry_count: None, - }); - assert_eq!(vec![peer1.clone()], on_demand.core.lock().idle_peers.iter().cloned().collect::>()); - assert_eq!(vec![peer0.clone()], on_demand.core.lock().active_peers.keys().cloned().collect::>()); - - on_demand.core.lock().active_peers[&peer0].timestamp = Instant::now() - REQUEST_TIMEOUT - REQUEST_TIMEOUT; - on_demand.maintain_peers(); - assert!(on_demand.core.lock().idle_peers.is_empty()); - assert_eq!(vec![peer1.clone()], on_demand.core.lock().active_peers.keys().cloned().collect::>()); - 
assert_disconnected_peer(network_port, Severity::Timeout); - } - - #[test] - fn disconnects_from_peer_on_response_with_wrong_id() { - let (_x, on_demand) = dummy(true); - let peer0 = PeerId::random(); - let (network_sender, network_port) = network_channel(); - on_demand.set_network_sender(network_sender.clone()); - on_demand.on_connect(peer0.clone(), Roles::FULL, 1000); - - on_demand.remote_call(RemoteCallRequest { - block: Default::default(), - header: dummy_header(), - method: "test".into(), - call_data: vec![], - retry_count: None, - }); - receive_call_response(&*on_demand, peer0, 1); - assert_disconnected_peer(network_port, Severity::Bad("Invalid remote call response from peer".to_string())); - assert_eq!(on_demand.core.lock().pending_requests.len(), 1); - } - - #[test] - fn disconnects_from_peer_on_incorrect_response() { - let (_x, on_demand) = dummy(false); - let (network_sender, network_port) = network_channel(); - let peer0 = PeerId::random(); - on_demand.set_network_sender(network_sender.clone()); - on_demand.remote_call(RemoteCallRequest { - block: Default::default(), - header: dummy_header(), - method: "test".into(), - call_data: vec![], - retry_count: Some(1), - }); - - on_demand.on_connect(peer0.clone(), Roles::FULL, 1000); - receive_call_response(&*on_demand, peer0.clone(), 0); - assert_disconnected_peer(network_port, Severity::Bad("Failed to check remote call response from peer: Backend error: Test error".to_string())); - assert_eq!(on_demand.core.lock().pending_requests.len(), 1); - } - - #[test] - fn disconnects_from_peer_on_unexpected_response() { - let (_x, on_demand) = dummy(true); - let (network_sender, network_port) = network_channel(); - let peer0 = PeerId::random(); - on_demand.set_network_sender(network_sender.clone()); - on_demand.on_connect(peer0.clone(), Roles::FULL, 1000); - - receive_call_response(&*on_demand, peer0, 0); - assert_disconnected_peer(network_port, Severity::Bad("Invalid remote call response from peer".to_string())); - } - - #[test] - fn disconnects_from_peer_on_wrong_response_type() { - let (_x, on_demand) = dummy(false); - let peer0 = PeerId::random(); - let (network_sender, network_port) = network_channel(); - on_demand.set_network_sender(network_sender.clone()); - on_demand.on_connect(peer0.clone(), Roles::FULL, 1000); - - on_demand.remote_call(RemoteCallRequest { - block: Default::default(), - header: dummy_header(), - method: "test".into(), - call_data: vec![], - retry_count: Some(1), - }); - - on_demand.on_remote_read_response(peer0.clone(), message::RemoteReadResponse { - id: 0, - proof: vec![vec![2]], - }); - assert_disconnected_peer(network_port, Severity::Bad("Unexpected response to remote read from peer".to_string())); - assert_eq!(on_demand.core.lock().pending_requests.len(), 1); - } - - #[test] - fn receives_remote_failure_after_retry_count_failures() { - use parking_lot::{Condvar, Mutex}; - - let retry_count = 2; - let peer_ids = (0 .. 
retry_count + 1).map(|_| PeerId::random()).collect::>(); - let (_x, on_demand) = dummy(false); - let (network_sender, _network_port) = network_channel(); - on_demand.set_network_sender(network_sender.clone()); - for i in 0..retry_count+1 { - on_demand.on_connect(peer_ids[i].clone(), Roles::FULL, 1000); - } - - let sync = Arc::new((Mutex::new(0), Mutex::new(0), Condvar::new())); - let thread_sync = sync.clone(); - - let response = on_demand.remote_call(RemoteCallRequest { - block: Default::default(), - header: dummy_header(), - method: "test".into(), - call_data: vec![], - retry_count: Some(retry_count) - }); - let thread = ::std::thread::spawn(move || { - let &(ref current, ref finished_at, ref finished) = &*thread_sync; - let _ = response.wait().unwrap_err(); - *finished_at.lock() = *current.lock(); - finished.notify_one(); - }); - - let &(ref current, ref finished_at, ref finished) = &*sync; - for i in 0..retry_count+1 { - let mut current = current.lock(); - *current = *current + 1; - receive_call_response(&*on_demand, peer_ids[i].clone(), i as u64); - } - - let mut finished_at = finished_at.lock(); - assert!(!finished.wait_for(&mut finished_at, ::std::time::Duration::from_millis(1000)).timed_out()); - assert_eq!(*finished_at, retry_count + 1); - - thread.join().unwrap(); - } - - #[test] - fn receives_remote_call_response() { - let (_x, on_demand) = dummy(true); - let (network_sender, _network_port) = network_channel(); - let peer0 = PeerId::random(); - on_demand.set_network_sender(network_sender.clone()); - on_demand.on_connect(peer0.clone(), Roles::FULL, 1000); - - let response = on_demand.remote_call(RemoteCallRequest { - block: Default::default(), - header: dummy_header(), - method: "test".into(), - call_data: vec![], - retry_count: None, - }); - let thread = ::std::thread::spawn(move || { - let result = response.wait().unwrap(); - assert_eq!(result, vec![42]); - }); - - receive_call_response(&*on_demand, peer0.clone(), 0); - thread.join().unwrap(); - } - - #[test] - fn receives_remote_read_response() { - let (_x, on_demand) = dummy(true); - let (network_sender, _network_port) = network_channel(); - let peer0 = PeerId::random(); - on_demand.set_network_sender(network_sender.clone()); - on_demand.on_connect(peer0.clone(), Roles::FULL, 1000); - - let response = on_demand.remote_read(RemoteReadRequest { - header: dummy_header(), - block: Default::default(), - key: b":key".to_vec(), - retry_count: None, - }); - let thread = ::std::thread::spawn(move || { - let result = response.wait().unwrap(); - assert_eq!(result, Some(vec![42])); - }); - - on_demand.on_remote_read_response(peer0.clone(), message::RemoteReadResponse { - id: 0, - proof: vec![vec![2]], - }); - thread.join().unwrap(); - } - - #[test] - fn receives_remote_header_response() { - let (_x, on_demand) = dummy(true); - let (network_sender, _network_port) = network_channel(); - let peer0 = PeerId::random(); - on_demand.set_network_sender(network_sender.clone()); - on_demand.on_connect(peer0.clone(), Roles::FULL, 1000); - - let response = on_demand.remote_header(RemoteHeaderRequest { - cht_root: Default::default(), - block: 1, - retry_count: None, - }); - let thread = ::std::thread::spawn(move || { - let result = response.wait().unwrap(); - assert_eq!( - result.hash(), - "6443a0b46e0412e626363028115a9f2c\ - f963eeed526b8b33e5316f08b50d0dc3".parse().unwrap() - ); - }); - - on_demand.on_remote_header_response(peer0.clone(), message::RemoteHeaderResponse { - id: 0, - header: Some(Header { - parent_hash: Default::default(), - number: 
1, - state_root: Default::default(), - extrinsics_root: Default::default(), - digest: Default::default(), - }), - proof: vec![vec![2]], - }); - thread.join().unwrap(); - } - - #[test] - fn receives_remote_changes_response() { - let (_x, on_demand) = dummy(true); - let (network_sender, _network_port) = network_channel(); - let peer0 = PeerId::random(); - on_demand.set_network_sender(network_sender.clone()); - on_demand.on_connect(peer0.clone(), Roles::FULL, 1000); - - let response = on_demand.remote_changes(RemoteChangesRequest { - changes_trie_config: changes_trie_config(), - first_block: (1, Default::default()), - last_block: (100, Default::default()), - max_block: (100, Default::default()), - tries_roots: (1, Default::default(), vec![]), - key: vec![], - retry_count: None, - }); - let thread = ::std::thread::spawn(move || { - let result = response.wait().unwrap(); - assert_eq!(result, vec![(100, 2)]); - }); - - on_demand.on_remote_changes_response(peer0.clone(), message::RemoteChangesResponse { - id: 0, - max: 1000, - proof: vec![vec![2]], - roots: vec![], - roots_proof: vec![], - }); - thread.join().unwrap(); - } - - #[test] - fn does_not_sends_request_to_peer_who_has_no_required_block() { - let (_x, on_demand) = dummy(true); - let (network_sender, _network_port) = network_channel(); - let peer1 = PeerId::random(); - let peer2 = PeerId::random(); - on_demand.set_network_sender(network_sender.clone()); - - on_demand.on_connect(peer1.clone(), Roles::FULL, 100); - - on_demand.remote_header(RemoteHeaderRequest { - cht_root: Default::default(), - block: 200, - retry_count: None, - }); - on_demand.remote_header(RemoteHeaderRequest { - cht_root: Default::default(), - block: 250, - retry_count: None, - }); - on_demand.remote_header(RemoteHeaderRequest { - cht_root: Default::default(), - block: 250, - retry_count: None, - }); - - on_demand.on_connect(peer2.clone(), Roles::FULL, 150); - - assert_eq!(vec![peer1.clone(), peer2.clone()], on_demand.core.lock().idle_peers.iter().cloned().collect::>()); - assert_eq!(on_demand.core.lock().pending_requests.len(), 3); - - on_demand.on_block_announce(peer1.clone(), 250); - - assert_eq!(vec![peer2.clone()], on_demand.core.lock().idle_peers.iter().cloned().collect::>()); - assert_eq!(on_demand.core.lock().pending_requests.len(), 2); - - on_demand.on_block_announce(peer2.clone(), 250); - - assert!(!on_demand.core.lock().idle_peers.iter().any(|_| true)); - assert_eq!(on_demand.core.lock().pending_requests.len(), 1); - - on_demand.on_remote_header_response(peer1.clone(), message::RemoteHeaderResponse { - id: 0, - header: Some(dummy_header()), - proof: vec![], - }); - - assert!(!on_demand.core.lock().idle_peers.iter().any(|_| true)); - assert_eq!(on_demand.core.lock().pending_requests.len(), 0); - } - - #[test] - fn does_not_loop_forever_after_dispatching_request_to_last_peer() { - // this test is a regression for a bug where the dispatch function would - // loop forever after dispatching a request to the last peer, since the - // last peer was not updated - let (_x, on_demand) = dummy(true); - let (network_sender, _network_port) = network_channel(); - let peer1 = PeerId::random(); - let peer2 = PeerId::random(); - let peer3 = PeerId::random(); - on_demand.set_network_sender(network_sender.clone()); - - on_demand.remote_header(RemoteHeaderRequest { - cht_root: Default::default(), - block: 250, - retry_count: None, - }); - on_demand.remote_header(RemoteHeaderRequest { - cht_root: Default::default(), - block: 250, - retry_count: None, - }); - - 
on_demand.on_connect(peer1.clone(), Roles::FULL, 200); - on_demand.on_connect(peer2.clone(), Roles::FULL, 200); - on_demand.on_connect(peer3.clone(), Roles::FULL, 250); - - assert_eq!(vec![peer1.clone(), peer2.clone()], on_demand.core.lock().idle_peers.iter().cloned().collect::<Vec<_>>()); - assert_eq!(on_demand.core.lock().pending_requests.len(), 1); - } - - #[test] - fn tries_to_send_all_pending_requests() { - let (_x, on_demand) = dummy(true); - let (network_sender, _network_port) = network_channel(); - let peer1 = PeerId::random(); - on_demand.set_network_sender(network_sender.clone()); - - on_demand.remote_header(RemoteHeaderRequest { - cht_root: Default::default(), - block: 300, - retry_count: None, - }); - on_demand.remote_header(RemoteHeaderRequest { - cht_root: Default::default(), - block: 250, - retry_count: None, - }); - - on_demand.on_connect(peer1.clone(), Roles::FULL, 250); - - assert!(on_demand.core.lock().idle_peers.iter().cloned().collect::<Vec<_>>().is_empty()); - assert_eq!(on_demand.core.lock().pending_requests.len(), 1); - } + use super::{OnDemand, OnDemandService, REQUEST_TIMEOUT}; + use crate::config::Roles; + use crate::message; + use crate::service::{network_channel, NetworkMsg, NetworkPort}; + use client::error::{ErrorKind as ClientErrorKind, Result as ClientResult}; + use client::light::fetcher::{ + ChangesProof, FetchChecker, Fetcher, RemoteCallRequest, RemoteChangesRequest, + RemoteHeaderRequest, RemoteReadRequest, + }; + use futures::Future; + use network_libp2p::{PeerId, Severity}; + use runtime_primitives::traits::NumberFor; + use std::sync::Arc; + use std::time::Instant; + use test_client::runtime::{changes_trie_config, Block, Header}; + + pub struct DummyExecutor; + struct DummyFetchChecker { + ok: bool, + } + + impl FetchChecker<Block> for DummyFetchChecker { + fn check_header_proof( + &self, + _request: &RemoteHeaderRequest<Header>
, + header: Option<Header>
, + _remote_proof: Vec<Vec<u8>>, + ) -> ClientResult<Header>
{ + match self.ok { + true if header.is_some() => Ok(header.unwrap()), + _ => Err(ClientErrorKind::Backend("Test error".into()).into()), + } + } + + fn check_read_proof( + &self, + _: &RemoteReadRequest<Header>
, + _: Vec<Vec<u8>>, + ) -> ClientResult<Option<Vec<u8>>> { + match self.ok { + true => Ok(Some(vec![42])), + false => Err(ClientErrorKind::Backend("Test error".into()).into()), + } + } + + fn check_execution_proof( + &self, + _: &RemoteCallRequest<Header>
, + _: Vec<Vec<u8>>, + ) -> ClientResult<Vec<u8>> { + match self.ok { + true => Ok(vec![42]), + false => Err(ClientErrorKind::Backend("Test error".into()).into()), + } + } + + fn check_changes_proof( + &self, + _: &RemoteChangesRequest<Header>
, + _: ChangesProof<Header>
, + ) -> ClientResult<Vec<(NumberFor<Block>, u32)>> { + match self.ok { + true => Ok(vec![(100, 2)]), + false => Err(ClientErrorKind::Backend("Test error".into()).into()), + } + } + } + + fn dummy(ok: bool) -> (Arc<DummyExecutor>, Arc<OnDemand<Block>>) { + let executor = Arc::new(DummyExecutor); + let service = Arc::new(OnDemand::new(Arc::new(DummyFetchChecker { ok }))); + (executor, service) + } + + fn total_peers(on_demand: &OnDemand<Block>) -> usize { + let core = on_demand.core.lock(); + core.idle_peers.len() + core.active_peers.len() + } + + fn receive_call_response(on_demand: &OnDemand<Block>, peer: PeerId, id: message::RequestId) { + on_demand.on_remote_call_response( + peer, + message::RemoteCallResponse { + id: id, + proof: vec![vec![2]], + }, + ); + } + + fn dummy_header() -> Header { + Header { + parent_hash: Default::default(), + number: 0, + state_root: Default::default(), + extrinsics_root: Default::default(), + digest: Default::default(), + } + } + + fn assert_disconnected_peer(network_port: NetworkPort<Block>, expected_severity: Severity) { + let mut disconnect_count = 0; + while let Ok(msg) = network_port.receiver().try_recv() { + match msg { + NetworkMsg::ReportPeer(_, severity) => { + if severity == expected_severity { + disconnect_count = disconnect_count + 1; + } + } + _ => {} + } + } + assert_eq!(disconnect_count, 1); + } + + #[test] + fn knows_about_peers_roles() { + let (_, on_demand) = dummy(true); + let peer0 = PeerId::random(); + let peer1 = PeerId::random(); + let peer2 = PeerId::random(); + on_demand.on_connect(peer0, Roles::LIGHT, 1000); + on_demand.on_connect(peer1.clone(), Roles::FULL, 2000); + on_demand.on_connect(peer2.clone(), Roles::AUTHORITY, 3000); + assert_eq!( + vec![peer1.clone(), peer2.clone()], + on_demand + .core + .lock() + .idle_peers + .iter() + .cloned() + .collect::<Vec<_>>() + ); + assert_eq!(on_demand.core.lock().best_blocks.get(&peer1), Some(&2000)); + assert_eq!(on_demand.core.lock().best_blocks.get(&peer2), Some(&3000)); + } + + #[test] + fn disconnects_from_idle_peer() { + let peer0 = PeerId::random(); + + let (_, on_demand) = dummy(true); + on_demand.on_connect(peer0.clone(), Roles::FULL, 100); + assert_eq!(1, total_peers(&*on_demand)); + assert!(!on_demand.core.lock().best_blocks.is_empty()); + + on_demand.on_disconnect(peer0); + assert_eq!(0, total_peers(&*on_demand)); + assert!(on_demand.core.lock().best_blocks.is_empty()); + } + + #[test] + fn disconnects_from_timeouted_peer() { + let (_x, on_demand) = dummy(true); + let (network_sender, network_port) = network_channel(); + let peer0 = PeerId::random(); + let peer1 = PeerId::random(); + on_demand.set_network_sender(network_sender.clone()); + on_demand.on_connect(peer0.clone(), Roles::FULL, 1000); + on_demand.on_connect(peer1.clone(), Roles::FULL, 1000); + assert_eq!( + vec![peer0.clone(), peer1.clone()], + on_demand + .core + .lock() + .idle_peers + .iter() + .cloned() + .collect::<Vec<_>>() + ); + assert!(on_demand.core.lock().active_peers.is_empty()); + + on_demand.remote_call(RemoteCallRequest { + block: Default::default(), + header: dummy_header(), + method: "test".into(), + call_data: vec![], + retry_count: None, + }); + assert_eq!( + vec![peer1.clone()], + on_demand + .core + .lock() + .idle_peers + .iter() + .cloned() + .collect::<Vec<_>>() + ); + assert_eq!( + vec![peer0.clone()], + on_demand + .core + .lock() + .active_peers + .keys() + .cloned() + .collect::<Vec<_>>() + ); + + on_demand.core.lock().active_peers[&peer0].timestamp = + Instant::now() - REQUEST_TIMEOUT - REQUEST_TIMEOUT; + on_demand.maintain_peers(); + assert!(on_demand.core.lock().idle_peers.is_empty()); + 
assert_eq!( + vec![peer1.clone()], + on_demand + .core + .lock() + .active_peers + .keys() + .cloned() + .collect::>() + ); + assert_disconnected_peer(network_port, Severity::Timeout); + } + + #[test] + fn disconnects_from_peer_on_response_with_wrong_id() { + let (_x, on_demand) = dummy(true); + let peer0 = PeerId::random(); + let (network_sender, network_port) = network_channel(); + on_demand.set_network_sender(network_sender.clone()); + on_demand.on_connect(peer0.clone(), Roles::FULL, 1000); + + on_demand.remote_call(RemoteCallRequest { + block: Default::default(), + header: dummy_header(), + method: "test".into(), + call_data: vec![], + retry_count: None, + }); + receive_call_response(&*on_demand, peer0, 1); + assert_disconnected_peer( + network_port, + Severity::Bad("Invalid remote call response from peer".to_string()), + ); + assert_eq!(on_demand.core.lock().pending_requests.len(), 1); + } + + #[test] + fn disconnects_from_peer_on_incorrect_response() { + let (_x, on_demand) = dummy(false); + let (network_sender, network_port) = network_channel(); + let peer0 = PeerId::random(); + on_demand.set_network_sender(network_sender.clone()); + on_demand.remote_call(RemoteCallRequest { + block: Default::default(), + header: dummy_header(), + method: "test".into(), + call_data: vec![], + retry_count: Some(1), + }); + + on_demand.on_connect(peer0.clone(), Roles::FULL, 1000); + receive_call_response(&*on_demand, peer0.clone(), 0); + assert_disconnected_peer( + network_port, + Severity::Bad( + "Failed to check remote call response from peer: Backend error: Test error" + .to_string(), + ), + ); + assert_eq!(on_demand.core.lock().pending_requests.len(), 1); + } + + #[test] + fn disconnects_from_peer_on_unexpected_response() { + let (_x, on_demand) = dummy(true); + let (network_sender, network_port) = network_channel(); + let peer0 = PeerId::random(); + on_demand.set_network_sender(network_sender.clone()); + on_demand.on_connect(peer0.clone(), Roles::FULL, 1000); + + receive_call_response(&*on_demand, peer0, 0); + assert_disconnected_peer( + network_port, + Severity::Bad("Invalid remote call response from peer".to_string()), + ); + } + + #[test] + fn disconnects_from_peer_on_wrong_response_type() { + let (_x, on_demand) = dummy(false); + let peer0 = PeerId::random(); + let (network_sender, network_port) = network_channel(); + on_demand.set_network_sender(network_sender.clone()); + on_demand.on_connect(peer0.clone(), Roles::FULL, 1000); + + on_demand.remote_call(RemoteCallRequest { + block: Default::default(), + header: dummy_header(), + method: "test".into(), + call_data: vec![], + retry_count: Some(1), + }); + + on_demand.on_remote_read_response( + peer0.clone(), + message::RemoteReadResponse { + id: 0, + proof: vec![vec![2]], + }, + ); + assert_disconnected_peer( + network_port, + Severity::Bad("Unexpected response to remote read from peer".to_string()), + ); + assert_eq!(on_demand.core.lock().pending_requests.len(), 1); + } + + #[test] + fn receives_remote_failure_after_retry_count_failures() { + use parking_lot::{Condvar, Mutex}; + + let retry_count = 2; + let peer_ids = (0..retry_count + 1) + .map(|_| PeerId::random()) + .collect::>(); + let (_x, on_demand) = dummy(false); + let (network_sender, _network_port) = network_channel(); + on_demand.set_network_sender(network_sender.clone()); + for i in 0..retry_count + 1 { + on_demand.on_connect(peer_ids[i].clone(), Roles::FULL, 1000); + } + + let sync = Arc::new((Mutex::new(0), Mutex::new(0), Condvar::new())); + let thread_sync = sync.clone(); + + 
let response = on_demand.remote_call(RemoteCallRequest { + block: Default::default(), + header: dummy_header(), + method: "test".into(), + call_data: vec![], + retry_count: Some(retry_count), + }); + let thread = ::std::thread::spawn(move || { + let &(ref current, ref finished_at, ref finished) = &*thread_sync; + let _ = response.wait().unwrap_err(); + *finished_at.lock() = *current.lock(); + finished.notify_one(); + }); + + let &(ref current, ref finished_at, ref finished) = &*sync; + for i in 0..retry_count + 1 { + let mut current = current.lock(); + *current = *current + 1; + receive_call_response(&*on_demand, peer_ids[i].clone(), i as u64); + } + + let mut finished_at = finished_at.lock(); + assert!(!finished + .wait_for(&mut finished_at, ::std::time::Duration::from_millis(1000)) + .timed_out()); + assert_eq!(*finished_at, retry_count + 1); + + thread.join().unwrap(); + } + + #[test] + fn receives_remote_call_response() { + let (_x, on_demand) = dummy(true); + let (network_sender, _network_port) = network_channel(); + let peer0 = PeerId::random(); + on_demand.set_network_sender(network_sender.clone()); + on_demand.on_connect(peer0.clone(), Roles::FULL, 1000); + + let response = on_demand.remote_call(RemoteCallRequest { + block: Default::default(), + header: dummy_header(), + method: "test".into(), + call_data: vec![], + retry_count: None, + }); + let thread = ::std::thread::spawn(move || { + let result = response.wait().unwrap(); + assert_eq!(result, vec![42]); + }); + + receive_call_response(&*on_demand, peer0.clone(), 0); + thread.join().unwrap(); + } + + #[test] + fn receives_remote_read_response() { + let (_x, on_demand) = dummy(true); + let (network_sender, _network_port) = network_channel(); + let peer0 = PeerId::random(); + on_demand.set_network_sender(network_sender.clone()); + on_demand.on_connect(peer0.clone(), Roles::FULL, 1000); + + let response = on_demand.remote_read(RemoteReadRequest { + header: dummy_header(), + block: Default::default(), + key: b":key".to_vec(), + retry_count: None, + }); + let thread = ::std::thread::spawn(move || { + let result = response.wait().unwrap(); + assert_eq!(result, Some(vec![42])); + }); + + on_demand.on_remote_read_response( + peer0.clone(), + message::RemoteReadResponse { + id: 0, + proof: vec![vec![2]], + }, + ); + thread.join().unwrap(); + } + + #[test] + fn receives_remote_header_response() { + let (_x, on_demand) = dummy(true); + let (network_sender, _network_port) = network_channel(); + let peer0 = PeerId::random(); + on_demand.set_network_sender(network_sender.clone()); + on_demand.on_connect(peer0.clone(), Roles::FULL, 1000); + + let response = on_demand.remote_header(RemoteHeaderRequest { + cht_root: Default::default(), + block: 1, + retry_count: None, + }); + let thread = ::std::thread::spawn(move || { + let result = response.wait().unwrap(); + assert_eq!( + result.hash(), + "6443a0b46e0412e626363028115a9f2c\ + f963eeed526b8b33e5316f08b50d0dc3" + .parse() + .unwrap() + ); + }); + + on_demand.on_remote_header_response( + peer0.clone(), + message::RemoteHeaderResponse { + id: 0, + header: Some(Header { + parent_hash: Default::default(), + number: 1, + state_root: Default::default(), + extrinsics_root: Default::default(), + digest: Default::default(), + }), + proof: vec![vec![2]], + }, + ); + thread.join().unwrap(); + } + + #[test] + fn receives_remote_changes_response() { + let (_x, on_demand) = dummy(true); + let (network_sender, _network_port) = network_channel(); + let peer0 = PeerId::random(); + 
on_demand.set_network_sender(network_sender.clone()); + on_demand.on_connect(peer0.clone(), Roles::FULL, 1000); + + let response = on_demand.remote_changes(RemoteChangesRequest { + changes_trie_config: changes_trie_config(), + first_block: (1, Default::default()), + last_block: (100, Default::default()), + max_block: (100, Default::default()), + tries_roots: (1, Default::default(), vec![]), + key: vec![], + retry_count: None, + }); + let thread = ::std::thread::spawn(move || { + let result = response.wait().unwrap(); + assert_eq!(result, vec![(100, 2)]); + }); + + on_demand.on_remote_changes_response( + peer0.clone(), + message::RemoteChangesResponse { + id: 0, + max: 1000, + proof: vec![vec![2]], + roots: vec![], + roots_proof: vec![], + }, + ); + thread.join().unwrap(); + } + + #[test] + fn does_not_sends_request_to_peer_who_has_no_required_block() { + let (_x, on_demand) = dummy(true); + let (network_sender, _network_port) = network_channel(); + let peer1 = PeerId::random(); + let peer2 = PeerId::random(); + on_demand.set_network_sender(network_sender.clone()); + + on_demand.on_connect(peer1.clone(), Roles::FULL, 100); + + on_demand.remote_header(RemoteHeaderRequest { + cht_root: Default::default(), + block: 200, + retry_count: None, + }); + on_demand.remote_header(RemoteHeaderRequest { + cht_root: Default::default(), + block: 250, + retry_count: None, + }); + on_demand.remote_header(RemoteHeaderRequest { + cht_root: Default::default(), + block: 250, + retry_count: None, + }); + + on_demand.on_connect(peer2.clone(), Roles::FULL, 150); + + assert_eq!( + vec![peer1.clone(), peer2.clone()], + on_demand + .core + .lock() + .idle_peers + .iter() + .cloned() + .collect::>() + ); + assert_eq!(on_demand.core.lock().pending_requests.len(), 3); + + on_demand.on_block_announce(peer1.clone(), 250); + + assert_eq!( + vec![peer2.clone()], + on_demand + .core + .lock() + .idle_peers + .iter() + .cloned() + .collect::>() + ); + assert_eq!(on_demand.core.lock().pending_requests.len(), 2); + + on_demand.on_block_announce(peer2.clone(), 250); + + assert!(!on_demand.core.lock().idle_peers.iter().any(|_| true)); + assert_eq!(on_demand.core.lock().pending_requests.len(), 1); + + on_demand.on_remote_header_response( + peer1.clone(), + message::RemoteHeaderResponse { + id: 0, + header: Some(dummy_header()), + proof: vec![], + }, + ); + + assert!(!on_demand.core.lock().idle_peers.iter().any(|_| true)); + assert_eq!(on_demand.core.lock().pending_requests.len(), 0); + } + + #[test] + fn does_not_loop_forever_after_dispatching_request_to_last_peer() { + // this test is a regression for a bug where the dispatch function would + // loop forever after dispatching a request to the last peer, since the + // last peer was not updated + let (_x, on_demand) = dummy(true); + let (network_sender, _network_port) = network_channel(); + let peer1 = PeerId::random(); + let peer2 = PeerId::random(); + let peer3 = PeerId::random(); + on_demand.set_network_sender(network_sender.clone()); + + on_demand.remote_header(RemoteHeaderRequest { + cht_root: Default::default(), + block: 250, + retry_count: None, + }); + on_demand.remote_header(RemoteHeaderRequest { + cht_root: Default::default(), + block: 250, + retry_count: None, + }); + + on_demand.on_connect(peer1.clone(), Roles::FULL, 200); + on_demand.on_connect(peer2.clone(), Roles::FULL, 200); + on_demand.on_connect(peer3.clone(), Roles::FULL, 250); + + assert_eq!( + vec![peer1.clone(), peer2.clone()], + on_demand + .core + .lock() + .idle_peers + .iter() + .cloned() + .collect::>() 
+ ); + assert_eq!(on_demand.core.lock().pending_requests.len(), 1); + } + + #[test] + fn tries_to_send_all_pending_requests() { + let (_x, on_demand) = dummy(true); + let (network_sender, _network_port) = network_channel(); + let peer1 = PeerId::random(); + on_demand.set_network_sender(network_sender.clone()); + + on_demand.remote_header(RemoteHeaderRequest { + cht_root: Default::default(), + block: 300, + retry_count: None, + }); + on_demand.remote_header(RemoteHeaderRequest { + cht_root: Default::default(), + block: 250, + retry_count: None, + }); + + on_demand.on_connect(peer1.clone(), Roles::FULL, 250); + + assert!(on_demand + .core + .lock() + .idle_peers + .iter() + .cloned() + .collect::>() + .is_empty()); + assert_eq!(on_demand.core.lock().pending_requests.len(), 1); + } } diff --git a/core/network/src/protocol.rs b/core/network/src/protocol.rs index 185f595cc0..8c12dee4f2 100644 --- a/core/network/src/protocol.rs +++ b/core/network/src/protocol.rs @@ -14,32 +14,32 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use crossbeam_channel::{self as channel, Receiver, Sender, select}; -use futures::sync::mpsc; -use parking_lot::Mutex; -use network_libp2p::{PeerId, Severity}; -use primitives::storage::StorageKey; -use runtime_primitives::{generic::BlockId, ConsensusEngineId}; -use runtime_primitives::traits::{As, Block as BlockT, Header as HeaderT, NumberFor, Zero}; -use consensus::import_queue::ImportQueue; -use crate::message::{self, Message}; -use crate::message::generic::{Message as GenericMessage, ConsensusMessage}; +use crate::chain::Client; +use crate::config::{ProtocolConfig, Roles}; use crate::consensus_gossip::ConsensusGossip; +use crate::message::generic::{ConsensusMessage, Message as GenericMessage}; +use crate::message::{self, Message}; use crate::on_demand::OnDemandService; +use crate::service::{ExHashT, NetworkChan, NetworkMsg, TransactionPool}; use crate::specialization::NetworkSpecialization; use crate::sync::{ChainSync, Status as SyncStatus, SyncState}; -use crate::service::{NetworkChan, NetworkMsg, TransactionPool, ExHashT}; -use crate::config::{ProtocolConfig, Roles}; +use crate::{error, util::LruHashSet}; +use client::light::fetcher::ChangesProof; +use consensus::import_queue::ImportQueue; +use crossbeam_channel::{self as channel, select, Receiver, Sender}; +use futures::sync::mpsc; +use log::{debug, trace, warn}; +use network_libp2p::{PeerId, Severity}; +use parking_lot::Mutex; use parking_lot::RwLock; +use primitives::storage::StorageKey; +use runtime_primitives::traits::{As, Block as BlockT, Header as HeaderT, NumberFor, Zero}; +use runtime_primitives::{generic::BlockId, ConsensusEngineId}; use rustc_hex::ToHex; use std::collections::{BTreeMap, HashMap}; -use std::sync::Arc; use std::sync::atomic::AtomicBool; +use std::sync::Arc; use std::{cmp, num::NonZeroUsize, thread, time}; -use log::{trace, debug, warn}; -use crate::chain::Client; -use client::light::fetcher::ChangesProof; -use crate::{error, util::LruHashSet}; const REQUEST_TIMEOUT_SEC: u64 = 40; /// Interval at which we perform time based maintenance @@ -63,142 +63,152 @@ const LIGHT_MAXIMAL_BLOCKS_DIFFERENCE: u64 = 8192; // Lock must always be taken in order declared here. 
pub struct Protocol, H: ExHashT> { - status_sinks: Arc>>>>, - network_chan: NetworkChan, - port: Receiver>, - from_network_port: Receiver>, - config: ProtocolConfig, - on_demand: Option>>, - genesis_hash: B::Hash, - sync: ChainSync, - specialization: S, - consensus_gossip: ConsensusGossip, - context_data: ContextData, - // Connected peers pending Status message. - handshaking_peers: HashMap, - // Connected peers from whom we received a Status message, - // similar to context_data.peers but shared with the SyncProvider. - connected_peers: Arc>>>, - transaction_pool: Arc>, + status_sinks: Arc>>>>, + network_chan: NetworkChan, + port: Receiver>, + from_network_port: Receiver>, + config: ProtocolConfig, + on_demand: Option>>, + genesis_hash: B::Hash, + sync: ChainSync, + specialization: S, + consensus_gossip: ConsensusGossip, + context_data: ContextData, + // Connected peers pending Status message. + handshaking_peers: HashMap, + // Connected peers from whom we received a Status message, + // similar to context_data.peers but shared with the SyncProvider. + connected_peers: Arc>>>, + transaction_pool: Arc>, } /// A peer from whom we have received a Status message. #[derive(Clone)] pub struct ConnectedPeer { - pub peer_info: PeerInfo + pub peer_info: PeerInfo, } /// A peer that we are connected to /// and from whom we have not yet received a Status message. struct HandshakingPeer { - timestamp: time::Instant, + timestamp: time::Instant, } /// Syncing status and statistics #[derive(Clone)] pub struct ProtocolStatus { - /// Sync status. - pub sync: SyncStatus, - /// Total number of connected peers - pub num_peers: usize, - /// Total number of active peers. - pub num_active_peers: usize, + /// Sync status. + pub sync: SyncStatus, + /// Total number of connected peers + pub num_peers: usize, + /// Total number of active peers. + pub num_active_peers: usize, } /// Peer information #[derive(Debug)] struct Peer { - info: PeerInfo, - /// Current block request, if any. - block_request: Option<(time::Instant, message::BlockRequest)>, - /// Requests we are no longer insterested in. - obsolete_requests: HashMap, - /// Holds a set of transactions known to this peer. - known_extrinsics: LruHashSet, - /// Holds a set of blocks known to this peer. - known_blocks: LruHashSet, - /// Request counter, - next_request_id: message::RequestId, + info: PeerInfo, + /// Current block request, if any. + block_request: Option<(time::Instant, message::BlockRequest)>, + /// Requests we are no longer insterested in. + obsolete_requests: HashMap, + /// Holds a set of transactions known to this peer. + known_extrinsics: LruHashSet, + /// Holds a set of blocks known to this peer. + known_blocks: LruHashSet, + /// Request counter, + next_request_id: message::RequestId, } /// Info about a peer's known state. #[derive(Clone, Debug)] pub struct PeerInfo { - /// Roles - pub roles: Roles, - /// Protocol version - pub protocol_version: u32, - /// Peer best block hash - pub best_hash: B::Hash, - /// Peer best block number - pub best_number: ::Number, + /// Roles + pub roles: Roles, + /// Protocol version + pub protocol_version: u32, + /// Peer best block hash + pub best_hash: B::Hash, + /// Peer best block number + pub best_number: ::Number, } /// Context for a network-specific handler. pub trait Context { - /// Get a reference to the client. - fn client(&self) -> &crate::chain::Client; + /// Get a reference to the client. 
+ fn client(&self) -> &crate::chain::Client; - /// Point out that a peer has been malign or irresponsible or appeared lazy. - fn report_peer(&mut self, who: PeerId, reason: Severity); + /// Point out that a peer has been malign or irresponsible or appeared lazy. + fn report_peer(&mut self, who: PeerId, reason: Severity); - /// Get peer info. - fn peer_info(&self, peer: &PeerId) -> Option>; + /// Get peer info. + fn peer_info(&self, peer: &PeerId) -> Option>; - /// Send a message to a peer. - fn send_message(&mut self, who: PeerId, data: crate::message::Message); + /// Send a message to a peer. + fn send_message(&mut self, who: PeerId, data: crate::message::Message); } /// Protocol context. struct ProtocolContext<'a, B: 'a + BlockT, H: 'a + ExHashT> { - network_chan: &'a NetworkChan, - context_data: &'a mut ContextData, + network_chan: &'a NetworkChan, + context_data: &'a mut ContextData, } impl<'a, B: BlockT + 'a, H: 'a + ExHashT> ProtocolContext<'a, B, H> { - fn new(context_data: &'a mut ContextData, network_chan: &'a NetworkChan) -> Self { - ProtocolContext { network_chan, context_data } - } + fn new(context_data: &'a mut ContextData, network_chan: &'a NetworkChan) -> Self { + ProtocolContext { + network_chan, + context_data, + } + } } impl<'a, B: BlockT + 'a, H: ExHashT + 'a> Context for ProtocolContext<'a, B, H> { - fn send_message(&mut self, who: PeerId, message: Message) { - send_message(&mut self.context_data.peers, &self.network_chan, who, message) - } + fn send_message(&mut self, who: PeerId, message: Message) { + send_message( + &mut self.context_data.peers, + &self.network_chan, + who, + message, + ) + } - fn report_peer(&mut self, who: PeerId, reason: Severity) { - self.network_chan.send(NetworkMsg::ReportPeer(who, reason)) - } + fn report_peer(&mut self, who: PeerId, reason: Severity) { + self.network_chan.send(NetworkMsg::ReportPeer(who, reason)) + } - fn peer_info(&self, who: &PeerId) -> Option> { - self.context_data.peers.get(who).map(|p| p.info.clone()) - } + fn peer_info(&self, who: &PeerId) -> Option> { + self.context_data.peers.get(who).map(|p| p.info.clone()) + } - fn client(&self) -> &Client { - &*self.context_data.chain - } + fn client(&self) -> &Client { + &*self.context_data.chain + } } /// Data necessary to create a context. struct ContextData { - // All connected peers - peers: HashMap>, - pub chain: Arc>, + // All connected peers + peers: HashMap>, + pub chain: Arc>, } /// A task, consisting of a user-provided closure, to be executed on the Protocol thread. -pub trait SpecTask> { +pub trait SpecTask> { fn call_box(self: Box, spec: &mut S, context: &mut Context); } -impl, F: FnOnce(&mut S, &mut Context)> SpecTask for F { +impl, F: FnOnce(&mut S, &mut Context)> SpecTask + for F +{ fn call_box(self: Box, spec: &mut S, context: &mut Context) { (*self)(spec, context) } } /// A task, consisting of a user-provided closure, to be executed on the Protocol thread. -pub trait GossipTask { +pub trait GossipTask { fn call_box(self: Box, gossip: &mut ConsensusGossip, context: &mut Context); } @@ -210,922 +220,1007 @@ impl, &mut Context)> GossipTask< /// Messages sent to Protocol from elsewhere inside the system. pub enum ProtocolMsg> { - /// A batch of blocks has been processed, with or without errors. - BlocksProcessed(Vec, bool), - /// Tell protocol to restart sync. - RestartSync, - /// Propagate status updates. - Status, - /// Tell protocol to propagate extrinsics. - PropagateExtrinsics, - /// Tell protocol that a block was imported (sent by the import-queue). 
- BlockImportedSync(B::Hash, NumberFor), - /// Tell protocol to clear all pending justification requests. - ClearJustificationRequests, - /// Tell protocol to request justification for a block. - RequestJustification(B::Hash, NumberFor), - /// Inform protocol whether a justification was successfully imported. - JustificationImportResult(B::Hash, NumberFor, bool), - /// Propagate a block to peers. - AnnounceBlock(B::Hash), - /// A block has been imported (sent by the client). - BlockImported(B::Hash, B::Header), - /// A block has been finalized (sent by the client). - BlockFinalized(B::Hash, B::Header), - /// Execute a closure with the chain-specific network specialization. - ExecuteWithSpec(Box + Send + 'static>), - /// Execute a closure with the consensus gossip. - ExecuteWithGossip(Box + Send + 'static>), - /// Incoming gossip consensus message. - GossipConsensusMessage(B::Hash, ConsensusEngineId, Vec, bool), - /// Tell protocol to abort sync (does not stop protocol). - /// Only used in tests. - #[cfg(any(test, feature = "test-helpers"))] - Abort, - /// Tell protocol to abort sync and stop. - Stop, - /// Tell protocol to perform regular maintenance. - Tick, + /// A batch of blocks has been processed, with or without errors. + BlocksProcessed(Vec, bool), + /// Tell protocol to restart sync. + RestartSync, + /// Propagate status updates. + Status, + /// Tell protocol to propagate extrinsics. + PropagateExtrinsics, + /// Tell protocol that a block was imported (sent by the import-queue). + BlockImportedSync(B::Hash, NumberFor), + /// Tell protocol to clear all pending justification requests. + ClearJustificationRequests, + /// Tell protocol to request justification for a block. + RequestJustification(B::Hash, NumberFor), + /// Inform protocol whether a justification was successfully imported. + JustificationImportResult(B::Hash, NumberFor, bool), + /// Propagate a block to peers. + AnnounceBlock(B::Hash), + /// A block has been imported (sent by the client). + BlockImported(B::Hash, B::Header), + /// A block has been finalized (sent by the client). + BlockFinalized(B::Hash, B::Header), + /// Execute a closure with the chain-specific network specialization. + ExecuteWithSpec(Box + Send + 'static>), + /// Execute a closure with the consensus gossip. + ExecuteWithGossip(Box + Send + 'static>), + /// Incoming gossip consensus message. + GossipConsensusMessage(B::Hash, ConsensusEngineId, Vec, bool), + /// Tell protocol to abort sync (does not stop protocol). + /// Only used in tests. + #[cfg(any(test, feature = "test-helpers"))] + Abort, + /// Tell protocol to abort sync and stop. + Stop, + /// Tell protocol to perform regular maintenance. + Tick, } /// Messages sent to Protocol from Network-libp2p. pub enum FromNetworkMsg { - /// A peer connected, with debug info. - PeerConnected(PeerId, String), - /// A peer disconnected, with debug info. - PeerDisconnected(PeerId, String), - /// A custom message from another peer. - CustomMessage(PeerId, Message), - /// Let protocol know a peer is currenlty clogged. - PeerClogged(PeerId, Option>), + /// A peer connected, with debug info. + PeerConnected(PeerId, String), + /// A peer disconnected, with debug info. + PeerDisconnected(PeerId, String), + /// A custom message from another peer. + CustomMessage(PeerId, Message), + /// Let protocol know a peer is currenlty clogged. 
+ PeerClogged(PeerId, Option>), } enum Incoming> { - FromNetwork(FromNetworkMsg), - FromClient(ProtocolMsg) + FromNetwork(FromNetworkMsg), + FromClient(ProtocolMsg), } impl, H: ExHashT> Protocol { - /// Create a new instance. - pub fn new( - status_sinks: Arc>>>>, - is_offline: Arc, - is_major_syncing: Arc, - connected_peers: Arc>>>, - network_chan: NetworkChan, - config: ProtocolConfig, - chain: Arc>, - import_queue: Box>, - on_demand: Option>>, - transaction_pool: Arc>, - specialization: S, - ) -> error::Result<(Sender>, Sender>)> { - let (protocol_sender, port) = channel::unbounded(); - let (from_network_sender, from_network_port) = channel::bounded(4); - let info = chain.info()?; - let sync = ChainSync::new(is_offline, is_major_syncing, config.roles, &info, import_queue); - let _ = thread::Builder::new() - .name("Protocol".into()) - .spawn(move || { - let mut protocol = Protocol { - status_sinks, - network_chan, - from_network_port, - port, - config: config, - context_data: ContextData { - peers: HashMap::new(), - chain, - }, - on_demand, - genesis_hash: info.chain.genesis_hash, - sync, - specialization: specialization, - consensus_gossip: ConsensusGossip::new(), - handshaking_peers: HashMap::new(), - connected_peers, - transaction_pool: transaction_pool, - }; - let tick_timeout = channel::tick(TICK_TIMEOUT); - let propagate_timeout = channel::tick(PROPAGATE_TIMEOUT); - let status_interval = channel::tick(STATUS_INTERVAL); - while protocol.run(&tick_timeout, &propagate_timeout, &status_interval) { - // Running until all senders have been dropped... - } - }) - .expect("Protocol thread spawning failed"); - Ok((protocol_sender, from_network_sender)) - } + /// Create a new instance. + pub fn new( + status_sinks: Arc>>>>, + is_offline: Arc, + is_major_syncing: Arc, + connected_peers: Arc>>>, + network_chan: NetworkChan, + config: ProtocolConfig, + chain: Arc>, + import_queue: Box>, + on_demand: Option>>, + transaction_pool: Arc>, + specialization: S, + ) -> error::Result<(Sender>, Sender>)> { + let (protocol_sender, port) = channel::unbounded(); + let (from_network_sender, from_network_port) = channel::bounded(4); + let info = chain.info()?; + let sync = ChainSync::new( + is_offline, + is_major_syncing, + config.roles, + &info, + import_queue, + ); + let _ = thread::Builder::new() + .name("Protocol".into()) + .spawn(move || { + let mut protocol = Protocol { + status_sinks, + network_chan, + from_network_port, + port, + config: config, + context_data: ContextData { + peers: HashMap::new(), + chain, + }, + on_demand, + genesis_hash: info.chain.genesis_hash, + sync, + specialization: specialization, + consensus_gossip: ConsensusGossip::new(), + handshaking_peers: HashMap::new(), + connected_peers, + transaction_pool: transaction_pool, + }; + let tick_timeout = channel::tick(TICK_TIMEOUT); + let propagate_timeout = channel::tick(PROPAGATE_TIMEOUT); + let status_interval = channel::tick(STATUS_INTERVAL); + while protocol.run(&tick_timeout, &propagate_timeout, &status_interval) { + // Running until all senders have been dropped... + } + }) + .expect("Protocol thread spawning failed"); + Ok((protocol_sender, from_network_sender)) + } - fn run( - &mut self, - tick_timeout: &Receiver, - propagate_timeout: &Receiver, - status_interval: &Receiver, - ) -> bool { - let msg = select! { - recv(self.port) -> event => { - match event { - Ok(msg) => Incoming::FromClient(msg), - // Our sender has been dropped, quit. 
- Err(_) => { - Incoming::FromClient(ProtocolMsg::Stop) - }, - } - }, - recv(self.from_network_port) -> event => { - match event { - Ok(msg) => Incoming::FromNetwork(msg), - // Our sender has been dropped, quit. - Err(_) => { - Incoming::FromClient(ProtocolMsg::Stop) - }, - } - }, - recv(tick_timeout) -> _ => { - Incoming::FromClient(ProtocolMsg::Tick) - }, - recv(propagate_timeout) -> _ => { - Incoming::FromClient(ProtocolMsg::PropagateExtrinsics) - }, - recv(status_interval) -> _ => { - Incoming::FromClient(ProtocolMsg::Status) - }, - }; - self.handle_msg(msg) - } + fn run( + &mut self, + tick_timeout: &Receiver, + propagate_timeout: &Receiver, + status_interval: &Receiver, + ) -> bool { + let msg = select! { + recv(self.port) -> event => { + match event { + Ok(msg) => Incoming::FromClient(msg), + // Our sender has been dropped, quit. + Err(_) => { + Incoming::FromClient(ProtocolMsg::Stop) + }, + } + }, + recv(self.from_network_port) -> event => { + match event { + Ok(msg) => Incoming::FromNetwork(msg), + // Our sender has been dropped, quit. + Err(_) => { + Incoming::FromClient(ProtocolMsg::Stop) + }, + } + }, + recv(tick_timeout) -> _ => { + Incoming::FromClient(ProtocolMsg::Tick) + }, + recv(propagate_timeout) -> _ => { + Incoming::FromClient(ProtocolMsg::PropagateExtrinsics) + }, + recv(status_interval) -> _ => { + Incoming::FromClient(ProtocolMsg::Status) + }, + }; + self.handle_msg(msg) + } - fn handle_msg(&mut self, msg: Incoming) -> bool { - match msg { - Incoming::FromNetwork(msg) => self.handle_network_msg(msg), - Incoming::FromClient(msg) => self.handle_client_msg(msg), - } - } + fn handle_msg(&mut self, msg: Incoming) -> bool { + match msg { + Incoming::FromNetwork(msg) => self.handle_network_msg(msg), + Incoming::FromClient(msg) => self.handle_client_msg(msg), + } + } - fn handle_client_msg(&mut self, msg: ProtocolMsg) -> bool { - match msg { - ProtocolMsg::Status => self.on_status(), - ProtocolMsg::BlockImported(hash, header) => self.on_block_imported(hash, &header), - ProtocolMsg::BlockFinalized(hash, header) => self.on_block_finalized(hash, &header), - ProtocolMsg::ExecuteWithSpec(task) => { - let mut context = - ProtocolContext::new(&mut self.context_data, &self.network_chan); - task.call_box(&mut self.specialization, &mut context); - }, - ProtocolMsg::ExecuteWithGossip(task) => { - let mut context = - ProtocolContext::new(&mut self.context_data, &self.network_chan); - task.call_box(&mut self.consensus_gossip, &mut context); - } - ProtocolMsg::GossipConsensusMessage(topic, engine_id, message, force) => { - self.gossip_consensus_message(topic, engine_id, message, force) - } - ProtocolMsg::BlocksProcessed(hashes, has_error) => { - self.sync.blocks_processed(hashes, has_error); - let mut context = - ProtocolContext::new(&mut self.context_data, &self.network_chan); - self.sync.maintain_sync(&mut context); - }, - ProtocolMsg::RestartSync => { - let mut context = - ProtocolContext::new(&mut self.context_data, &self.network_chan); - self.sync.restart(&mut context); - } - ProtocolMsg::AnnounceBlock(hash) => self.announce_block(hash), - ProtocolMsg::BlockImportedSync(hash, number) => self.sync.block_imported(&hash, number), - ProtocolMsg::ClearJustificationRequests => self.sync.clear_justification_requests(), - ProtocolMsg::RequestJustification(hash, number) => { - let mut context = - ProtocolContext::new(&mut self.context_data, &self.network_chan); - self.sync.request_justification(&hash, number, &mut context); - }, - ProtocolMsg::JustificationImportResult(hash, number, success) 
=> self.sync.justification_import_result(hash, number, success), - ProtocolMsg::PropagateExtrinsics => self.propagate_extrinsics(), - ProtocolMsg::Tick => self.tick(), - #[cfg(any(test, feature = "test-helpers"))] - ProtocolMsg::Abort => self.abort(), - ProtocolMsg::Stop => { - self.stop(); - return false; - }, - } - true - } + fn handle_client_msg(&mut self, msg: ProtocolMsg) -> bool { + match msg { + ProtocolMsg::Status => self.on_status(), + ProtocolMsg::BlockImported(hash, header) => self.on_block_imported(hash, &header), + ProtocolMsg::BlockFinalized(hash, header) => self.on_block_finalized(hash, &header), + ProtocolMsg::ExecuteWithSpec(task) => { + let mut context = ProtocolContext::new(&mut self.context_data, &self.network_chan); + task.call_box(&mut self.specialization, &mut context); + } + ProtocolMsg::ExecuteWithGossip(task) => { + let mut context = ProtocolContext::new(&mut self.context_data, &self.network_chan); + task.call_box(&mut self.consensus_gossip, &mut context); + } + ProtocolMsg::GossipConsensusMessage(topic, engine_id, message, force) => { + self.gossip_consensus_message(topic, engine_id, message, force) + } + ProtocolMsg::BlocksProcessed(hashes, has_error) => { + self.sync.blocks_processed(hashes, has_error); + let mut context = ProtocolContext::new(&mut self.context_data, &self.network_chan); + self.sync.maintain_sync(&mut context); + } + ProtocolMsg::RestartSync => { + let mut context = ProtocolContext::new(&mut self.context_data, &self.network_chan); + self.sync.restart(&mut context); + } + ProtocolMsg::AnnounceBlock(hash) => self.announce_block(hash), + ProtocolMsg::BlockImportedSync(hash, number) => self.sync.block_imported(&hash, number), + ProtocolMsg::ClearJustificationRequests => self.sync.clear_justification_requests(), + ProtocolMsg::RequestJustification(hash, number) => { + let mut context = ProtocolContext::new(&mut self.context_data, &self.network_chan); + self.sync.request_justification(&hash, number, &mut context); + } + ProtocolMsg::JustificationImportResult(hash, number, success) => { + self.sync.justification_import_result(hash, number, success) + } + ProtocolMsg::PropagateExtrinsics => self.propagate_extrinsics(), + ProtocolMsg::Tick => self.tick(), + #[cfg(any(test, feature = "test-helpers"))] + ProtocolMsg::Abort => self.abort(), + ProtocolMsg::Stop => { + self.stop(); + return false; + } + } + true + } - fn handle_network_msg(&mut self, msg: FromNetworkMsg) -> bool { - match msg { - FromNetworkMsg::PeerDisconnected(who, debug_info) => self.on_peer_disconnected(who, debug_info), - FromNetworkMsg::PeerConnected(who, debug_info) => self.on_peer_connected(who, debug_info), - FromNetworkMsg::PeerClogged(who, message) => self.on_clogged_peer(who, message), - FromNetworkMsg::CustomMessage(who, message) => { - self.on_custom_message(who, message) - }, - } - true - } + fn handle_network_msg(&mut self, msg: FromNetworkMsg) -> bool { + match msg { + FromNetworkMsg::PeerDisconnected(who, debug_info) => { + self.on_peer_disconnected(who, debug_info) + } + FromNetworkMsg::PeerConnected(who, debug_info) => { + self.on_peer_connected(who, debug_info) + } + FromNetworkMsg::PeerClogged(who, message) => self.on_clogged_peer(who, message), + FromNetworkMsg::CustomMessage(who, message) => self.on_custom_message(who, message), + } + true + } - fn handle_response(&mut self, who: PeerId, response: &message::BlockResponse) -> Option> { - if let Some(ref mut peer) = self.context_data.peers.get_mut(&who) { - if let Some(_) = peer.obsolete_requests.remove(&response.id) { 
- trace!(target: "sync", "Ignoring obsolete block response packet from {} ({})", who, response.id,); - return None; - } - // Clear the request. If the response is invalid peer will be disconnected anyway. - let request = peer.block_request.take(); - if request.as_ref().map_or(false, |(_, r)| r.id == response.id) { - return request.map(|(_, r)| r) - } - trace!(target: "sync", "Unexpected response packet from {} ({})", who, response.id,); - let severity = Severity::Bad("Unexpected response packet received from peer".to_string()); - self.network_chan.send(NetworkMsg::ReportPeer(who, severity)) - } - None - } + fn handle_response( + &mut self, + who: PeerId, + response: &message::BlockResponse, + ) -> Option> { + if let Some(ref mut peer) = self.context_data.peers.get_mut(&who) { + if let Some(_) = peer.obsolete_requests.remove(&response.id) { + trace!(target: "sync", "Ignoring obsolete block response packet from {} ({})", who, response.id,); + return None; + } + // Clear the request. If the response is invalid peer will be disconnected anyway. + let request = peer.block_request.take(); + if request.as_ref().map_or(false, |(_, r)| r.id == response.id) { + return request.map(|(_, r)| r); + } + trace!(target: "sync", "Unexpected response packet from {} ({})", who, response.id,); + let severity = + Severity::Bad("Unexpected response packet received from peer".to_string()); + self.network_chan + .send(NetworkMsg::ReportPeer(who, severity)) + } + None + } - fn update_peer_info(&mut self, who: &PeerId) { - if let Some(info) = self.sync.peer_info(who) { - if let Some(ref mut peer) = self.context_data.peers.get_mut(who) { - peer.info.best_hash = info.best_hash; - peer.info.best_number = info.best_number; - } - let mut peers = self.connected_peers.write(); - if let Some(ref mut peer) = peers.get_mut(who) { - peer.peer_info.best_hash = info.best_hash; - peer.peer_info.best_number = info.best_number; - } - } - } + fn update_peer_info(&mut self, who: &PeerId) { + if let Some(info) = self.sync.peer_info(who) { + if let Some(ref mut peer) = self.context_data.peers.get_mut(who) { + peer.info.best_hash = info.best_hash; + peer.info.best_number = info.best_number; + } + let mut peers = self.connected_peers.write(); + if let Some(ref mut peer) = peers.get_mut(who) { + peer.peer_info.best_hash = info.best_hash; + peer.peer_info.best_number = info.best_number; + } + } + } - /// Propagates protocol statuses. - fn on_status(&mut self) { - let status = ProtocolStatus { - sync: self.sync.status(), - num_peers: self.context_data.peers.values().count(), - num_active_peers: self - .context_data - .peers - .values() - .filter(|p| p.block_request.is_some()) - .count(), - }; - self.status_sinks.lock().retain(|sink| sink.unbounded_send(status.clone()).is_ok()); - } + /// Propagates protocol statuses. 
+ fn on_status(&mut self) { + let status = ProtocolStatus { + sync: self.sync.status(), + num_peers: self.context_data.peers.values().count(), + num_active_peers: self + .context_data + .peers + .values() + .filter(|p| p.block_request.is_some()) + .count(), + }; + self.status_sinks + .lock() + .retain(|sink| sink.unbounded_send(status.clone()).is_ok()); + } - fn on_custom_message(&mut self, who: PeerId, message: Message) { - match message { - GenericMessage::Status(s) => self.on_status_message(who, s), - GenericMessage::BlockRequest(r) => self.on_block_request(who, r), - GenericMessage::BlockResponse(r) => { - if let Some(request) = self.handle_response(who.clone(), &r) { - self.on_block_response(who.clone(), request, r); - self.update_peer_info(&who); - } - }, - GenericMessage::BlockAnnounce(announce) => { - self.on_block_announce(who.clone(), announce); - self.update_peer_info(&who); - }, - GenericMessage::Transactions(m) => self.on_extrinsics(who, m), - GenericMessage::RemoteCallRequest(request) => self.on_remote_call_request(who, request), - GenericMessage::RemoteCallResponse(response) => self.on_remote_call_response(who, response), - GenericMessage::RemoteReadRequest(request) => self.on_remote_read_request(who, request), - GenericMessage::RemoteReadResponse(response) => self.on_remote_read_response(who, response), - GenericMessage::RemoteHeaderRequest(request) => self.on_remote_header_request(who, request), - GenericMessage::RemoteHeaderResponse(response) => self.on_remote_header_response(who, response), - GenericMessage::RemoteChangesRequest(request) => self.on_remote_changes_request(who, request), - GenericMessage::RemoteChangesResponse(response) => self.on_remote_changes_response(who, response), - GenericMessage::Consensus(msg) => { - self.consensus_gossip.on_incoming( - &mut ProtocolContext::new(&mut self.context_data, &self.network_chan), - who, - msg, - ); - } - other => self.specialization.on_message( - &mut ProtocolContext::new(&mut self.context_data, &self.network_chan), - who, - &mut Some(other), - ), - } - } + fn on_custom_message(&mut self, who: PeerId, message: Message) { + match message { + GenericMessage::Status(s) => self.on_status_message(who, s), + GenericMessage::BlockRequest(r) => self.on_block_request(who, r), + GenericMessage::BlockResponse(r) => { + if let Some(request) = self.handle_response(who.clone(), &r) { + self.on_block_response(who.clone(), request, r); + self.update_peer_info(&who); + } + } + GenericMessage::BlockAnnounce(announce) => { + self.on_block_announce(who.clone(), announce); + self.update_peer_info(&who); + } + GenericMessage::Transactions(m) => self.on_extrinsics(who, m), + GenericMessage::RemoteCallRequest(request) => self.on_remote_call_request(who, request), + GenericMessage::RemoteCallResponse(response) => { + self.on_remote_call_response(who, response) + } + GenericMessage::RemoteReadRequest(request) => self.on_remote_read_request(who, request), + GenericMessage::RemoteReadResponse(response) => { + self.on_remote_read_response(who, response) + } + GenericMessage::RemoteHeaderRequest(request) => { + self.on_remote_header_request(who, request) + } + GenericMessage::RemoteHeaderResponse(response) => { + self.on_remote_header_response(who, response) + } + GenericMessage::RemoteChangesRequest(request) => { + self.on_remote_changes_request(who, request) + } + GenericMessage::RemoteChangesResponse(response) => { + self.on_remote_changes_response(who, response) + } + GenericMessage::Consensus(msg) => { + self.consensus_gossip.on_incoming( + &mut 
ProtocolContext::new(&mut self.context_data, &self.network_chan), + who, + msg, + ); + } + other => self.specialization.on_message( + &mut ProtocolContext::new(&mut self.context_data, &self.network_chan), + who, + &mut Some(other), + ), + } + } - fn send_message(&mut self, who: PeerId, message: Message) { - send_message::( - &mut self.context_data.peers, - &self.network_chan, - who, - message, - ); - } + fn send_message(&mut self, who: PeerId, message: Message) { + send_message::( + &mut self.context_data.peers, + &self.network_chan, + who, + message, + ); + } - fn gossip_consensus_message( - &mut self, - topic: B::Hash, - engine_id: ConsensusEngineId, - message: Vec, - force: bool, - ) { - self.consensus_gossip.multicast( - &mut ProtocolContext::new(&mut self.context_data, &self.network_chan), - topic, - ConsensusMessage{ data: message, engine_id }, - force, - ); - } + fn gossip_consensus_message( + &mut self, + topic: B::Hash, + engine_id: ConsensusEngineId, + message: Vec, + force: bool, + ) { + self.consensus_gossip.multicast( + &mut ProtocolContext::new(&mut self.context_data, &self.network_chan), + topic, + ConsensusMessage { + data: message, + engine_id, + }, + force, + ); + } - /// Called when a new peer is connected - fn on_peer_connected(&mut self, who: PeerId, debug_info: String) { - trace!(target: "sync", "Connecting {}: {}", who, debug_info); - self.handshaking_peers.insert(who.clone(), HandshakingPeer { timestamp: time::Instant::now() }); - self.send_status(who); - } + /// Called when a new peer is connected + fn on_peer_connected(&mut self, who: PeerId, debug_info: String) { + trace!(target: "sync", "Connecting {}: {}", who, debug_info); + self.handshaking_peers.insert( + who.clone(), + HandshakingPeer { + timestamp: time::Instant::now(), + }, + ); + self.send_status(who); + } - /// Called by peer when it is disconnecting - fn on_peer_disconnected(&mut self, peer: PeerId, debug_info: String) { - trace!(target: "sync", "Disconnecting {}: {}", peer, debug_info); - // lock all the the peer lists so that add/remove peer events are in order - let removed = { - self.handshaking_peers.remove(&peer); - self.connected_peers.write().remove(&peer); - self.context_data.peers.remove(&peer).is_some() - }; - if removed { - let mut context = ProtocolContext::new(&mut self.context_data, &self.network_chan); - self.consensus_gossip.peer_disconnected(&mut context, peer.clone()); - self.sync.peer_disconnected(&mut context, peer.clone()); - self.specialization.on_disconnect(&mut context, peer.clone()); - self.on_demand.as_ref().map(|s| s.on_disconnect(peer)); - } - } + /// Called by peer when it is disconnecting + fn on_peer_disconnected(&mut self, peer: PeerId, debug_info: String) { + trace!(target: "sync", "Disconnecting {}: {}", peer, debug_info); + // lock all the the peer lists so that add/remove peer events are in order + let removed = { + self.handshaking_peers.remove(&peer); + self.connected_peers.write().remove(&peer); + self.context_data.peers.remove(&peer).is_some() + }; + if removed { + let mut context = ProtocolContext::new(&mut self.context_data, &self.network_chan); + self.consensus_gossip + .peer_disconnected(&mut context, peer.clone()); + self.sync.peer_disconnected(&mut context, peer.clone()); + self.specialization + .on_disconnect(&mut context, peer.clone()); + self.on_demand.as_ref().map(|s| s.on_disconnect(peer)); + } + } - /// Called as a back-pressure mechanism if the networking detects that the peer cannot process - /// our messaging rate fast enough. 
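The new `on_status` body above doubles as subscriber cleanup: `retain` keeps only the sinks whose `unbounded_send` succeeded, so a dropped receiver silently unsubscribes itself. A minimal std-only sketch of that prune-on-send pattern (the `StatusBroadcaster` type and `u32` payload are illustrative, not part of the patch):

    use std::sync::mpsc::{channel, Receiver, Sender};

    /// Fan-out broadcaster that drops subscribers whose receiving end is gone.
    struct StatusBroadcaster<T: Clone> {
        sinks: Vec<Sender<T>>,
    }

    impl<T: Clone> StatusBroadcaster<T> {
        fn subscribe(&mut self) -> Receiver<T> {
            let (tx, rx) = channel();
            self.sinks.push(tx);
            rx
        }

        /// Send `status` to every live subscriber; `send` fails once the
        /// `Receiver` is dropped, and `retain` removes that sink for good.
        fn broadcast(&mut self, status: T) {
            self.sinks.retain(|sink| sink.send(status.clone()).is_ok());
        }
    }

    fn main() {
        let mut b = StatusBroadcaster { sinks: Vec::new() };
        let rx = b.subscribe();
        b.broadcast(1u32);
        assert_eq!(rx.recv().unwrap(), 1);
        drop(rx);
        b.broadcast(2); // the dead sink is pruned during this send
        assert!(b.sinks.is_empty());
    }

The same one-pass shape means the status list never needs a separate garbage-collection step.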
- pub fn on_clogged_peer(&self, who: PeerId, _msg: Option>) { - // We don't do anything but print some diagnostics for now. - if let Some(peer) = self.context_data.peers.get(&who) { - debug!(target: "sync", "Clogged peer {} (protocol_version: {:?}; roles: {:?}; \ + /// Called as a back-pressure mechanism if the networking detects that the peer cannot process + /// our messaging rate fast enough. + pub fn on_clogged_peer(&self, who: PeerId, _msg: Option>) { + // We don't do anything but print some diagnostics for now. + if let Some(peer) = self.context_data.peers.get(&who) { + debug!(target: "sync", "Clogged peer {} (protocol_version: {:?}; roles: {:?}; \ known_extrinsics: {:?}; known_blocks: {:?}; best_hash: {:?}; best_number: {:?})", who, peer.info.protocol_version, peer.info.roles, peer.known_extrinsics, peer.known_blocks, peer.info.best_hash, peer.info.best_number); - } else { - debug!(target: "sync", "Peer clogged before being properly connected"); - } - } + } else { + debug!(target: "sync", "Peer clogged before being properly connected"); + } + } - fn on_block_request(&mut self, peer: PeerId, request: message::BlockRequest) { - trace!(target: "sync", "BlockRequest {} from {}: from {:?} to {:?} max {:?}", + fn on_block_request(&mut self, peer: PeerId, request: message::BlockRequest) { + trace!(target: "sync", "BlockRequest {} from {}: from {:?} to {:?} max {:?}", request.id, peer, request.from, request.to, request.max); - let mut blocks = Vec::new(); - let mut id = match request.from { - message::FromBlock::Hash(h) => BlockId::Hash(h), - message::FromBlock::Number(n) => BlockId::Number(n), - }; - let max = cmp::min(request.max.unwrap_or(u32::max_value()), MAX_BLOCK_DATA_RESPONSE) as usize; - let get_header = request.fields.contains(message::BlockAttributes::HEADER); - let get_body = request.fields.contains(message::BlockAttributes::BODY); - let get_justification = request - .fields - .contains(message::BlockAttributes::JUSTIFICATION); - while let Some(header) = self.context_data.chain.header(&id).unwrap_or(None) { - if blocks.len() >= max { - break; - } - let number = header.number().clone(); - let hash = header.hash(); - let parent_hash = header.parent_hash().clone(); - let justification = if get_justification { - self.context_data.chain.justification(&BlockId::Hash(hash)).unwrap_or(None) - } else { - None - }; - let block_data = message::generic::BlockData { - hash: hash, - header: if get_header { Some(header) } else { None }, - body: if get_body { - self.context_data - .chain - .body(&BlockId::Hash(hash)) - .unwrap_or(None) - } else { - None - }, - receipt: None, - message_queue: None, - justification, - }; - blocks.push(block_data); - match request.direction { - message::Direction::Ascending => id = BlockId::Number(number + As::sa(1)), - message::Direction::Descending => { - if number == As::sa(0) { - break; - } - id = BlockId::Hash(parent_hash) - } - } - } - let response = message::generic::BlockResponse { - id: request.id, - blocks: blocks, - }; - trace!(target: "sync", "Sending BlockResponse with {} blocks", response.blocks.len()); - self.send_message(peer, GenericMessage::BlockResponse(response)) - } + let mut blocks = Vec::new(); + let mut id = match request.from { + message::FromBlock::Hash(h) => BlockId::Hash(h), + message::FromBlock::Number(n) => BlockId::Number(n), + }; + let max = cmp::min( + request.max.unwrap_or(u32::max_value()), + MAX_BLOCK_DATA_RESPONSE, + ) as usize; + let get_header = request.fields.contains(message::BlockAttributes::HEADER); + let get_body = 
request.fields.contains(message::BlockAttributes::BODY); + let get_justification = request + .fields + .contains(message::BlockAttributes::JUSTIFICATION); + while let Some(header) = self.context_data.chain.header(&id).unwrap_or(None) { + if blocks.len() >= max { + break; + } + let number = header.number().clone(); + let hash = header.hash(); + let parent_hash = header.parent_hash().clone(); + let justification = if get_justification { + self.context_data + .chain + .justification(&BlockId::Hash(hash)) + .unwrap_or(None) + } else { + None + }; + let block_data = message::generic::BlockData { + hash: hash, + header: if get_header { Some(header) } else { None }, + body: if get_body { + self.context_data + .chain + .body(&BlockId::Hash(hash)) + .unwrap_or(None) + } else { + None + }, + receipt: None, + message_queue: None, + justification, + }; + blocks.push(block_data); + match request.direction { + message::Direction::Ascending => id = BlockId::Number(number + As::sa(1)), + message::Direction::Descending => { + if number == As::sa(0) { + break; + } + id = BlockId::Hash(parent_hash) + } + } + } + let response = message::generic::BlockResponse { + id: request.id, + blocks: blocks, + }; + trace!(target: "sync", "Sending BlockResponse with {} blocks", response.blocks.len()); + self.send_message(peer, GenericMessage::BlockResponse(response)) + } - fn on_block_response( - &mut self, - peer: PeerId, - request: message::BlockRequest, - response: message::BlockResponse, - ) { - let blocks_range = match ( - response.blocks.first().and_then(|b| b.header.as_ref().map(|h| h.number())), - response.blocks.last().and_then(|b| b.header.as_ref().map(|h| h.number())), - ) { - (Some(first), Some(last)) if first != last => format!(" ({}..{})", first, last), - (Some(first), Some(_)) => format!(" ({})", first), - _ => Default::default(), - }; - trace!(target: "sync", "BlockResponse {} from {} with {} blocks {}", + fn on_block_response( + &mut self, + peer: PeerId, + request: message::BlockRequest, + response: message::BlockResponse, + ) { + let blocks_range = match ( + response + .blocks + .first() + .and_then(|b| b.header.as_ref().map(|h| h.number())), + response + .blocks + .last() + .and_then(|b| b.header.as_ref().map(|h| h.number())), + ) { + (Some(first), Some(last)) if first != last => format!(" ({}..{})", first, last), + (Some(first), Some(_)) => format!(" ({})", first), + _ => Default::default(), + }; + trace!(target: "sync", "BlockResponse {} from {} with {} blocks {}", response.id, peer, response.blocks.len(), blocks_range); - // TODO [andre]: move this logic to the import queue so that - // justifications are imported asynchronously (#1482) - if request.fields == message::BlockAttributes::JUSTIFICATION { - self.sync.on_block_justification_data( - &mut ProtocolContext::new(&mut self.context_data, &self.network_chan), - peer, - request, - response, - ); - } else { - self.sync.on_block_data(&mut ProtocolContext::new(&mut self.context_data, &self.network_chan), peer, request, response); - } - } - - /// Perform time based maintenance. 
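The reformatted `on_block_request` above is a bounded chain walk: start at `request.from`, honour the `max` cap, and stop a descending walk at genesis. A simplified sketch of the same loop over a numbers-only chain (the real handler follows `parent_hash` downwards and also assembles bodies and justifications):

    use std::collections::HashMap;

    #[derive(Clone)]
    struct Header { number: u64 }

    /// Collect at most `max` headers starting at `start`, ascending or
    /// descending, stopping a descending walk at block 0.
    fn collect_blocks(
        chain: &HashMap<u64, Header>,
        start: u64,
        max: usize,
        ascending: bool,
    ) -> Vec<Header> {
        let mut blocks = Vec::new();
        let mut number = start;
        while let Some(header) = chain.get(&number) {
            if blocks.len() >= max {
                break;
            }
            blocks.push(header.clone());
            if ascending {
                number += 1;
            } else {
                if number == 0 { break; }
                number -= 1;
            }
        }
        blocks
    }

    fn main() {
        let chain: HashMap<u64, Header> =
            (0..5).map(|n| (n, Header { number: n })).collect();
        // Five blocks exist, but the response is capped at max = 2.
        assert_eq!(collect_blocks(&chain, 1, 2, true).len(), 2);
        // A descending walk from genesis returns just genesis.
        assert_eq!(collect_blocks(&chain, 0, 10, false).len(), 1);
    }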
- fn tick(&mut self) { - self.consensus_gossip.collect_garbage(); - self.maintain_peers(); - self.sync.tick(&mut ProtocolContext::new(&mut self.context_data, &self.network_chan)); - self.on_demand - .as_ref() - .map(|s| s.maintain_peers()); - } - - fn maintain_peers(&mut self) { - let tick = time::Instant::now(); - let mut aborting = Vec::new(); - { - for (who, peer) in self.context_data.peers.iter() { - if peer.block_request.as_ref().map_or(false, |(t, _)| (tick - *t).as_secs() > REQUEST_TIMEOUT_SEC) { - trace!(target: "sync", "Reqeust timeout {}", who); - aborting.push(who.clone()); - } else if peer.obsolete_requests.values().any(|t| (tick - *t).as_secs() > REQUEST_TIMEOUT_SEC) { - trace!(target: "sync", "Obsolete timeout {}", who); - aborting.push(who.clone()); - } - } - for (who, _) in self.handshaking_peers.iter().filter(|(_, handshaking)| (tick - handshaking.timestamp).as_secs() > REQUEST_TIMEOUT_SEC) { - trace!(target: "sync", "Handshake timeout {}", who); - aborting.push(who.clone()); - } - } - - self.specialization.maintain_peers(&mut ProtocolContext::new(&mut self.context_data, &self.network_chan)); - for p in aborting { - let _ = self - .network_chan - .send(NetworkMsg::ReportPeer(p, Severity::Timeout)); - } - } - - /// Called by peer to report status - fn on_status_message(&mut self, who: PeerId, status: message::Status) { - trace!(target: "sync", "New peer {} {:?}", who, status); - { - if self.context_data.peers.contains_key(&who) { - debug!("Unexpected status packet from {}", who); - return; - } - if status.genesis_hash != self.genesis_hash { - let reason = format!( - "Peer is on different chain (our genesis: {} theirs: {})", - self.genesis_hash, status.genesis_hash - ); - self.network_chan.send(NetworkMsg::ReportPeer( - who, - Severity::Bad(reason), - )); - return; - } - if status.version < MIN_VERSION && CURRENT_VERSION < status.min_supported_version { - let reason = format!("Peer using unsupported protocol version {}", status.version); - self.network_chan.send(NetworkMsg::ReportPeer( - who, - Severity::Bad(reason), - )); - return; - } - if self.config.roles & Roles::LIGHT == Roles::LIGHT { - let self_best_block = self - .context_data - .chain - .info() - .ok() - .and_then(|info| info.best_queued_number) - .unwrap_or_else(|| Zero::zero()); - let blocks_difference = self_best_block - .as_() - .checked_sub(status.best_number.as_()) - .unwrap_or(0); - if blocks_difference > LIGHT_MAXIMAL_BLOCKS_DIFFERENCE { - self.network_chan.send(NetworkMsg::ReportPeer( - who, - Severity::Useless( - "Peer is far behind us and will unable to serve light requests" - .to_string(), - ), - )); - return; - } - } - - let cache_limit = NonZeroUsize::new(1_000_000).expect("1_000_000 > 0; qed"); - - let info = match self.handshaking_peers.remove(&who) { - Some(_handshaking) => { - let peer_info = PeerInfo { - protocol_version: status.version, - roles: status.roles, - best_hash: status.best_hash, - best_number: status.best_number - }; - self.connected_peers - .write() - .insert(who.clone(), ConnectedPeer { peer_info: peer_info.clone() }); - peer_info - }, - None => { - debug!(target: "sync", "Received status from previously unconnected node {}", who); - return; - }, - }; - - let peer = Peer { - info, - block_request: None, - known_extrinsics: LruHashSet::new(cache_limit), - known_blocks: LruHashSet::new(cache_limit), - next_request_id: 0, - obsolete_requests: HashMap::new(), - }; - self.context_data.peers.insert(who.clone(), peer); - - debug!(target: "sync", "Connected {}", who); - } - - let mut 
context = ProtocolContext::new(&mut self.context_data, &self.network_chan); - self.on_demand - .as_ref() - .map(|s| s.on_connect(who.clone(), status.roles, status.best_number)); - self.sync.new_peer(&mut context, who.clone()); - self.consensus_gossip - .new_peer(&mut context, who.clone(), status.roles); - self.specialization.on_connect(&mut context, who, status); - } - - /// Called when peer sends us new extrinsics - fn on_extrinsics(&mut self, who: PeerId, extrinsics: message::Transactions) { - // Accept extrinsics only when fully synced - if self.sync.status().state != SyncState::Idle { - trace!(target: "sync", "{} Ignoring extrinsics while syncing", who); - return; - } - trace!(target: "sync", "Received {} extrinsics from {}", extrinsics.len(), who); - if let Some(ref mut peer) = self.context_data.peers.get_mut(&who) { - for t in extrinsics { - if let Some(hash) = self.transaction_pool.import(&t) { - peer.known_extrinsics.insert(hash); - } else { - trace!(target: "sync", "Extrinsic rejected"); - } - } - } - } - - /// Called when we propagate ready extrinsics to peers. - fn propagate_extrinsics(&mut self) { - debug!(target: "sync", "Propagating extrinsics"); - - // Accept transactions only when fully synced - if self.sync.status().state != SyncState::Idle { - return; - } - - let extrinsics = self.transaction_pool.transactions(); - let mut propagated_to = HashMap::new(); - for (who, peer) in self.context_data.peers.iter_mut() { - let (hashes, to_send): (Vec<_>, Vec<_>) = extrinsics - .iter() - .filter(|&(ref hash, _)| peer.known_extrinsics.insert(hash.clone())) - .cloned() - .unzip(); - - if !to_send.is_empty() { - for hash in hashes { - propagated_to - .entry(hash) - .or_insert_with(Vec::new) - .push(who.to_base58()); - } - trace!(target: "sync", "Sending {} transactions to {}", to_send.len(), who); - self.network_chan.send(NetworkMsg::Outgoing(who.clone(), GenericMessage::Transactions(to_send))) - } - } - self.transaction_pool.on_broadcasted(propagated_to); - } + // TODO [andre]: move this logic to the import queue so that + // justifications are imported asynchronously (#1482) + if request.fields == message::BlockAttributes::JUSTIFICATION { + self.sync.on_block_justification_data( + &mut ProtocolContext::new(&mut self.context_data, &self.network_chan), + peer, + request, + response, + ); + } else { + self.sync.on_block_data( + &mut ProtocolContext::new(&mut self.context_data, &self.network_chan), + peer, + request, + response, + ); + } + } - /// Make sure an important block is propagated to peers. - /// - /// In chain-based consensus, we often need to make sure non-best forks are - /// at least temporarily synced. - pub fn announce_block(&mut self, hash: B::Hash) { - let header = match self.context_data.chain.header(&BlockId::Hash(hash)) { - Ok(Some(header)) => header, - Ok(None) => { - warn!("Trying to announce unknown block: {}", hash); - return; - } - Err(e) => { - warn!("Error reading block header {}: {:?}", hash, e); - return; - } - }; - let hash = header.hash(); + /// Perform time based maintenance. 
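`maintain_peers` (old form above, rustfmt'd form below) does its timeout sweep in two phases: collect offenders while iterating immutably, then report them once the borrow ends. A std-only sketch of the scan phase; the 40-second figure is a stand-in for `REQUEST_TIMEOUT_SEC`, whose actual value lives elsewhere in protocol.rs:

    use std::collections::HashMap;
    use std::time::{Duration, Instant};

    // Stand-in for REQUEST_TIMEOUT_SEC.
    const REQUEST_TIMEOUT: Duration = Duration::from_secs(40);

    /// Phase one: gather timed-out peers while borrowing the map immutably.
    /// Phase two (reporting/disconnecting) runs on the returned list after
    /// the borrow has ended.
    fn timed_out_peers(requests: &HashMap<String, Instant>, now: Instant) -> Vec<String> {
        requests
            .iter()
            .filter(|(_, started)| now.duration_since(**started) > REQUEST_TIMEOUT)
            .map(|(who, _)| who.clone())
            .collect()
    }

    fn main() {
        let mut requests = HashMap::new();
        requests.insert("peer-a".to_string(), Instant::now());
        // Fresh request: nothing to abort yet.
        assert!(timed_out_peers(&requests, Instant::now()).is_empty());
    }

Splitting scan from action is what lets the real code send `NetworkMsg::ReportPeer` without fighting the borrow checker over `context_data.peers`.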
+ fn tick(&mut self) { + self.consensus_gossip.collect_garbage(); + self.maintain_peers(); + self.sync.tick(&mut ProtocolContext::new( + &mut self.context_data, + &self.network_chan, + )); + self.on_demand.as_ref().map(|s| s.maintain_peers()); + } - let message = GenericMessage::BlockAnnounce(message::BlockAnnounce { header: header.clone() }); + fn maintain_peers(&mut self) { + let tick = time::Instant::now(); + let mut aborting = Vec::new(); + { + for (who, peer) in self.context_data.peers.iter() { + if peer + .block_request + .as_ref() + .map_or(false, |(t, _)| (tick - *t).as_secs() > REQUEST_TIMEOUT_SEC) + { + trace!(target: "sync", "Reqeust timeout {}", who); + aborting.push(who.clone()); + } else if peer + .obsolete_requests + .values() + .any(|t| (tick - *t).as_secs() > REQUEST_TIMEOUT_SEC) + { + trace!(target: "sync", "Obsolete timeout {}", who); + aborting.push(who.clone()); + } + } + for (who, _) in self.handshaking_peers.iter().filter(|(_, handshaking)| { + (tick - handshaking.timestamp).as_secs() > REQUEST_TIMEOUT_SEC + }) { + trace!(target: "sync", "Handshake timeout {}", who); + aborting.push(who.clone()); + } + } + + self.specialization + .maintain_peers(&mut ProtocolContext::new( + &mut self.context_data, + &self.network_chan, + )); + for p in aborting { + let _ = self + .network_chan + .send(NetworkMsg::ReportPeer(p, Severity::Timeout)); + } + } - for (who, ref mut peer) in self.context_data.peers.iter_mut() { - trace!(target: "sync", "Reannouncing block {:?} to {}", hash, who); - peer.known_blocks.insert(hash); - self.network_chan.send(NetworkMsg::Outgoing(who.clone(), message.clone())) - } - } + /// Called by peer to report status + fn on_status_message(&mut self, who: PeerId, status: message::Status) { + trace!(target: "sync", "New peer {} {:?}", who, status); + { + if self.context_data.peers.contains_key(&who) { + debug!("Unexpected status packet from {}", who); + return; + } + if status.genesis_hash != self.genesis_hash { + let reason = format!( + "Peer is on different chain (our genesis: {} theirs: {})", + self.genesis_hash, status.genesis_hash + ); + self.network_chan + .send(NetworkMsg::ReportPeer(who, Severity::Bad(reason))); + return; + } + if status.version < MIN_VERSION && CURRENT_VERSION < status.min_supported_version { + let reason = format!("Peer using unsupported protocol version {}", status.version); + self.network_chan + .send(NetworkMsg::ReportPeer(who, Severity::Bad(reason))); + return; + } + if self.config.roles & Roles::LIGHT == Roles::LIGHT { + let self_best_block = self + .context_data + .chain + .info() + .ok() + .and_then(|info| info.best_queued_number) + .unwrap_or_else(|| Zero::zero()); + let blocks_difference = self_best_block + .as_() + .checked_sub(status.best_number.as_()) + .unwrap_or(0); + if blocks_difference > LIGHT_MAXIMAL_BLOCKS_DIFFERENCE { + self.network_chan.send(NetworkMsg::ReportPeer( + who, + Severity::Useless( + "Peer is far behind us and will unable to serve light requests" + .to_string(), + ), + )); + return; + } + } + + let cache_limit = NonZeroUsize::new(1_000_000).expect("1_000_000 > 0; qed"); + + let info = match self.handshaking_peers.remove(&who) { + Some(_handshaking) => { + let peer_info = PeerInfo { + protocol_version: status.version, + roles: status.roles, + best_hash: status.best_hash, + best_number: status.best_number, + }; + self.connected_peers.write().insert( + who.clone(), + ConnectedPeer { + peer_info: peer_info.clone(), + }, + ); + peer_info + } + None => { + debug!(target: "sync", "Received status from 
previously unconnected node {}", who); + return; + } + }; + + let peer = Peer { + info, + block_request: None, + known_extrinsics: LruHashSet::new(cache_limit), + known_blocks: LruHashSet::new(cache_limit), + next_request_id: 0, + obsolete_requests: HashMap::new(), + }; + self.context_data.peers.insert(who.clone(), peer); + + debug!(target: "sync", "Connected {}", who); + } + + let mut context = ProtocolContext::new(&mut self.context_data, &self.network_chan); + self.on_demand + .as_ref() + .map(|s| s.on_connect(who.clone(), status.roles, status.best_number)); + self.sync.new_peer(&mut context, who.clone()); + self.consensus_gossip + .new_peer(&mut context, who.clone(), status.roles); + self.specialization.on_connect(&mut context, who, status); + } - /// Send Status message - fn send_status(&mut self, who: PeerId) { - if let Ok(info) = self.context_data.chain.info() { - let status = message::generic::Status { - version: CURRENT_VERSION, - min_supported_version: MIN_VERSION, - genesis_hash: info.chain.genesis_hash, - roles: self.config.roles.into(), - best_number: info.chain.best_number, - best_hash: info.chain.best_hash, - chain_status: self.specialization.status(), - }; - self.send_message(who, GenericMessage::Status(status)) - } - } + /// Called when peer sends us new extrinsics + fn on_extrinsics(&mut self, who: PeerId, extrinsics: message::Transactions) { + // Accept extrinsics only when fully synced + if self.sync.status().state != SyncState::Idle { + trace!(target: "sync", "{} Ignoring extrinsics while syncing", who); + return; + } + trace!(target: "sync", "Received {} extrinsics from {}", extrinsics.len(), who); + if let Some(ref mut peer) = self.context_data.peers.get_mut(&who) { + for t in extrinsics { + if let Some(hash) = self.transaction_pool.import(&t) { + peer.known_extrinsics.insert(hash); + } else { + trace!(target: "sync", "Extrinsic rejected"); + } + } + } + } - fn abort(&mut self) { - self.sync.clear(); - self.specialization.on_abort(); - self.context_data.peers.clear(); - self.handshaking_peers.clear(); - self.consensus_gossip.abort(); - } + /// Called when we propagate ready extrinsics to peers. + fn propagate_extrinsics(&mut self) { + debug!(target: "sync", "Propagating extrinsics"); + + // Accept transactions only when fully synced + if self.sync.status().state != SyncState::Idle { + return; + } + + let extrinsics = self.transaction_pool.transactions(); + let mut propagated_to = HashMap::new(); + for (who, peer) in self.context_data.peers.iter_mut() { + let (hashes, to_send): (Vec<_>, Vec<_>) = extrinsics + .iter() + .filter(|&(ref hash, _)| peer.known_extrinsics.insert(hash.clone())) + .cloned() + .unzip(); + + if !to_send.is_empty() { + for hash in hashes { + propagated_to + .entry(hash) + .or_insert_with(Vec::new) + .push(who.to_base58()); + } + trace!(target: "sync", "Sending {} transactions to {}", to_send.len(), who); + self.network_chan.send(NetworkMsg::Outgoing( + who.clone(), + GenericMessage::Transactions(to_send), + )) + } + } + self.transaction_pool.on_broadcasted(propagated_to); + } - fn stop(&mut self) { - // stop processing import requests first (without holding a sync lock) - self.sync.stop(); + /// Make sure an important block is propagated to peers. + /// + /// In chain-based consensus, we often need to make sure non-best forks are + /// at least temporarily synced. 
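The reformatted `propagate_extrinsics` above filters the shared pool through each peer's `known_extrinsics` set: `insert` returns `true` only for unseen hashes, and `unzip` splits the survivors into the hash list (for `on_broadcasted`) and the payload list (for the wire). A small self-contained version of that filter, with `u64` hashes and byte-vector extrinsics as stand-ins:

    use std::collections::HashSet;

    /// Per-peer propagation filter: keep only pool entries this peer has
    /// not seen, recording them as seen in the same pass.
    fn select_for_peer(
        known: &mut HashSet<u64>,
        pool: &[(u64, Vec<u8>)],
    ) -> (Vec<u64>, Vec<Vec<u8>>) {
        pool.iter()
            .filter(|(hash, _)| known.insert(*hash))
            .cloned()
            .unzip()
    }

    fn main() {
        let mut known = HashSet::new();
        let pool = vec![(1, vec![0xaa]), (2, vec![0xbb])];
        let (hashes, to_send) = select_for_peer(&mut known, &pool);
        assert_eq!(hashes, vec![1, 2]);
        assert_eq!(to_send.len(), 2);
        // A second pass sends nothing: the peer already knows both hashes.
        assert!(select_for_peer(&mut known, &pool).0.is_empty());
    }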
+ pub fn announce_block(&mut self, hash: B::Hash) { + let header = match self.context_data.chain.header(&BlockId::Hash(hash)) { + Ok(Some(header)) => header, + Ok(None) => { + warn!("Trying to announce unknown block: {}", hash); + return; + } + Err(e) => { + warn!("Error reading block header {}: {:?}", hash, e); + return; + } + }; + let hash = header.hash(); + + let message = GenericMessage::BlockAnnounce(message::BlockAnnounce { + header: header.clone(), + }); + + for (who, ref mut peer) in self.context_data.peers.iter_mut() { + trace!(target: "sync", "Reannouncing block {:?} to {}", hash, who); + peer.known_blocks.insert(hash); + self.network_chan + .send(NetworkMsg::Outgoing(who.clone(), message.clone())) + } + } - // and then clear all the sync data - self.abort(); - } + /// Send Status message + fn send_status(&mut self, who: PeerId) { + if let Ok(info) = self.context_data.chain.info() { + let status = message::generic::Status { + version: CURRENT_VERSION, + min_supported_version: MIN_VERSION, + genesis_hash: info.chain.genesis_hash, + roles: self.config.roles.into(), + best_number: info.chain.best_number, + best_hash: info.chain.best_hash, + chain_status: self.specialization.status(), + }; + self.send_message(who, GenericMessage::Status(status)) + } + } - fn on_block_announce(&mut self, who: PeerId, announce: message::BlockAnnounce) { - let header = announce.header; - let hash = header.hash(); - { - if let Some(ref mut peer) = self.context_data.peers.get_mut(&who) { - peer.known_blocks.insert(hash.clone()); - } - } - self.on_demand - .as_ref() - .map(|s| s.on_block_announce(who.clone(), *header.number())); - self.sync.on_block_announce( - &mut ProtocolContext::new(&mut self.context_data, &self.network_chan), - who, - hash, - &header, - ); - } + fn abort(&mut self) { + self.sync.clear(); + self.specialization.on_abort(); + self.context_data.peers.clear(); + self.handshaking_peers.clear(); + self.consensus_gossip.abort(); + } - fn on_block_imported(&mut self, hash: B::Hash, header: &B::Header) { - self.sync.update_chain_info(header); - self.specialization.on_block_imported( - &mut ProtocolContext::new(&mut self.context_data, &self.network_chan), - hash.clone(), - header, - ); - - // blocks are not announced by light clients - if self.config.roles & Roles::LIGHT == Roles::LIGHT { - return; - } + fn stop(&mut self) { + // stop processing import requests first (without holding a sync lock) + self.sync.stop(); - // send out block announcements + // and then clear all the sync data + self.abort(); + } - let message = GenericMessage::BlockAnnounce(message::BlockAnnounce { header: header.clone() }); + fn on_block_announce(&mut self, who: PeerId, announce: message::BlockAnnounce) { + let header = announce.header; + let hash = header.hash(); + { + if let Some(ref mut peer) = self.context_data.peers.get_mut(&who) { + peer.known_blocks.insert(hash.clone()); + } + } + self.on_demand + .as_ref() + .map(|s| s.on_block_announce(who.clone(), *header.number())); + self.sync.on_block_announce( + &mut ProtocolContext::new(&mut self.context_data, &self.network_chan), + who, + hash, + &header, + ); + } - for (who, ref mut peer) in self.context_data.peers.iter_mut() { - if peer.known_blocks.insert(hash.clone()) { - trace!(target: "sync", "Announcing block {:?} to {}", hash, who); - self.network_chan.send(NetworkMsg::Outgoing(who.clone(), message.clone())) - } - } - } + fn on_block_imported(&mut self, hash: B::Hash, header: &B::Header) { + self.sync.update_chain_info(header); + 
self.specialization.on_block_imported( + &mut ProtocolContext::new(&mut self.context_data, &self.network_chan), + hash.clone(), + header, + ); + + // blocks are not announced by light clients + if self.config.roles & Roles::LIGHT == Roles::LIGHT { + return; + } + + // send out block announcements + + let message = GenericMessage::BlockAnnounce(message::BlockAnnounce { + header: header.clone(), + }); + + for (who, ref mut peer) in self.context_data.peers.iter_mut() { + if peer.known_blocks.insert(hash.clone()) { + trace!(target: "sync", "Announcing block {:?} to {}", hash, who); + self.network_chan + .send(NetworkMsg::Outgoing(who.clone(), message.clone())) + } + } + } - fn on_block_finalized(&mut self, hash: B::Hash, header: &B::Header) { - self.sync.on_block_finalized( - &hash, - *header.number(), - &mut ProtocolContext::new(&mut self.context_data, &self.network_chan), - ); - } + fn on_block_finalized(&mut self, hash: B::Hash, header: &B::Header) { + self.sync.on_block_finalized( + &hash, + *header.number(), + &mut ProtocolContext::new(&mut self.context_data, &self.network_chan), + ); + } - fn on_remote_call_request( - &mut self, - who: PeerId, - request: message::RemoteCallRequest, - ) { - trace!(target: "sync", "Remote call request {} from {} ({} at {})", request.id, who, request.method, request.block); - let proof = match self.context_data.chain.execution_proof( - &request.block, - &request.method, - &request.data, - ) { - Ok((_, proof)) => proof, - Err(error) => { - trace!(target: "sync", "Remote call request {} from {} ({} at {}) failed with: {}", + fn on_remote_call_request( + &mut self, + who: PeerId, + request: message::RemoteCallRequest, + ) { + trace!(target: "sync", "Remote call request {} from {} ({} at {})", request.id, who, request.method, request.block); + let proof = match self.context_data.chain.execution_proof( + &request.block, + &request.method, + &request.data, + ) { + Ok((_, proof)) => proof, + Err(error) => { + trace!(target: "sync", "Remote call request {} from {} ({} at {}) failed with: {}", request.id, who, request.method, request.block, error); - Default::default() - } - }; - - self.send_message( - who, - GenericMessage::RemoteCallResponse(message::RemoteCallResponse { - id: request.id, - proof, - }), - ); - } + Default::default() + } + }; + + self.send_message( + who, + GenericMessage::RemoteCallResponse(message::RemoteCallResponse { + id: request.id, + proof, + }), + ); + } - fn on_remote_call_response(&mut self, who: PeerId, response: message::RemoteCallResponse) { - trace!(target: "sync", "Remote call response {} from {}", response.id, who); - self.on_demand - .as_ref() - .map(|s| s.on_remote_call_response(who, response)); - } + fn on_remote_call_response(&mut self, who: PeerId, response: message::RemoteCallResponse) { + trace!(target: "sync", "Remote call response {} from {}", response.id, who); + self.on_demand + .as_ref() + .map(|s| s.on_remote_call_response(who, response)); + } - fn on_remote_read_request( - &mut self, - who: PeerId, - request: message::RemoteReadRequest, - ) { - trace!(target: "sync", "Remote read request {} from {} ({} at {})", + fn on_remote_read_request( + &mut self, + who: PeerId, + request: message::RemoteReadRequest, + ) { + trace!(target: "sync", "Remote read request {} from {} ({} at {})", request.id, who, request.key.to_hex::(), request.block); - let proof = match self.context_data.chain.read_proof(&request.block, &request.key) { - Ok(proof) => proof, - Err(error) => { - trace!(target: "sync", "Remote read request {} from {} 
({} at {}) failed with: {}", + let proof = match self + .context_data + .chain + .read_proof(&request.block, &request.key) + { + Ok(proof) => proof, + Err(error) => { + trace!(target: "sync", "Remote read request {} from {} ({} at {}) failed with: {}", request.id, who, request.key.to_hex::(), request.block, error); - Default::default() - } - }; - self.send_message( - who, - GenericMessage::RemoteReadResponse(message::RemoteReadResponse { - id: request.id, - proof, - }), - ); - } - fn on_remote_read_response(&mut self, who: PeerId, response: message::RemoteReadResponse) { - trace!(target: "sync", "Remote read response {} from {}", response.id, who); - self.on_demand - .as_ref() - .map(|s| s.on_remote_read_response(who, response)); - } + Default::default() + } + }; + self.send_message( + who, + GenericMessage::RemoteReadResponse(message::RemoteReadResponse { + id: request.id, + proof, + }), + ); + } + fn on_remote_read_response(&mut self, who: PeerId, response: message::RemoteReadResponse) { + trace!(target: "sync", "Remote read response {} from {}", response.id, who); + self.on_demand + .as_ref() + .map(|s| s.on_remote_read_response(who, response)); + } - fn on_remote_header_request( - &mut self, - who: PeerId, - request: message::RemoteHeaderRequest>, - ) { - trace!(target: "sync", "Remote header proof request {} from {} ({})", + fn on_remote_header_request( + &mut self, + who: PeerId, + request: message::RemoteHeaderRequest>, + ) { + trace!(target: "sync", "Remote header proof request {} from {} ({})", request.id, who, request.block); - let (header, proof) = match self.context_data.chain.header_proof(request.block) { - Ok((header, proof)) => (Some(header), proof), - Err(error) => { - trace!(target: "sync", "Remote header proof request {} from {} ({}) failed with: {}", + let (header, proof) = match self.context_data.chain.header_proof(request.block) { + Ok((header, proof)) => (Some(header), proof), + Err(error) => { + trace!(target: "sync", "Remote header proof request {} from {} ({}) failed with: {}", request.id, who, request.block, error); - (Default::default(), Default::default()) - } - }; - self.send_message( - who, - GenericMessage::RemoteHeaderResponse(message::RemoteHeaderResponse { - id: request.id, - header, - proof, - }), - ); - } + (Default::default(), Default::default()) + } + }; + self.send_message( + who, + GenericMessage::RemoteHeaderResponse(message::RemoteHeaderResponse { + id: request.id, + header, + proof, + }), + ); + } - fn on_remote_header_response( - &mut self, - who: PeerId, - response: message::RemoteHeaderResponse, - ) { - trace!(target: "sync", "Remote header proof response {} from {}", response.id, who); - self.on_demand - .as_ref() - .map(|s| s.on_remote_header_response(who, response)); - } + fn on_remote_header_response( + &mut self, + who: PeerId, + response: message::RemoteHeaderResponse, + ) { + trace!(target: "sync", "Remote header proof response {} from {}", response.id, who); + self.on_demand + .as_ref() + .map(|s| s.on_remote_header_response(who, response)); + } - fn on_remote_changes_request( - &mut self, - who: PeerId, - request: message::RemoteChangesRequest, - ) { - trace!(target: "sync", "Remote changes proof request {} from {} for key {} ({}..{})", + fn on_remote_changes_request( + &mut self, + who: PeerId, + request: message::RemoteChangesRequest, + ) { + trace!(target: "sync", "Remote changes proof request {} from {} for key {} ({}..{})", request.id, who, request.key.to_hex::(), request.first, request.last); - let key = StorageKey(request.key); 
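The remote read and header handlers in this hunk share one error posture: if the proof cannot be produced, log the failure and still reply with `Default::default()` rather than dropping the request, so the light client always gets an answer it can verify (and reject). A compact sketch of that proof-or-default shape, with a toy closure in place of the client backend:

    /// `read_proof` stands in for the client's proof generation.
    fn read_proof_or_default(
        read_proof: impl Fn(&[u8]) -> Result<Vec<Vec<u8>>, String>,
        key: &[u8],
    ) -> Vec<Vec<u8>> {
        match read_proof(key) {
            Ok(proof) => proof,
            Err(error) => {
                // The real handler logs via trace!(target: "sync", ...).
                eprintln!("remote read request failed: {}", error);
                Default::default() // empty proof, never a dropped request
            }
        }
    }

    fn main() {
        let failing = |_key: &[u8]| Err("state pruned".to_string());
        // The light client still receives a (verifiably empty) response.
        assert!(read_proof_or_default(failing, b"balances").is_empty());
    }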
- let proof = match self.context_data.chain.key_changes_proof(request.first, request.last, request.min, request.max, &key) { - Ok(proof) => proof, - Err(error) => { - trace!(target: "sync", "Remote changes proof request {} from {} for key {} ({}..{}) failed with: {}", + let key = StorageKey(request.key); + let proof = match self.context_data.chain.key_changes_proof( + request.first, + request.last, + request.min, + request.max, + &key, + ) { + Ok(proof) => proof, + Err(error) => { + trace!(target: "sync", "Remote changes proof request {} from {} for key {} ({}..{}) failed with: {}", request.id, who, key.0.to_hex::(), request.first, request.last, error); - ChangesProof:: { - max_block: Zero::zero(), - proof: vec![], - roots: BTreeMap::new(), - roots_proof: vec![], - } - } - }; - self.send_message( - who, - GenericMessage::RemoteChangesResponse(message::RemoteChangesResponse { - id: request.id, - max: proof.max_block, - proof: proof.proof, - roots: proof.roots.into_iter().collect(), - roots_proof: proof.roots_proof, - }), - ); - } + ChangesProof:: { + max_block: Zero::zero(), + proof: vec![], + roots: BTreeMap::new(), + roots_proof: vec![], + } + } + }; + self.send_message( + who, + GenericMessage::RemoteChangesResponse(message::RemoteChangesResponse { + id: request.id, + max: proof.max_block, + proof: proof.proof, + roots: proof.roots.into_iter().collect(), + roots_proof: proof.roots_proof, + }), + ); + } - fn on_remote_changes_response( - &mut self, - who: PeerId, - response: message::RemoteChangesResponse, B::Hash>, - ) { - trace!(target: "sync", "Remote changes proof response {} from {} (max={})", + fn on_remote_changes_response( + &mut self, + who: PeerId, + response: message::RemoteChangesResponse, B::Hash>, + ) { + trace!(target: "sync", "Remote changes proof response {} from {} (max={})", response.id, who, response.max); - self.on_demand - .as_ref() - .map(|s| s.on_remote_changes_response(who, response)); - } + self.on_demand + .as_ref() + .map(|s| s.on_remote_changes_response(who, response)); + } } fn send_message( - peers: &mut HashMap>, - network_chan: &NetworkChan, - who: PeerId, - mut message: Message, + peers: &mut HashMap>, + network_chan: &NetworkChan, + who: PeerId, + mut message: Message, ) { - if let GenericMessage::BlockRequest(ref mut r) = message { - if let Some(ref mut peer) = peers.get_mut(&who) { - r.id = peer.next_request_id; - peer.next_request_id = peer.next_request_id + 1; - if let Some((timestamp, request)) = peer.block_request.take() { - trace!(target: "sync", "Request {} for {} is now obsolete.", request.id, who); - peer.obsolete_requests.insert(request.id, timestamp); - } - peer.block_request = Some((time::Instant::now(), r.clone())); - } - } - network_chan.send(NetworkMsg::Outgoing(who, message)); + if let GenericMessage::BlockRequest(ref mut r) = message { + if let Some(ref mut peer) = peers.get_mut(&who) { + r.id = peer.next_request_id; + peer.next_request_id = peer.next_request_id + 1; + if let Some((timestamp, request)) = peer.block_request.take() { + trace!(target: "sync", "Request {} for {} is now obsolete.", request.id, who); + peer.obsolete_requests.insert(request.id, timestamp); + } + peer.block_request = Some((time::Instant::now(), r.clone())); + } + } + network_chan.send(NetworkMsg::Outgoing(who, message)); } /// Construct a simple protocol that is composed of several sub protocols. 
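The free `send_message` at the end of this file stamps outgoing block requests: it assigns the peer's `next_request_id`, and if an older request was still in flight it is parked in `obsolete_requests` together with its start time so `maintain_peers` can expire it later. A std-only reconstruction of just that bookkeeping (field types simplified; the real `block_request` stores the whole request, not a bare id):

    use std::collections::HashMap;
    use std::time::Instant;

    struct PeerRequests {
        next_request_id: u64,
        block_request: Option<(Instant, u64)>,
        obsolete_requests: HashMap<u64, Instant>,
    }

    /// Assign a fresh id to an outgoing request, demoting any still-pending
    /// request to the obsolete set with its original timestamp.
    fn stamp_request(peer: &mut PeerRequests) -> u64 {
        let id = peer.next_request_id;
        peer.next_request_id += 1;
        if let Some((timestamp, old_id)) = peer.block_request.take() {
            peer.obsolete_requests.insert(old_id, timestamp);
        }
        peer.block_request = Some((Instant::now(), id));
        id
    }

    fn main() {
        let mut peer = PeerRequests {
            next_request_id: 0,
            block_request: None,
            obsolete_requests: HashMap::new(),
        };
        assert_eq!(stamp_request(&mut peer), 0);
        assert_eq!(stamp_request(&mut peer), 1); // request 0 is now obsolete
        assert!(peer.obsolete_requests.contains_key(&0));
    }

Keeping the original timestamp on the obsolete entry is what makes the later timeout check meaningful: the clock keeps running from when the superseded request was first sent.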
diff --git a/core/network/src/service.rs b/core/network/src/service.rs index 8e86587398..76cb5f1e34 100644 --- a/core/network/src/service.rs +++ b/core/network/src/service.rs @@ -14,26 +14,33 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use std::collections::HashMap; -use std::sync::Arc; -use std::sync::atomic::{AtomicBool, Ordering}; -use std::{io, thread}; -use log::{warn, debug, error, trace, info}; -use futures::{Async, Future, Stream, stream, sync::oneshot, sync::mpsc}; -use parking_lot::{Mutex, RwLock}; -use network_libp2p::{ProtocolId, NetworkConfiguration, Severity}; -use network_libp2p::{start_service, parse_str_addr, Service as NetworkService, ServiceEvent as NetworkServiceEvent}; -use network_libp2p::{multiaddr, RegisteredProtocol, NetworkState}; -use peerset::Peerset; -use consensus::import_queue::{ImportQueue, Link}; -use crate::consensus_gossip::ConsensusGossip; -use crate::message::Message; -use crate::protocol::{self, Context, FromNetworkMsg, Protocol, ConnectedPeer, ProtocolMsg, ProtocolStatus, PeerInfo}; use crate::config::Params; -use crossbeam_channel::{self as channel, Receiver, Sender, TryRecvError}; +use crate::consensus_gossip::ConsensusGossip; use crate::error::Error; -use runtime_primitives::{traits::{Block as BlockT, NumberFor}, ConsensusEngineId}; +use crate::message::Message; +use crate::protocol::{ + self, ConnectedPeer, Context, FromNetworkMsg, PeerInfo, Protocol, ProtocolMsg, ProtocolStatus, +}; use crate::specialization::NetworkSpecialization; +use consensus::import_queue::{ImportQueue, Link}; +use crossbeam_channel::{self as channel, Receiver, Sender, TryRecvError}; +use futures::{stream, sync::mpsc, sync::oneshot, Async, Future, Stream}; +use log::{debug, error, info, trace, warn}; +use network_libp2p::{multiaddr, NetworkState, RegisteredProtocol}; +use network_libp2p::{ + parse_str_addr, start_service, Service as NetworkService, ServiceEvent as NetworkServiceEvent, +}; +use network_libp2p::{NetworkConfiguration, ProtocolId, Severity}; +use parking_lot::{Mutex, RwLock}; +use peerset::Peerset; +use runtime_primitives::{ + traits::{Block as BlockT, NumberFor}, + ConsensusEngineId, +}; +use std::collections::HashMap; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::Arc; +use std::{io, thread}; use tokio::prelude::task::AtomicTask; use tokio::runtime::Builder as RuntimeBuilder; @@ -43,553 +50,597 @@ pub use network_libp2p::PeerId; /// Type that represents fetch completion future. pub type FetchFuture = oneshot::Receiver>; - /// Sync status pub trait SyncProvider: Send + Sync { - /// Get a stream of sync statuses. - fn status(&self) -> mpsc::UnboundedReceiver>; - /// Get network state. - fn network_state(&self) -> NetworkState; - /// Get currently connected peers - fn peers(&self) -> Vec<(PeerId, PeerInfo)>; - /// Are we in the process of downloading the chain? - fn is_major_syncing(&self) -> bool; + /// Get a stream of sync statuses. + fn status(&self) -> mpsc::UnboundedReceiver>; + /// Get network state. + fn network_state(&self) -> NetworkState; + /// Get currently connected peers + fn peers(&self) -> Vec<(PeerId, PeerInfo)>; + /// Are we in the process of downloading the chain? 
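The `SyncProvider` methods being re-indented here are backed, further down in `Service`, by shared atomics: `is_offline` and `is_major_syncing` are `Arc<AtomicBool>` flags written by the protocol and polled lock-free by consensus code. A sketch of that arrangement; `Relaxed` ordering suffices because the flags are purely advisory:

    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::Arc;

    struct SyncFlags {
        is_offline: Arc<AtomicBool>,
        is_major_syncing: Arc<AtomicBool>,
    }

    fn main() {
        let flags = SyncFlags {
            is_offline: Arc::new(AtomicBool::new(true)), // start off-line
            is_major_syncing: Arc::new(AtomicBool::new(false)),
        };
        // Any number of readers can hold a clone of the Arc.
        let reader = flags.is_major_syncing.clone();
        flags.is_major_syncing.store(true, Ordering::Relaxed);
        assert!(reader.load(Ordering::Relaxed));
        assert!(flags.is_offline.load(Ordering::Relaxed));
    }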
+    fn is_major_syncing(&self) -> bool;
 }

 /// Minimum Requirements for a Hash within Networking
 pub trait ExHashT:
-    ::std::hash::Hash + Eq + ::std::fmt::Debug + Clone + Send + Sync + 'static
+    ::std::hash::Hash + Eq + ::std::fmt::Debug + Clone + Send + Sync + 'static
 {
 }
 impl<T> ExHashT for T where
-    T: ::std::hash::Hash + Eq + ::std::fmt::Debug + Clone + Send + Sync + 'static
+    T: ::std::hash::Hash + Eq + ::std::fmt::Debug + Clone + Send + Sync + 'static
 {
 }

 /// Transaction pool interface
 pub trait TransactionPool<H: ExHashT, B: BlockT>: Send + Sync {
-    /// Get transactions from the pool that are ready to be propagated.
-    fn transactions(&self) -> Vec<(H, B::Extrinsic)>;
-    /// Import a transaction into the pool.
-    fn import(&self, transaction: &B::Extrinsic) -> Option<H>;
-    /// Notify the pool about transactions broadcast.
-    fn on_broadcasted(&self, propagations: HashMap<H, Vec<String>>);
+    /// Get transactions from the pool that are ready to be propagated.
+    fn transactions(&self) -> Vec<(H, B::Extrinsic)>;
+    /// Import a transaction into the pool.
+    fn import(&self, transaction: &B::Extrinsic) -> Option<H>;
+    /// Notify the pool about transactions broadcast.
+    fn on_broadcasted(&self, propagations: HashMap<H, Vec<String>>);
 }

 /// A link implementation that connects to the network.
 #[derive(Clone)]
 pub struct NetworkLink<B: BlockT, S: NetworkSpecialization<B>> {
-    /// The protocol sender
-    pub(crate) protocol_sender: Sender<ProtocolMsg<B, S>>,
-    /// The network sender
-    pub(crate) network_sender: NetworkChan<B>,
+    /// The protocol sender
+    pub(crate) protocol_sender: Sender<ProtocolMsg<B, S>>,
+    /// The network sender
+    pub(crate) network_sender: NetworkChan<B>,
 }

 impl<B: BlockT, S: NetworkSpecialization<B>> Link<B> for NetworkLink<B, S> {
-    fn block_imported(&self, hash: &B::Hash, number: NumberFor<B>) {
-        let _ = self.protocol_sender.send(ProtocolMsg::BlockImportedSync(hash.clone(), number));
-    }
-
-    fn blocks_processed(&self, processed_blocks: Vec<B::Hash>, has_error: bool) {
-        let _ = self.protocol_sender.send(ProtocolMsg::BlocksProcessed(processed_blocks, has_error));
-    }
-
-    fn justification_imported(&self, who: PeerId, hash: &B::Hash, number: NumberFor<B>, success: bool) {
-        let _ = self.protocol_sender.send(ProtocolMsg::JustificationImportResult(hash.clone(), number, success));
-        if !success {
-            let reason = Severity::Bad(format!("Invalid justification provided for #{}", hash).to_string());
-            let _ = self.network_sender.send(NetworkMsg::ReportPeer(who, reason));
-        }
-    }
-
-    fn clear_justification_requests(&self) {
-        let _ = self.protocol_sender.send(ProtocolMsg::ClearJustificationRequests);
-    }
-
-    fn request_justification(&self, hash: &B::Hash, number: NumberFor<B>) {
-        let _ = self.protocol_sender.send(ProtocolMsg::RequestJustification(hash.clone(), number));
-    }
-
-    fn useless_peer(&self, who: PeerId, reason: &str) {
-        trace!(target:"sync", "Useless peer {}, {}", who, reason);
-        self.network_sender.send(NetworkMsg::ReportPeer(who, Severity::Useless(reason.to_string())));
-    }
-
-    fn note_useless_and_restart_sync(&self, who: PeerId, reason: &str) {
-        trace!(target:"sync", "Bad peer {}, {}", who, reason);
-        // is this actually malign or just useless?
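`ExHashT` above is the trait-alias idiom: an empty trait carrying only supertrait bounds, plus a blanket impl so every qualifying type gets it for free. The same pattern in miniature (the trait is renamed `ExHash` to keep the sketch distinct from the patch):

    use std::fmt::Debug;
    use std::hash::Hash;

    /// Alias-style trait: no methods of its own, just a bundle of bounds.
    trait ExHash: Hash + Eq + Debug + Clone + Send + Sync + 'static {}

    /// Blanket impl: every type meeting the bounds is automatically an
    /// `ExHash`, so callers write one bound instead of seven.
    impl<T> ExHash for T where T: Hash + Eq + Debug + Clone + Send + Sync + 'static {}

    fn takes_hash<H: ExHash>(h: H) -> H { h }

    fn main() {
        // u64 and String satisfy the bounds with no explicit impl anywhere.
        assert_eq!(takes_hash(7u64), 7);
        assert_eq!(takes_hash("x".to_string()), "x");
    }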
- self.network_sender.send(NetworkMsg::ReportPeer(who, Severity::Useless(reason.to_string()))); - let _ = self.protocol_sender.send(ProtocolMsg::RestartSync); - } - - fn restart(&self) { - let _ = self.protocol_sender.send(ProtocolMsg::RestartSync); - } + fn block_imported(&self, hash: &B::Hash, number: NumberFor) { + let _ = self + .protocol_sender + .send(ProtocolMsg::BlockImportedSync(hash.clone(), number)); + } + + fn blocks_processed(&self, processed_blocks: Vec, has_error: bool) { + let _ = self + .protocol_sender + .send(ProtocolMsg::BlocksProcessed(processed_blocks, has_error)); + } + + fn justification_imported( + &self, + who: PeerId, + hash: &B::Hash, + number: NumberFor, + success: bool, + ) { + let _ = self + .protocol_sender + .send(ProtocolMsg::JustificationImportResult( + hash.clone(), + number, + success, + )); + if !success { + let reason = + Severity::Bad(format!("Invalid justification provided for #{}", hash).to_string()); + let _ = self + .network_sender + .send(NetworkMsg::ReportPeer(who, reason)); + } + } + + fn clear_justification_requests(&self) { + let _ = self + .protocol_sender + .send(ProtocolMsg::ClearJustificationRequests); + } + + fn request_justification(&self, hash: &B::Hash, number: NumberFor) { + let _ = self + .protocol_sender + .send(ProtocolMsg::RequestJustification(hash.clone(), number)); + } + + fn useless_peer(&self, who: PeerId, reason: &str) { + trace!(target:"sync", "Useless peer {}, {}", who, reason); + self.network_sender.send(NetworkMsg::ReportPeer( + who, + Severity::Useless(reason.to_string()), + )); + } + + fn note_useless_and_restart_sync(&self, who: PeerId, reason: &str) { + trace!(target:"sync", "Bad peer {}, {}", who, reason); + // is this actually malign or just useless? + self.network_sender.send(NetworkMsg::ReportPeer( + who, + Severity::Useless(reason.to_string()), + )); + let _ = self.protocol_sender.send(ProtocolMsg::RestartSync); + } + + fn restart(&self) { + let _ = self.protocol_sender.send(ProtocolMsg::RestartSync); + } } /// Substrate network service. Handles network IO and manages connectivity. pub struct Service> { - /// Sinks to propagate status updates. - status_sinks: Arc>>>>, - /// Are we connected to any peer? - is_offline: Arc, - /// Are we actively catching up with the chain? - is_major_syncing: Arc, - /// Peers whom we are connected with. - peers: Arc>>>, - /// Network service - network: Arc>>>, - /// Peerset manager (PSM); manages the reputation of nodes and indicates the network which - /// nodes it should be connected to or not. - peerset: Arc, - /// Protocol sender - protocol_sender: Sender>, - /// Sender for messages to the background service task, and handle for the background thread. - /// Dropping the sender should close the task and the thread. - /// This is an `Option` because we need to extract it in the destructor. - bg_thread: Option<(oneshot::Sender<()>, thread::JoinHandle<()>)>, + /// Sinks to propagate status updates. + status_sinks: Arc>>>>, + /// Are we connected to any peer? + is_offline: Arc, + /// Are we actively catching up with the chain? + is_major_syncing: Arc, + /// Peers whom we are connected with. + peers: Arc>>>, + /// Network service + network: Arc>>>, + /// Peerset manager (PSM); manages the reputation of nodes and indicates the network which + /// nodes it should be connected to or not. + peerset: Arc, + /// Protocol sender + protocol_sender: Sender>, + /// Sender for messages to the background service task, and handle for the background thread. 
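`NetworkLink`, shown in both formattings above, is a thin adapter: every import-queue callback becomes a `ProtocolMsg` pushed down a channel, and send results are discarded with `let _` because a closed channel only means the protocol is shutting down. A stripped-down equivalent with a two-variant message enum (names and payloads simplified):

    use std::sync::mpsc::{channel, Sender};

    #[derive(Debug, PartialEq)]
    enum ProtocolMsg {
        BlockImportedSync(u64),
        RestartSync,
    }

    /// Import-queue callbacks become channel messages; failures to send
    /// are ignored deliberately.
    struct Link { protocol_sender: Sender<ProtocolMsg> }

    impl Link {
        fn block_imported(&self, number: u64) {
            let _ = self.protocol_sender.send(ProtocolMsg::BlockImportedSync(number));
        }
        fn restart(&self) {
            let _ = self.protocol_sender.send(ProtocolMsg::RestartSync);
        }
    }

    fn main() {
        let (tx, rx) = channel();
        let link = Link { protocol_sender: tx };
        link.block_imported(42);
        link.restart();
        assert_eq!(rx.recv().unwrap(), ProtocolMsg::BlockImportedSync(42));
        assert_eq!(rx.recv().unwrap(), ProtocolMsg::RestartSync);
    }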
+ /// Dropping the sender should close the task and the thread. + /// This is an `Option` because we need to extract it in the destructor. + bg_thread: Option<(oneshot::Sender<()>, thread::JoinHandle<()>)>, } impl> Service { - /// Creates and register protocol with the network service - pub fn new( - params: Params, - protocol_id: ProtocolId, - import_queue: Box>, - ) -> Result<(Arc>, NetworkChan), Error> { - let (network_chan, network_port) = network_channel(); - let status_sinks = Arc::new(Mutex::new(Vec::new())); - // Start in off-line mode, since we're not connected to any nodes yet. - let is_offline = Arc::new(AtomicBool::new(true)); - let is_major_syncing = Arc::new(AtomicBool::new(false)); - let peers: Arc>>> = Arc::new(Default::default()); - let (protocol_sender, network_to_protocol_sender) = Protocol::new( - status_sinks.clone(), - is_offline.clone(), - is_major_syncing.clone(), - peers.clone(), - network_chan.clone(), - params.config, - params.chain, - import_queue.clone(), - params.on_demand, - params.transaction_pool, - params.specialization, - )?; - let versions = [(protocol::CURRENT_VERSION as u8)]; - let registered = RegisteredProtocol::new(protocol_id, &versions[..]); - let (thread, network, peerset) = start_thread( - network_to_protocol_sender, - network_port, - params.network_config, - registered, - )?; - - let service = Arc::new(Service { - status_sinks, - is_offline, - is_major_syncing, - peers, - peerset, - network, - protocol_sender: protocol_sender.clone(), - bg_thread: Some(thread), - }); - - // connect the import-queue to the network service. - let link = NetworkLink { - protocol_sender, - network_sender: network_chan.clone(), - }; - - import_queue.start(Box::new(link))?; - - Ok((service, network_chan)) - } - - /// Returns the downloaded bytes per second averaged over the past few seconds. - #[inline] - pub fn average_download_per_sec(&self) -> u64 { - self.network.lock().average_download_per_sec() - } - - /// Returns the uploaded bytes per second averaged over the past few seconds. - #[inline] - pub fn average_upload_per_sec(&self) -> u64 { - self.network.lock().average_upload_per_sec() - } - - /// Returns the network identity of the node. - pub fn local_peer_id(&self) -> PeerId { - self.network.lock().peer_id().clone() - } - - /// Called when a new block is imported by the client. - pub fn on_block_imported(&self, hash: B::Hash, header: B::Header) { - let _ = self - .protocol_sender - .send(ProtocolMsg::BlockImported(hash, header)); - } - - /// Called when a new block is finalized by the client. - pub fn on_block_finalized(&self, hash: B::Hash, header: B::Header) { - let _ = self - .protocol_sender - .send(ProtocolMsg::BlockFinalized(hash, header)); - } - - /// Called when new transactons are imported by the client. - pub fn trigger_repropagate(&self) { - let _ = self.protocol_sender.send(ProtocolMsg::PropagateExtrinsics); - } - - /// Make sure an important block is propagated to peers. - /// - /// In chain-based consensus, we often need to make sure non-best forks are - /// at least temporarily synced. 
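`bg_thread` is an `Option` precisely so the `Drop` impl later in this file can `take()` the shutdown sender and join handle out of `&mut self`. A self-contained miniature of that lifecycle, using a plain mpsc channel where the service uses a futures oneshot:

    use std::sync::mpsc::{channel, Sender};
    use std::thread;

    struct MiniService {
        bg_thread: Option<(Sender<()>, thread::JoinHandle<()>)>,
    }

    impl MiniService {
        fn new() -> Self {
            let (tx, rx) = channel();
            let handle = thread::spawn(move || {
                // Block until the shutdown signal (or sender drop) arrives.
                let _ = rx.recv();
            });
            MiniService { bg_thread: Some((tx, handle)) }
        }
    }

    impl Drop for MiniService {
        fn drop(&mut self) {
            // `take()` moves ownership out of &mut self; without the
            // Option wrapper this would not compile.
            if let Some((sender, join)) = self.bg_thread.take() {
                let _ = sender.send(()); // ask the thread to stop...
                if join.join().is_err() {
                    eprintln!("background thread panicked");
                }
            }
        }
    }

    fn main() {
        let svc = MiniService::new();
        drop(svc); // the join happens here, not at process exit
    }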
- pub fn announce_block(&self, hash: B::Hash) { - let _ = self.protocol_sender.send(ProtocolMsg::AnnounceBlock(hash)); - } - - /// Send a consensus message through the gossip - pub fn gossip_consensus_message( - &self, - topic: B::Hash, - engine_id: ConsensusEngineId, - message: Vec, - force: bool, - ) { - let _ = self - .protocol_sender - .send(ProtocolMsg::GossipConsensusMessage( - topic, engine_id, message, force, - )); - } - - /// Execute a closure with the chain-specific network specialization. - pub fn with_spec(&self, f: F) - where F: FnOnce(&mut S, &mut Context) + Send + 'static - { - let _ = self - .protocol_sender - .send(ProtocolMsg::ExecuteWithSpec(Box::new(f))); - } - - /// Execute a closure with the consensus gossip. - pub fn with_gossip(&self, f: F) - where F: FnOnce(&mut ConsensusGossip, &mut Context) + Send + 'static - { - let _ = self - .protocol_sender - .send(ProtocolMsg::ExecuteWithGossip(Box::new(f))); - } - - /// Are we in the process of downloading the chain? - /// Used by both SyncProvider and SyncOracle. - fn is_major_syncing(&self) -> bool { - self.is_major_syncing.load(Ordering::Relaxed) - } + /// Creates and register protocol with the network service + pub fn new( + params: Params, + protocol_id: ProtocolId, + import_queue: Box>, + ) -> Result<(Arc>, NetworkChan), Error> { + let (network_chan, network_port) = network_channel(); + let status_sinks = Arc::new(Mutex::new(Vec::new())); + // Start in off-line mode, since we're not connected to any nodes yet. + let is_offline = Arc::new(AtomicBool::new(true)); + let is_major_syncing = Arc::new(AtomicBool::new(false)); + let peers: Arc>>> = Arc::new(Default::default()); + let (protocol_sender, network_to_protocol_sender) = Protocol::new( + status_sinks.clone(), + is_offline.clone(), + is_major_syncing.clone(), + peers.clone(), + network_chan.clone(), + params.config, + params.chain, + import_queue.clone(), + params.on_demand, + params.transaction_pool, + params.specialization, + )?; + let versions = [(protocol::CURRENT_VERSION as u8)]; + let registered = RegisteredProtocol::new(protocol_id, &versions[..]); + let (thread, network, peerset) = start_thread( + network_to_protocol_sender, + network_port, + params.network_config, + registered, + )?; + + let service = Arc::new(Service { + status_sinks, + is_offline, + is_major_syncing, + peers, + peerset, + network, + protocol_sender: protocol_sender.clone(), + bg_thread: Some(thread), + }); + + // connect the import-queue to the network service. + let link = NetworkLink { + protocol_sender, + network_sender: network_chan.clone(), + }; + + import_queue.start(Box::new(link))?; + + Ok((service, network_chan)) + } + + /// Returns the downloaded bytes per second averaged over the past few seconds. + #[inline] + pub fn average_download_per_sec(&self) -> u64 { + self.network.lock().average_download_per_sec() + } + + /// Returns the uploaded bytes per second averaged over the past few seconds. + #[inline] + pub fn average_upload_per_sec(&self) -> u64 { + self.network.lock().average_upload_per_sec() + } + + /// Returns the network identity of the node. + pub fn local_peer_id(&self) -> PeerId { + self.network.lock().peer_id().clone() + } + + /// Called when a new block is imported by the client. + pub fn on_block_imported(&self, hash: B::Hash, header: B::Header) { + let _ = self + .protocol_sender + .send(ProtocolMsg::BlockImported(hash, header)); + } + + /// Called when a new block is finalized by the client. 
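`with_spec` and `with_gossip`, old form above and reformatted form below, ship boxed `FnOnce` closures over the protocol channel so callers can mutate protocol-owned state without ever holding a lock on it. A minimal version of that execute-on-the-owning-thread pattern (the `Spec` struct is a stand-in for the network specialization):

    use std::sync::mpsc::channel;

    struct Spec { peers: usize }

    /// Callers box up work; the owning thread applies it in order.
    type SpecFn = Box<dyn FnOnce(&mut Spec) + Send + 'static>;

    fn main() {
        let (tx, rx) = channel::<SpecFn>();
        tx.send(Box::new(|spec: &mut Spec| spec.peers += 1)).unwrap();
        drop(tx); // closing the channel ends the loop below

        // The protocol thread's event loop: drain closures, run each once.
        let mut spec = Spec { peers: 0 };
        for f in rx {
            f(&mut spec);
        }
        assert_eq!(spec.peers, 1);
    }

Because only the owning thread ever touches `Spec`, no mutex is needed; the channel itself serializes access.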
+ pub fn on_block_finalized(&self, hash: B::Hash, header: B::Header) { + let _ = self + .protocol_sender + .send(ProtocolMsg::BlockFinalized(hash, header)); + } + + /// Called when new transactons are imported by the client. + pub fn trigger_repropagate(&self) { + let _ = self.protocol_sender.send(ProtocolMsg::PropagateExtrinsics); + } + + /// Make sure an important block is propagated to peers. + /// + /// In chain-based consensus, we often need to make sure non-best forks are + /// at least temporarily synced. + pub fn announce_block(&self, hash: B::Hash) { + let _ = self.protocol_sender.send(ProtocolMsg::AnnounceBlock(hash)); + } + + /// Send a consensus message through the gossip + pub fn gossip_consensus_message( + &self, + topic: B::Hash, + engine_id: ConsensusEngineId, + message: Vec, + force: bool, + ) { + let _ = self + .protocol_sender + .send(ProtocolMsg::GossipConsensusMessage( + topic, engine_id, message, force, + )); + } + + /// Execute a closure with the chain-specific network specialization. + pub fn with_spec(&self, f: F) + where + F: FnOnce(&mut S, &mut Context) + Send + 'static, + { + let _ = self + .protocol_sender + .send(ProtocolMsg::ExecuteWithSpec(Box::new(f))); + } + + /// Execute a closure with the consensus gossip. + pub fn with_gossip(&self, f: F) + where + F: FnOnce(&mut ConsensusGossip, &mut Context) + Send + 'static, + { + let _ = self + .protocol_sender + .send(ProtocolMsg::ExecuteWithGossip(Box::new(f))); + } + + /// Are we in the process of downloading the chain? + /// Used by both SyncProvider and SyncOracle. + fn is_major_syncing(&self) -> bool { + self.is_major_syncing.load(Ordering::Relaxed) + } } impl> ::consensus::SyncOracle for Service { - fn is_major_syncing(&self) -> bool { - self.is_major_syncing() - } + fn is_major_syncing(&self) -> bool { + self.is_major_syncing() + } - fn is_offline(&self) -> bool { - self.is_offline.load(Ordering::Relaxed) - } + fn is_offline(&self) -> bool { + self.is_offline.load(Ordering::Relaxed) + } } impl> Drop for Service { - fn drop(&mut self) { - if let Some((sender, join)) = self.bg_thread.take() { - let _ = sender.send(()); - if let Err(e) = join.join() { - error!("Error while waiting on background thread: {:?}", e); - } - } - } + fn drop(&mut self) { + if let Some((sender, join)) = self.bg_thread.take() { + let _ = sender.send(()); + if let Err(e) = join.join() { + error!("Error while waiting on background thread: {:?}", e); + } + } + } } impl> SyncProvider for Service { - fn is_major_syncing(&self) -> bool { - self.is_major_syncing() - } - - /// Get sync status - fn status(&self) -> mpsc::UnboundedReceiver> { - let (sink, stream) = mpsc::unbounded(); - self.status_sinks.lock().push(sink); - stream - } - - fn network_state(&self) -> NetworkState { - self.network.lock().state() - } - - fn peers(&self) -> Vec<(PeerId, PeerInfo)> { - let peers = (*self.peers.read()).clone(); - peers.into_iter().map(|(idx, connected)| (idx, connected.peer_info)).collect() - } + fn is_major_syncing(&self) -> bool { + self.is_major_syncing() + } + + /// Get sync status + fn status(&self) -> mpsc::UnboundedReceiver> { + let (sink, stream) = mpsc::unbounded(); + self.status_sinks.lock().push(sink); + stream + } + + fn network_state(&self) -> NetworkState { + self.network.lock().state() + } + + fn peers(&self) -> Vec<(PeerId, PeerInfo)> { + let peers = (*self.peers.read()).clone(); + peers + .into_iter() + .map(|(idx, connected)| (idx, connected.peer_info)) + .collect() + } } /// Trait for managing network pub trait ManageNetwork { - /// 
- fn accept_unreserved_peers(&self);
- /// Set to deny unreserved peers from connecting
- fn deny_unreserved_peers(&self);
- /// Remove reservation for the peer
- fn remove_reserved_peer(&self, peer: PeerId);
- /// Add reserved peer
- fn add_reserved_peer(&self, peer: String) -> Result<(), String>;
- /// Returns a user-friendly identifier of our node.
- fn node_id(&self) -> Option;
+ /// Set to allow unreserved peers to connect
+ fn accept_unreserved_peers(&self);
+ /// Set to deny unreserved peers from connecting
+ fn deny_unreserved_peers(&self);
+ /// Remove reservation for the peer
+ fn remove_reserved_peer(&self, peer: PeerId);
+ /// Add reserved peer
+ fn add_reserved_peer(&self, peer: String) -> Result<(), String>;
+ /// Returns a user-friendly identifier of our node.
+ fn node_id(&self) -> Option;
}

impl> ManageNetwork for Service {
- fn accept_unreserved_peers(&self) {
-     self.peerset.set_reserved_only(false);
- }
-
- fn deny_unreserved_peers(&self) {
-     self.peerset.set_reserved_only(true);
- }
-
- fn remove_reserved_peer(&self, peer: PeerId) {
-     self.peerset.remove_reserved_peer(&peer);
- }
-
- fn add_reserved_peer(&self, peer: String) -> Result<(), String> {
-     let (peer_id, addr) = parse_str_addr(&peer).map_err(|e| format!("{:?}", e))?;
-     self.peerset.add_reserved_peer(peer_id.clone());
-     self.network.lock().add_known_address(peer_id, addr);
-     Ok(())
- }
-
- fn node_id(&self) -> Option {
-     let network = self.network.lock();
-     let ret = network
-         .listeners()
-         .next()
-         .map(|addr| {
-             let mut addr = addr.clone();
-             addr.append(multiaddr::Protocol::P2p(network.peer_id().clone().into()));
-             addr.to_string()
-         });
-     ret
- }
+ fn accept_unreserved_peers(&self) {
+     self.peerset.set_reserved_only(false);
+ }
+
+ fn deny_unreserved_peers(&self) {
+     self.peerset.set_reserved_only(true);
+ }
+
+ fn remove_reserved_peer(&self, peer: PeerId) {
+     self.peerset.remove_reserved_peer(&peer);
+ }
+
+ fn add_reserved_peer(&self, peer: String) -> Result<(), String> {
+     let (peer_id, addr) = parse_str_addr(&peer).map_err(|e| format!("{:?}", e))?;
+     self.peerset.add_reserved_peer(peer_id.clone());
+     self.network.lock().add_known_address(peer_id, addr);
+     Ok(())
+ }
+
+ fn node_id(&self) -> Option {
+     let network = self.network.lock();
+     let ret = network.listeners().next().map(|addr| {
+         let mut addr = addr.clone();
+         addr.append(multiaddr::Protocol::P2p(network.peer_id().clone().into()));
+         addr.to_string()
+     });
+     ret
+ }
}

-
/// Create a NetworkPort/Chan pair.
pub fn network_channel() -> (NetworkChan, NetworkPort) {
- let (network_sender, network_receiver) = channel::unbounded();
- let task_notify = Arc::new(AtomicTask::new());
- let network_port = NetworkPort::new(network_receiver, task_notify.clone());
- let network_chan = NetworkChan::new(network_sender, task_notify);
- (network_chan, network_port)
+ let (network_sender, network_receiver) = channel::unbounded();
+ let task_notify = Arc::new(AtomicTask::new());
+ let network_port = NetworkPort::new(network_receiver, task_notify.clone());
+ let network_chan = NetworkChan::new(network_sender, task_notify);
+ (network_chan, network_port)
}

-
/// A sender of NetworkMsg that notifies a task when a message has been sent.
#[derive(Clone)]
pub struct NetworkChan {
- sender: Sender>,
- task_notify: Arc,
+ sender: Sender>,
+ task_notify: Arc,
}

impl NetworkChan {
- /// Create a new network chan.
- pub fn new(sender: Sender>, task_notify: Arc) -> Self {
-     NetworkChan {
-         sender,
-         task_notify,
-     }
- }
-
- /// Send a message, to be handled on a stream. Notify the task handling the stream.
- pub fn send(&self, msg: NetworkMsg) {
-     let _ = self.sender.send(msg);
-     self.task_notify.notify();
- }
+ /// Create a new network chan.
+ pub fn new(sender: Sender>, task_notify: Arc) -> Self {
+     NetworkChan {
+         sender,
+         task_notify,
+     }
+ }
+
+ /// Send a message, to be handled on a stream. Notify the task handling the stream.
+ pub fn send(&self, msg: NetworkMsg) {
+     let _ = self.sender.send(msg);
+     self.task_notify.notify();
+ }
}

impl Drop for NetworkChan {
- /// Notify the task when a sender is dropped (when all are dropped, the stream is finished).
- fn drop(&mut self) {
-     self.task_notify.notify();
- }
+ /// Notify the task when a sender is dropped (when all are dropped, the stream is finished).
+ fn drop(&mut self) {
+     self.task_notify.notify();
+ }
}

-
/// A receiver of NetworkMsg that makes the protocol-id available with each message.
pub struct NetworkPort {
- receiver: Receiver>,
- task_notify: Arc,
+ receiver: Receiver>,
+ task_notify: Arc,
}

impl NetworkPort {
- /// Create a new network port for a given protocol-id.
- pub fn new(receiver: Receiver>, task_notify: Arc) -> Self {
-     Self {
-         receiver,
-         task_notify,
-     }
- }
-
- /// Receive a message, if any is currently-enqueued.
- /// Register the current tokio task for notification when a new message is available.
- pub fn take_one_message(&self) -> Result>, ()> {
-     self.task_notify.register();
-     match self.receiver.try_recv() {
-         Ok(msg) => Ok(Some(msg)),
-         Err(TryRecvError::Empty) => Ok(None),
-         Err(TryRecvError::Disconnected) => Err(()),
-     }
- }
-
- /// Get a reference to the underlying crossbeam receiver.
- #[cfg(any(test, feature = "test-helpers"))]
- pub fn receiver(&self) -> &Receiver> {
-     &self.receiver
- }
+ /// Create a new network port for a given protocol-id.
+ pub fn new(receiver: Receiver>, task_notify: Arc) -> Self {
+     Self {
+         receiver,
+         task_notify,
+     }
+ }
+
+ /// Receive a message, if any is currently-enqueued.
+ /// Register the current tokio task for notification when a new message is available.
+ pub fn take_one_message(&self) -> Result>, ()> {
+     self.task_notify.register();
+     match self.receiver.try_recv() {
+         Ok(msg) => Ok(Some(msg)),
+         Err(TryRecvError::Empty) => Ok(None),
+         Err(TryRecvError::Disconnected) => Err(()),
+     }
+ }
+
+ /// Get a reference to the underlying crossbeam receiver.
+ #[cfg(any(test, feature = "test-helpers"))]
+ pub fn receiver(&self) -> &Receiver> {
+     &self.receiver
+ }
}

/// Messages to be handled by NetworkService.
#[derive(Debug)]
pub enum NetworkMsg {
- /// Send an outgoing custom message.
- Outgoing(PeerId, Message),
- /// Report a peer.
- ReportPeer(PeerId, Severity),
+ /// Send an outgoing custom message.
+ Outgoing(PeerId, Message),
+ /// Report a peer.
+ ReportPeer(PeerId, Severity),
}

/// Starts the background thread that handles the networking.
fn start_thread(
- protocol_sender: Sender>,
- network_port: NetworkPort,
- config: NetworkConfiguration,
- registered: RegisteredProtocol>,
- ) -> Result<((oneshot::Sender<()>, thread::JoinHandle<()>), Arc>>>, Arc), Error> {
- // Start the main service.
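`NetworkChan` and `NetworkPort` pair a crossbeam channel with a futures `AtomicTask`: every `send` (and every sender drop) notifies the task that drains the port, and `take_one_message` registers the current task before polling. A rough std-only analogue of the same wake-on-send idea, with a `Condvar` standing in for `AtomicTask` (illustrative, heavily simplified):

    use std::collections::VecDeque;
    use std::sync::{Arc, Condvar, Mutex};

    struct Port<T> {
        queue: Mutex<VecDeque<T>>,
        wakeup: Condvar, // stands in for the AtomicTask notification
    }

    impl<T> Port<T> {
        fn send(&self, msg: T) {
            self.queue.lock().unwrap().push_back(msg);
            self.wakeup.notify_one(); // wake the draining side, like task_notify.notify()
        }

        fn take_one_message(&self) -> T {
            let mut q = self.queue.lock().unwrap();
            loop {
                if let Some(msg) = q.pop_front() {
                    return msg;
                }
                // Block until a sender notifies us, instead of registering a task.
                q = self.wakeup.wait(q).unwrap();
            }
        }
    }

    fn main() {
        let port = Arc::new(Port { queue: Mutex::new(VecDeque::new()), wakeup: Condvar::new() });
        let sender = port.clone();
        std::thread::spawn(move || sender.send("hello"));
        println!("{}", port.take_one_message());
    }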
- let (service, peerset) = match start_service(config, registered) { - Ok((service, peerset)) => (Arc::new(Mutex::new(service)), peerset), - Err(err) => { - warn!("Error starting network: {}", err); - return Err(err.into()) - }, - }; - - let (close_tx, close_rx) = oneshot::channel(); - let service_clone = service.clone(); - let mut runtime = RuntimeBuilder::new().name_prefix("libp2p-").build()?; - let thread = thread::Builder::new().name("network".to_string()).spawn(move || { - let fut = run_thread(protocol_sender, service_clone, network_port) - .select(close_rx.then(|_| Ok(()))) - .map(|(val, _)| val) - .map_err(|(err,_ )| err); - - // Note that we use `block_on` and not `block_on_all` because we want to kill the thread - // instantly if `close_rx` receives something. - match runtime.block_on(fut) { - Ok(()) => debug!(target: "sub-libp2p", "Networking thread finished"), - Err(err) => error!(target: "sub-libp2p", "Error while running libp2p: {:?}", err), - }; - })?; - - Ok(((close_tx, thread), service, peerset)) + protocol_sender: Sender>, + network_port: NetworkPort, + config: NetworkConfiguration, + registered: RegisteredProtocol>, +) -> Result< + ( + (oneshot::Sender<()>, thread::JoinHandle<()>), + Arc>>>, + Arc, + ), + Error, +> { + // Start the main service. + let (service, peerset) = match start_service(config, registered) { + Ok((service, peerset)) => (Arc::new(Mutex::new(service)), peerset), + Err(err) => { + warn!("Error starting network: {}", err); + return Err(err.into()); + } + }; + + let (close_tx, close_rx) = oneshot::channel(); + let service_clone = service.clone(); + let mut runtime = RuntimeBuilder::new().name_prefix("libp2p-").build()?; + let thread = thread::Builder::new() + .name("network".to_string()) + .spawn(move || { + let fut = run_thread(protocol_sender, service_clone, network_port) + .select(close_rx.then(|_| Ok(()))) + .map(|(val, _)| val) + .map_err(|(err, _)| err); + + // Note that we use `block_on` and not `block_on_all` because we want to kill the thread + // instantly if `close_rx` receives something. + match runtime.block_on(fut) { + Ok(()) => debug!(target: "sub-libp2p", "Networking thread finished"), + Err(err) => error!(target: "sub-libp2p", "Error while running libp2p: {:?}", err), + }; + })?; + + Ok(((close_tx, thread), service, peerset)) } /// Runs the background thread that handles the networking. fn run_thread( - protocol_sender: Sender>, - network_service: Arc>>>, - network_port: NetworkPort, + protocol_sender: Sender>, + network_service: Arc>>>, + network_port: NetworkPort, ) -> impl Future { - - let network_service_2 = network_service.clone(); - - // Protocol produces a stream of messages about what happens in sync. - let protocol = stream::poll_fn(move || { - match network_port.take_one_message() { - Ok(Some(message)) => Ok(Async::Ready(Some(message))), - Ok(None) => Ok(Async::NotReady), - Err(_) => Err(()) - } - }).for_each(move |msg| { - // Handle message from Protocol. - match msg { - NetworkMsg::Outgoing(who, outgoing_message) => { - network_service_2 - .lock() - .send_custom_message(&who, outgoing_message); - }, - NetworkMsg::ReportPeer(who, severity) => { - match severity { - Severity::Bad(message) => { - info!(target: "sync", "Banning {:?} because {:?}", who, message); - warn!(target: "sync", "Banning a node is a deprecated mechanism that \ + let network_service_2 = network_service.clone(); + + // Protocol produces a stream of messages about what happens in sync. 
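`start_thread` hands back the `oneshot::Sender` together with the `JoinHandle`, and the `Drop` impl for `Service` earlier in this file uses that pair to signal and then join the background thread. The same shutdown-on-drop shape with std primitives only (an mpsc channel standing in for the oneshot; names are illustrative):

    use std::sync::mpsc::{channel, Sender, TryRecvError};
    use std::thread::{self, JoinHandle};

    // A shutdown-on-drop handle: `close_tx` plays the role of the oneshot
    // sender and `join` mirrors Service::bg_thread.
    struct BgThread {
        close_tx: Sender<()>,
        join: Option<JoinHandle<()>>,
    }

    impl Drop for BgThread {
        fn drop(&mut self) {
            let _ = self.close_tx.send(());
            if let Some(join) = self.join.take() {
                if let Err(e) = join.join() {
                    eprintln!("Error while waiting on background thread: {:?}", e);
                }
            }
        }
    }

    fn start_bg() -> BgThread {
        let (close_tx, close_rx) = channel();
        let join = thread::Builder::new()
            .name("network".to_string())
            .spawn(move || loop {
                match close_rx.try_recv() {
                    Ok(()) | Err(TryRecvError::Disconnected) => break, // shut down
                    // Busy-poll only for the sake of the sketch; the real code
                    // blocks on a tokio runtime instead.
                    Err(TryRecvError::Empty) => thread::yield_now(),
                }
            })
            .expect("failed to spawn thread");
        BgThread { close_tx, join: Some(join) }
    }

    fn main() {
        let bg = start_bg();
        drop(bg); // signals the thread and joins it
    }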
+ let protocol = stream::poll_fn(move || match network_port.take_one_message() { + Ok(Some(message)) => Ok(Async::Ready(Some(message))), + Ok(None) => Ok(Async::NotReady), + Err(_) => Err(()), + }) + .for_each(move |msg| { + // Handle message from Protocol. + match msg { + NetworkMsg::Outgoing(who, outgoing_message) => { + network_service_2 + .lock() + .send_custom_message(&who, outgoing_message); + } + NetworkMsg::ReportPeer(who, severity) => match severity { + Severity::Bad(message) => { + info!(target: "sync", "Banning {:?} because {:?}", who, message); + warn!(target: "sync", "Banning a node is a deprecated mechanism that \ should be removed"); - network_service_2.lock().drop_node(&who) - }, - Severity::Useless(message) => { - debug!(target: "sync", "Dropping {:?} because {:?}", who, message); - network_service_2.lock().drop_node(&who) - }, - Severity::Timeout => { - debug!(target: "sync", "Dropping {:?} because it timed out", who); - network_service_2.lock().drop_node(&who) - }, - } - }, - } - Ok(()) - }) - .then(|res| { - match res { - Ok(()) => (), - Err(_) => error!("Protocol disconnected"), - }; - Ok(()) - }); - - // The network service produces events about what happens on the network. Let's process them. - let network = stream::poll_fn(move || network_service.lock().poll()).for_each(move |event| { - match event { - NetworkServiceEvent::OpenedCustomProtocol { peer_id, version, debug_info, .. } => { - debug_assert_eq!(version, protocol::CURRENT_VERSION as u8); - let _ = protocol_sender.send(FromNetworkMsg::PeerConnected(peer_id, debug_info)); - } - NetworkServiceEvent::ClosedCustomProtocol { peer_id, debug_info, .. } => { - let _ = protocol_sender.send(FromNetworkMsg::PeerDisconnected(peer_id, debug_info)); - } - NetworkServiceEvent::CustomMessage { peer_id, message, .. } => { - let _ = protocol_sender.send(FromNetworkMsg::CustomMessage(peer_id, message)); - return Ok(()) - } - NetworkServiceEvent::Clogged { peer_id, messages, .. } => { - debug!(target: "sync", "{} clogging messages:", messages.len()); - for msg in messages.into_iter().take(5) { - debug!(target: "sync", "{:?}", msg); - let _ = protocol_sender.send(FromNetworkMsg::PeerClogged(peer_id.clone(), Some(msg))); - } - } - }; - Ok(()) - }); - - // Merge all futures into one. - let futures: Vec + Send>> = vec![ - Box::new(protocol) as Box<_>, - Box::new(network) as Box<_> - ]; - - futures::select_all(futures) - .and_then(move |_| { - debug!("Networking ended"); - Ok(()) - }) - .map_err(|(r, _, _)| r) + network_service_2.lock().drop_node(&who) + } + Severity::Useless(message) => { + debug!(target: "sync", "Dropping {:?} because {:?}", who, message); + network_service_2.lock().drop_node(&who) + } + Severity::Timeout => { + debug!(target: "sync", "Dropping {:?} because it timed out", who); + network_service_2.lock().drop_node(&who) + } + }, + } + Ok(()) + }) + .then(|res| { + match res { + Ok(()) => (), + Err(_) => error!("Protocol disconnected"), + }; + Ok(()) + }); + + // The network service produces events about what happens on the network. Let's process them. + let network = stream::poll_fn(move || network_service.lock().poll()).for_each(move |event| { + match event { + NetworkServiceEvent::OpenedCustomProtocol { + peer_id, + version, + debug_info, + .. + } => { + debug_assert_eq!(version, protocol::CURRENT_VERSION as u8); + let _ = protocol_sender.send(FromNetworkMsg::PeerConnected(peer_id, debug_info)); + } + NetworkServiceEvent::ClosedCustomProtocol { + peer_id, + debug_info, + .. 
+ } => {
+     let _ = protocol_sender.send(FromNetworkMsg::PeerDisconnected(peer_id, debug_info));
+ }
+ NetworkServiceEvent::CustomMessage {
+     peer_id, message, ..
+ } => {
+     let _ = protocol_sender.send(FromNetworkMsg::CustomMessage(peer_id, message));
+     return Ok(());
+ }
+ NetworkServiceEvent::Clogged {
+     peer_id, messages, ..
+ } => {
+     debug!(target: "sync", "{} clogging messages:", messages.len());
+     for msg in messages.into_iter().take(5) {
+         debug!(target: "sync", "{:?}", msg);
+         let _ = protocol_sender
+             .send(FromNetworkMsg::PeerClogged(peer_id.clone(), Some(msg)));
+     }
+ }
+ };
+ Ok(())
+ });
+
+ // Merge all futures into one.
+ let futures: Vec + Send>> =
+     vec![Box::new(protocol) as Box<_>, Box::new(network) as Box<_>];
+
+ futures::select_all(futures)
+     .and_then(move |_| {
+         debug!("Networking ended");
+         Ok(())
+     })
+     .map_err(|(r, _, _)| r)
}
diff --git a/core/network/src/specialization.rs b/core/network/src/specialization.rs
index e440097dd1..6e40b3fc29 100644
--- a/core/network/src/specialization.rs
+++ b/core/network/src/specialization.rs
@@ -16,31 +16,36 @@
//! Specializations of the substrate network protocol to allow more complex forms of communication.

+use crate::protocol::Context;
use crate::PeerId;
use runtime_primitives::traits::Block as BlockT;
-use crate::protocol::Context;

/// A specialization of the substrate network protocol. Handles events and sends messages.
pub trait NetworkSpecialization: Send + Sync + 'static {
- /// Get the current specialization-status.
- fn status(&self) -> Vec;
+ /// Get the current specialization-status.
+ fn status(&self) -> Vec;

- /// Called when a peer successfully handshakes.
- fn on_connect(&mut self, ctx: &mut Context, who: PeerId, status: crate::message::Status);
+ /// Called when a peer successfully handshakes.
+ fn on_connect(&mut self, ctx: &mut Context, who: PeerId, status: crate::message::Status);

- /// Called when a peer is disconnected. If the peer ID is unknown, it should be ignored.
- fn on_disconnect(&mut self, ctx: &mut Context, who: PeerId);
+ /// Called when a peer is disconnected. If the peer ID is unknown, it should be ignored.
+ fn on_disconnect(&mut self, ctx: &mut Context, who: PeerId);

- /// Called when a network-specific message arrives.
- fn on_message(&mut self, ctx: &mut Context, who: PeerId, message: &mut Option>);
+ /// Called when a network-specific message arrives.
+ fn on_message(
+     &mut self,
+     ctx: &mut Context,
+     who: PeerId,
+     message: &mut Option>,
+ );

- /// Called on abort.
- fn on_abort(&mut self) { }
+ /// Called on abort.
+ fn on_abort(&mut self) {}

- /// Called periodically to maintain peers and handle timeouts.
- fn maintain_peers(&mut self, _ctx: &mut Context) { }
+ /// Called periodically to maintain peers and handle timeouts.
+ fn maintain_peers(&mut self, _ctx: &mut Context) {}

- /// Called when a block is _imported_ at the head of the chain (not during major sync).
- /// Not guaranteed to be called for every block, but will be for most of them after major sync.
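For a chain with no extra protocol needs, a specialization is mostly boilerplate. A toy version of such a trait with a do-nothing implementor might look like the following (hand-written, simplified stand-ins rather than the real generic trait, which is parameterised over the block type):

    // Simplified stand-ins for the real generic trait and Context type.
    struct Context;
    type PeerId = String;

    trait Specialization: Send + Sync + 'static {
        fn status(&self) -> Vec<u8>;
        fn on_connect(&mut self, _ctx: &mut Context, _who: PeerId) {}
        fn on_disconnect(&mut self, _ctx: &mut Context, _who: PeerId) {}
        fn on_message(&mut self, _ctx: &mut Context, _who: PeerId, _message: Vec<u8>) {}
    }

    // A chain with no extra protocol needs supplies a do-nothing specialization.
    struct NoSpecialization;

    impl Specialization for NoSpecialization {
        fn status(&self) -> Vec<u8> {
            Vec::new() // nothing to report in the handshake
        }
    }

    fn main() {
        let mut spec = NoSpecialization;
        let mut ctx = Context;
        spec.on_connect(&mut ctx, "peer-1".to_string());
        assert!(spec.status().is_empty());
    }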
+ fn on_block_imported(&mut self, _ctx: &mut Context, _hash: B::Hash, _header: &B::Header) {} } diff --git a/core/network/src/sync.rs b/core/network/src/sync.rs index 80ff8221a1..7e8d6622d0 100644 --- a/core/network/src/sync.rs +++ b/core/network/src/sync.rs @@ -14,26 +14,28 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use std::cmp::max; -use std::collections::{HashMap, VecDeque}; -use std::time::{Duration, Instant}; -use log::{debug, trace, warn}; +use crate::blocks::BlockCollection; +use crate::config::Roles; +use crate::message::{self, generic::Message as GenericMessage}; use crate::protocol::Context; -use fork_tree::ForkTree; -use network_libp2p::{Severity, PeerId}; +use client::error::Error as ClientError; use client::{BlockStatus, ClientInfo}; -use consensus::BlockOrigin; use consensus::import_queue::{ImportQueue, IncomingBlock}; -use client::error::Error as ClientError; -use crate::blocks::BlockCollection; -use runtime_primitives::Justification; -use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, As, NumberFor, Zero, CheckedSub}; +use consensus::BlockOrigin; +use fork_tree::ForkTree; +use log::{debug, trace, warn}; +use network_libp2p::{PeerId, Severity}; use runtime_primitives::generic::BlockId; -use crate::message::{self, generic::Message as GenericMessage}; -use crate::config::Roles; +use runtime_primitives::traits::{ + As, Block as BlockT, CheckedSub, Header as HeaderT, NumberFor, Zero, +}; +use runtime_primitives::Justification; +use std::cmp::max; use std::collections::HashSet; -use std::sync::Arc; +use std::collections::{HashMap, VecDeque}; use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::Arc; +use std::time::{Duration, Instant}; // Maximum blocks to request in a single packet. const MAX_BLOCKS_TO_REQUEST: usize = 128; @@ -50,39 +52,39 @@ const MAX_UNKNOWN_FORK_DOWNLOAD_LEN: u32 = 32; #[derive(Debug)] struct PeerSync { - pub common_number: NumberFor, - pub best_hash: B::Hash, - pub best_number: NumberFor, - pub state: PeerSyncState, - pub recently_announced: VecDeque, + pub common_number: NumberFor, + pub best_hash: B::Hash, + pub best_number: NumberFor, + pub state: PeerSyncState, + pub recently_announced: VecDeque, } #[derive(Debug)] /// Peer sync status. pub(crate) struct PeerInfo { - /// Their best block hash. - pub best_hash: B::Hash, - /// Their best block number. - pub best_number: NumberFor, + /// Their best block hash. + pub best_hash: B::Hash, + /// Their best block number. + pub best_number: NumberFor, } #[derive(Copy, Clone, Eq, PartialEq, Debug)] enum AncestorSearchState { - /// Use exponential backoff to find an ancestor, then switch to binary search. - /// We keep track of the exponent. - ExponentialBackoff(NumberFor), - /// Using binary search to find the best ancestor. - /// We keep track of left and right bounds. - BinarySearch(NumberFor, NumberFor), + /// Use exponential backoff to find an ancestor, then switch to binary search. + /// We keep track of the exponent. + ExponentialBackoff(NumberFor), + /// Using binary search to find the best ancestor. + /// We keep track of left and right bounds. 
+ BinarySearch(NumberFor, NumberFor), } #[derive(Copy, Clone, Eq, PartialEq, Debug)] enum PeerSyncState { - AncestorSearch(NumberFor, AncestorSearchState), - Available, - DownloadingNew(NumberFor), - DownloadingStale(B::Hash), - DownloadingJustification(B::Hash), + AncestorSearch(NumberFor, AncestorSearchState), + Available, + DownloadingNew(NumberFor), + DownloadingStale(B::Hash), + DownloadingJustification(B::Hash), } /// Pending justification request for the given block (hash and number). @@ -94,979 +96,1112 @@ type PendingJustification = (::Hash, NumberFor); /// fetched in-order, and that obsolete changes are pruned (when finalizing a /// competing fork). struct PendingJustifications { - justifications: ForkTree, ()>, - pending_requests: VecDeque>, - peer_requests: HashMap>, - previous_requests: HashMap, Vec<(PeerId, Instant)>>, - importing_requests: HashSet>, + justifications: ForkTree, ()>, + pending_requests: VecDeque>, + peer_requests: HashMap>, + previous_requests: HashMap, Vec<(PeerId, Instant)>>, + importing_requests: HashSet>, } impl PendingJustifications { - fn new() -> PendingJustifications { - PendingJustifications { - justifications: ForkTree::new(), - pending_requests: VecDeque::new(), - peer_requests: HashMap::new(), - previous_requests: HashMap::new(), - importing_requests: HashSet::new(), - } - } - - /// Dispatches all possible pending requests to the given peers. Peers are - /// filtered according to the current known best block (i.e. we won't send a - /// justification request for block #10 to a peer at block #2), and we also - /// throttle requests to the same peer if a previous justification request - /// yielded no results. - fn dispatch(&mut self, peers: &mut HashMap>, protocol: &mut Context) { - if self.pending_requests.is_empty() { - return; - } - - let initial_pending_requests = self.pending_requests.len(); - - // clean up previous failed requests so we can retry again - for (_, requests) in self.previous_requests.iter_mut() { - requests.retain(|(_, instant)| instant.elapsed() < JUSTIFICATION_RETRY_WAIT); - } - - let mut available_peers = peers.iter().filter_map(|(peer, sync)| { - // don't request to any peers that already have pending requests or are unavailable - if sync.state != PeerSyncState::Available || self.peer_requests.contains_key(&peer) { - None - } else { - Some((peer.clone(), sync.best_number)) - } - }).collect::>(); - - let mut last_peer = available_peers.back().map(|p| p.0.clone()); - let mut unhandled_requests = VecDeque::new(); - - loop { - let (peer, peer_best_number) = match available_peers.pop_front() { - Some(p) => p, - _ => break, - }; - - // only ask peers that have synced past the block number that we're - // asking the justification for and to whom we haven't already made - // the same request recently - let peer_eligible = { - let request = match self.pending_requests.front() { - Some(r) => r.clone(), - _ => break, - }; - - peer_best_number >= request.1 && - !self.previous_requests - .get(&request) - .map(|requests| requests.iter().any(|i| i.0 == peer)) - .unwrap_or(false) - }; - - if !peer_eligible { - available_peers.push_back((peer.clone(), peer_best_number)); - - // we tried all peers and none can answer this request - if Some(peer) == last_peer { - last_peer = available_peers.back().map(|p| p.0.clone()); - - let request = self.pending_requests.pop_front() - .expect("verified to be Some in the beginning of the loop; qed"); - - unhandled_requests.push_back(request); - } - - continue; - } - - last_peer = 
available_peers.back().map(|p| p.0.clone());
-
- let request = self.pending_requests.pop_front()
-     .expect("verified to be Some in the beginning of the loop; qed");
-
- self.peer_requests.insert(peer.clone(), request);
-
- peers.get_mut(&peer)
+ fn new() -> PendingJustifications {
+     PendingJustifications {
+         justifications: ForkTree::new(),
+         pending_requests: VecDeque::new(),
+         peer_requests: HashMap::new(),
+         previous_requests: HashMap::new(),
+         importing_requests: HashSet::new(),
+     }
+ }
+
+ /// Dispatches all possible pending requests to the given peers. Peers are
+ /// filtered according to the current known best block (i.e. we won't send a
+ /// justification request for block #10 to a peer at block #2), and we also
+ /// throttle requests to the same peer if a previous justification request
+ /// yielded no results.
+ fn dispatch(&mut self, peers: &mut HashMap>, protocol: &mut Context) {
+     if self.pending_requests.is_empty() {
+         return;
+     }
+
+     let initial_pending_requests = self.pending_requests.len();
+
+     // clean up previous failed requests so we can retry again
+     for (_, requests) in self.previous_requests.iter_mut() {
+         requests.retain(|(_, instant)| instant.elapsed() < JUSTIFICATION_RETRY_WAIT);
+     }
+
+     let mut available_peers = peers
+         .iter()
+         .filter_map(|(peer, sync)| {
+             // don't request to any peers that already have pending requests or are unavailable
+             if sync.state != PeerSyncState::Available || self.peer_requests.contains_key(&peer)
+             {
+                 None
+             } else {
+                 Some((peer.clone(), sync.best_number))
+             }
+         })
+         .collect::>();
+
+     let mut last_peer = available_peers.back().map(|p| p.0.clone());
+     let mut unhandled_requests = VecDeque::new();
+
+     loop {
+         let (peer, peer_best_number) = match available_peers.pop_front() {
+             Some(p) => p,
+             _ => break,
+         };
+
+         // only ask peers that have synced past the block number that we're
+         // asking the justification for and to whom we haven't already made
+         // the same request recently
+         let peer_eligible = {
+             let request = match self.pending_requests.front() {
+                 Some(r) => r.clone(),
+                 _ => break,
+             };
+
+             peer_best_number >= request.1
+                 && !self
+                     .previous_requests
+                     .get(&request)
+                     .map(|requests| requests.iter().any(|i| i.0 == peer))
+                     .unwrap_or(false)
+         };
+
+         if !peer_eligible {
+             available_peers.push_back((peer.clone(), peer_best_number));
+
+             // we tried all peers and none can answer this request
+             if Some(peer) == last_peer {
+                 last_peer = available_peers.back().map(|p| p.0.clone());
+
+                 let request = self
+                     .pending_requests
+                     .pop_front()
+                     .expect("verified to be Some in the beginning of the loop; qed");
+
+                 unhandled_requests.push_back(request);
+             }
+
+             continue;
+         }
+
+         last_peer = available_peers.back().map(|p| p.0.clone());
+
+         let request = self
+             .pending_requests
+             .pop_front()
+             .expect("verified to be Some in the beginning of the loop; qed");
+
+         self.peer_requests.insert(peer.clone(), request);
+
+         peers.get_mut(&peer)
.expect("peer is taken from available_peers; available_peers is a subset of peers; qed")
.state = PeerSyncState::DownloadingJustification(request.0);
- trace!(target: "sync", "Requesting justification for block #{} from {}", request.0, peer);
- let request = message::generic::BlockRequest {
-     id: 0,
-     fields: message::BlockAttributes::JUSTIFICATION,
-     from: message::FromBlock::Hash(request.0),
-     to: None,
-     direction: message::Direction::Ascending,
-     max: Some(1),
- };
-
- protocol.send_message(peer, GenericMessage::BlockRequest(request));
- }
-
- self.pending_requests.append(&mut
unhandled_requests); - - trace!(target: "sync", "Dispatched {} justification requests ({} pending)", - initial_pending_requests - self.pending_requests.len(), - self.pending_requests.len(), - ); - } - - /// Queue a justification request (without dispatching it). - fn queue_request( - &mut self, - justification: &PendingJustification, - is_descendent_of: F, - ) where F: Fn(&B::Hash, &B::Hash) -> Result { - match self.justifications.import(justification.0.clone(), justification.1.clone(), (), &is_descendent_of) { - Ok(true) => { - // this is a new root so we add it to the current `pending_requests` - self.pending_requests.push_back((justification.0, justification.1)); - }, - Err(err) => { - warn!(target: "sync", "Failed to insert requested justification {:?} {:?} into tree: {:?}", - justification.0, - justification.1, - err, - ); - return; - }, - _ => {}, - }; - } - - /// Retry any pending request if a peer disconnected. - fn peer_disconnected(&mut self, who: PeerId) { - if let Some(request) = self.peer_requests.remove(&who) { - self.pending_requests.push_front(request); - } - } - - /// Process the import of a justification. - /// Queues a retry in case the import failed. - fn justification_import_result(&mut self, hash: B::Hash, number: NumberFor, success: bool) { - let request = (hash, number); - - if !self.importing_requests.remove(&request) { - debug!(target: "sync", "Got justification import result for unknown justification {:?} {:?} request.", - request.0, - request.1, - ); - - return; - }; - - if success { - if self.justifications.finalize_root(&request.0).is_none() { - warn!(target: "sync", "Imported justification for {:?} {:?} which isn't a root in the tree: {:?}", - request.0, - request.1, - self.justifications.roots().collect::>(), - ); - - return; - }; - - self.previous_requests.clear(); - self.peer_requests.clear(); - self.pending_requests = - self.justifications.roots().map(|(h, n, _)| (h.clone(), n.clone())).collect(); - - return; - } - self.pending_requests.push_front(request); - } - - /// Processes the response for the request previously sent to the given - /// peer. Queues a retry in case the given justification - /// was `None`. - fn on_response( - &mut self, - who: PeerId, - justification: Option, - import_queue: &ImportQueue, - ) { - // we assume that the request maps to the given response, this is - // currently enforced by the outer network protocol before passing on - // messages to chain sync. - if let Some(request) = self.peer_requests.remove(&who) { - if let Some(justification) = justification { - import_queue.import_justification(who.clone(), request.0, request.1, justification); - self.importing_requests.insert(request); - return - } - - self.previous_requests - .entry(request) - .or_insert(Vec::new()) - .push((who, Instant::now())); - - self.pending_requests.push_front(request); - } - } - - /// Removes any pending justification requests for blocks lower than the - /// given best finalized. 
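The retry bookkeeping above (`previous_requests` plus the `retain` at the top of `dispatch`) is a plain time-based throttle: record which peer failed which request and when, and treat the peer as eligible again once the entry ages out. The shape of it in isolation (illustrative types and constant; the real wait is `JUSTIFICATION_RETRY_WAIT`, whose value is defined elsewhere in this file):

    use std::collections::HashMap;
    use std::time::{Duration, Instant};

    // Illustrative stand-ins: a request is (hash, number), a peer is a String.
    type Request = (u64, u64);
    type PeerId = String;

    const RETRY_WAIT: Duration = Duration::from_secs(10); // plays the role of JUSTIFICATION_RETRY_WAIT

    struct Throttle {
        previous_requests: HashMap<Request, Vec<(PeerId, Instant)>>,
    }

    impl Throttle {
        // Forget failures old enough to retry, exactly like the retain in dispatch().
        fn cleanup(&mut self) {
            for (_, requests) in self.previous_requests.iter_mut() {
                requests.retain(|(_, instant)| instant.elapsed() < RETRY_WAIT);
            }
        }

        // A peer is eligible unless it failed this request within the window.
        fn eligible(&self, peer: &PeerId, request: &Request) -> bool {
            !self
                .previous_requests
                .get(request)
                .map(|requests| requests.iter().any(|(p, _)| p == peer))
                .unwrap_or(false)
        }
    }

    fn main() {
        let mut t = Throttle { previous_requests: HashMap::new() };
        t.previous_requests
            .entry((1, 1))
            .or_insert_with(Vec::new)
            .push(("alice".into(), Instant::now()));
        t.cleanup();
        assert!(!t.eligible(&"alice".to_string(), &(1, 1)));
        assert!(t.eligible(&"bob".to_string(), &(1, 1)));
    }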
- fn on_block_finalized( - &mut self, - best_finalized_hash: &B::Hash, - best_finalized_number: NumberFor, - is_descendent_of: F, - ) -> Result<(), fork_tree::Error> - where F: Fn(&B::Hash, &B::Hash) -> Result - { - if self.importing_requests.contains(&(*best_finalized_hash, best_finalized_number)) { - // we imported this justification ourselves, so we should get back a response - // from the import queue through `justification_import_result` - return Ok(()); - } - - self.justifications.finalize(best_finalized_hash, best_finalized_number, &is_descendent_of)?; - - let roots = self.justifications.roots().collect::>(); - - self.pending_requests.retain(|(h, n)| roots.contains(&(h, n, &()))); - self.peer_requests.retain(|_, (h, n)| roots.contains(&(h, n, &()))); - self.previous_requests.retain(|(h, n), _| roots.contains(&(h, n, &()))); - - Ok(()) - } - - /// Clear all data. - fn clear(&mut self) { - self.justifications = ForkTree::new(); - self.pending_requests.clear(); - self.peer_requests.clear(); - self.previous_requests.clear(); - } + trace!(target: "sync", "Requesting justification for block #{} from {}", request.0, peer); + let request = message::generic::BlockRequest { + id: 0, + fields: message::BlockAttributes::JUSTIFICATION, + from: message::FromBlock::Hash(request.0), + to: None, + direction: message::Direction::Ascending, + max: Some(1), + }; + + protocol.send_message(peer, GenericMessage::BlockRequest(request)); + } + + self.pending_requests.append(&mut unhandled_requests); + + trace!(target: "sync", "Dispatched {} justification requests ({} pending)", + initial_pending_requests - self.pending_requests.len(), + self.pending_requests.len(), + ); + } + + /// Queue a justification request (without dispatching it). + fn queue_request(&mut self, justification: &PendingJustification, is_descendent_of: F) + where + F: Fn(&B::Hash, &B::Hash) -> Result, + { + match self.justifications.import( + justification.0.clone(), + justification.1.clone(), + (), + &is_descendent_of, + ) { + Ok(true) => { + // this is a new root so we add it to the current `pending_requests` + self.pending_requests + .push_back((justification.0, justification.1)); + } + Err(err) => { + warn!(target: "sync", "Failed to insert requested justification {:?} {:?} into tree: {:?}", + justification.0, + justification.1, + err, + ); + return; + } + _ => {} + }; + } + + /// Retry any pending request if a peer disconnected. + fn peer_disconnected(&mut self, who: PeerId) { + if let Some(request) = self.peer_requests.remove(&who) { + self.pending_requests.push_front(request); + } + } + + /// Process the import of a justification. + /// Queues a retry in case the import failed. 
+ fn justification_import_result(&mut self, hash: B::Hash, number: NumberFor, success: bool) { + let request = (hash, number); + + if !self.importing_requests.remove(&request) { + debug!(target: "sync", "Got justification import result for unknown justification {:?} {:?} request.", + request.0, + request.1, + ); + + return; + }; + + if success { + if self.justifications.finalize_root(&request.0).is_none() { + warn!(target: "sync", "Imported justification for {:?} {:?} which isn't a root in the tree: {:?}", + request.0, + request.1, + self.justifications.roots().collect::>(), + ); + + return; + }; + + self.previous_requests.clear(); + self.peer_requests.clear(); + self.pending_requests = self + .justifications + .roots() + .map(|(h, n, _)| (h.clone(), n.clone())) + .collect(); + + return; + } + self.pending_requests.push_front(request); + } + + /// Processes the response for the request previously sent to the given + /// peer. Queues a retry in case the given justification + /// was `None`. + fn on_response( + &mut self, + who: PeerId, + justification: Option, + import_queue: &ImportQueue, + ) { + // we assume that the request maps to the given response, this is + // currently enforced by the outer network protocol before passing on + // messages to chain sync. + if let Some(request) = self.peer_requests.remove(&who) { + if let Some(justification) = justification { + import_queue.import_justification(who.clone(), request.0, request.1, justification); + self.importing_requests.insert(request); + return; + } + + self.previous_requests + .entry(request) + .or_insert(Vec::new()) + .push((who, Instant::now())); + + self.pending_requests.push_front(request); + } + } + + /// Removes any pending justification requests for blocks lower than the + /// given best finalized. + fn on_block_finalized( + &mut self, + best_finalized_hash: &B::Hash, + best_finalized_number: NumberFor, + is_descendent_of: F, + ) -> Result<(), fork_tree::Error> + where + F: Fn(&B::Hash, &B::Hash) -> Result, + { + if self + .importing_requests + .contains(&(*best_finalized_hash, best_finalized_number)) + { + // we imported this justification ourselves, so we should get back a response + // from the import queue through `justification_import_result` + return Ok(()); + } + + self.justifications.finalize( + best_finalized_hash, + best_finalized_number, + &is_descendent_of, + )?; + + let roots = self.justifications.roots().collect::>(); + + self.pending_requests + .retain(|(h, n)| roots.contains(&(h, n, &()))); + self.peer_requests + .retain(|_, (h, n)| roots.contains(&(h, n, &()))); + self.previous_requests + .retain(|(h, n), _| roots.contains(&(h, n, &()))); + + Ok(()) + } + + /// Clear all data. + fn clear(&mut self) { + self.justifications = ForkTree::new(); + self.pending_requests.clear(); + self.peer_requests.clear(); + self.previous_requests.clear(); + } } /// Relay chain sync strategy. 
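The subtlest part of `dispatch` is the rotation: candidate peers cycle through a `VecDeque`, and `last_peer` marks a full revolution so that a request no peer can answer is parked in `unhandled_requests` instead of spinning forever. The same control flow over toy types (numbers for requests, strings for peers; a peer that accepts a request leaves the rotation, mirroring the `DownloadingJustification` state change):

    use std::collections::VecDeque;

    fn dispatch(
        mut available: VecDeque<(&'static str, u64)>, // (peer, best block number)
        mut pending: VecDeque<u64>,                   // requested block numbers
    ) -> Vec<(u64, &'static str)> {
        let mut last_peer = available.back().map(|p| p.0);
        let mut unhandled = VecDeque::new();
        let mut assigned = Vec::new();

        loop {
            let (peer, best) = match available.pop_front() {
                Some(p) => p,
                None => break,
            };
            let request = match pending.front() {
                Some(r) => *r,
                None => break,
            };
            if best < request {
                // Peer cannot serve this request; rotate it to the back.
                available.push_back((peer, best));
                if Some(peer) == last_peer {
                    // Full revolution: nobody can serve it, park the request.
                    last_peer = available.back().map(|p| p.0);
                    unhandled.push_back(pending.pop_front().expect("front checked above; qed"));
                }
                continue;
            }
            last_peer = available.back().map(|p| p.0);
            assigned.push((pending.pop_front().expect("front checked above; qed"), peer));
        }

        // Mirrors self.pending_requests.append(&mut unhandled_requests).
        pending.append(&mut unhandled);
        assigned
    }

    fn main() {
        let peers = VecDeque::from(vec![("alice", 5), ("bob", 100)]);
        let requests = VecDeque::from(vec![50, 200]);
        // 50 goes to bob; 200 is beyond every peer and ends up unhandled.
        assert_eq!(dispatch(peers, requests), vec![(50, "bob")]);
    }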
pub struct ChainSync { - genesis_hash: B::Hash, - peers: HashMap>, - blocks: BlockCollection, - best_queued_number: NumberFor, - best_queued_hash: B::Hash, - required_block_attributes: message::BlockAttributes, - justifications: PendingJustifications, - import_queue: Box>, - queue_blocks: HashSet, - best_importing_number: NumberFor, - is_stopping: AtomicBool, - is_offline: Arc, - is_major_syncing: Arc, + genesis_hash: B::Hash, + peers: HashMap>, + blocks: BlockCollection, + best_queued_number: NumberFor, + best_queued_hash: B::Hash, + required_block_attributes: message::BlockAttributes, + justifications: PendingJustifications, + import_queue: Box>, + queue_blocks: HashSet, + best_importing_number: NumberFor, + is_stopping: AtomicBool, + is_offline: Arc, + is_major_syncing: Arc, } /// Reported sync state. #[derive(Clone, Eq, PartialEq, Debug)] pub enum SyncState { - /// Initial sync is complete, keep-up sync is active. - Idle, - /// Actively catching up with the chain. - Downloading + /// Initial sync is complete, keep-up sync is active. + Idle, + /// Actively catching up with the chain. + Downloading, } /// Syncing status and statistics #[derive(Clone)] pub struct Status { - /// Current global sync state. - pub state: SyncState, - /// Target sync block number. - pub best_seen_block: Option>, - /// Number of peers participating in syncing. - pub num_peers: u32, + /// Current global sync state. + pub state: SyncState, + /// Target sync block number. + pub best_seen_block: Option>, + /// Number of peers participating in syncing. + pub num_peers: u32, } impl Status { - /// Whether the synchronization status is doing major downloading work or - /// is near the head of the chain. - pub fn is_major_syncing(&self) -> bool { - match self.state { - SyncState::Idle => false, - SyncState::Downloading => true, - } - } - - /// Are we all alone? - pub fn is_offline(&self) -> bool { - self.num_peers == 0 - } + /// Whether the synchronization status is doing major downloading work or + /// is near the head of the chain. + pub fn is_major_syncing(&self) -> bool { + match self.state { + SyncState::Idle => false, + SyncState::Downloading => true, + } + } + + /// Are we all alone? + pub fn is_offline(&self) -> bool { + self.num_peers == 0 + } } impl ChainSync { - /// Create a new instance. 
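`Status::is_major_syncing` above ultimately reduces to the `state` helper defined a few lines below: we are `Downloading` whenever the best block seen among peers is more than 5 blocks ahead of our best queued block. Over plain integers (a sketch; the real code goes through `As::sa` because block numbers are generic):

    #[derive(Debug, PartialEq)]
    enum SyncState {
        Idle,
        Downloading,
    }

    // Mirrors ChainSync::state: more than 5 blocks behind => major sync.
    fn state(best_seen: Option<u64>, best_queued: u64) -> SyncState {
        match best_seen {
            Some(n) if n > best_queued && n - best_queued > 5 => SyncState::Downloading,
            _ => SyncState::Idle,
        }
    }

    fn main() {
        assert_eq!(state(Some(100), 10), SyncState::Downloading);
        assert_eq!(state(Some(14), 10), SyncState::Idle); // within the 5-block margin
        assert_eq!(state(None, 10), SyncState::Idle);     // no peers, nothing to chase
    }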
- pub(crate) fn new(
-     is_offline: Arc,
-     is_major_syncing: Arc,
-     role: Roles,
-     info: &ClientInfo,
-     import_queue: Box>
- ) -> Self {
-     let mut required_block_attributes = message::BlockAttributes::HEADER | message::BlockAttributes::JUSTIFICATION;
-     if role.intersects(Roles::FULL | Roles::AUTHORITY) {
-         required_block_attributes |= message::BlockAttributes::BODY;
-     }
-
-     ChainSync {
-         genesis_hash: info.chain.genesis_hash,
-         peers: HashMap::new(),
-         blocks: BlockCollection::new(),
-         best_queued_hash: info.best_queued_hash.unwrap_or(info.chain.best_hash),
-         best_queued_number: info.best_queued_number.unwrap_or(info.chain.best_number),
-         justifications: PendingJustifications::new(),
-         required_block_attributes,
-         import_queue,
-         queue_blocks: Default::default(),
-         best_importing_number: Zero::zero(),
-         is_stopping: Default::default(),
-         is_offline,
-         is_major_syncing,
-     }
- }
-
- fn best_seen_block(&self) -> Option> {
-     self.peers.values().max_by_key(|p| p.best_number).map(|p| p.best_number)
- }
-
- fn state(&self, best_seen: &Option>) -> SyncState {
-     match best_seen {
-         &Some(n) if n > self.best_queued_number && n - self.best_queued_number > As::sa(5) => SyncState::Downloading,
-         _ => SyncState::Idle,
-     }
- }
-
- /// Returns peer sync status (if any).
- pub(crate) fn peer_info(&self, who: &PeerId) -> Option> {
-     self.peers.get(who).map(|peer| {
-         PeerInfo {
-             best_hash: peer.best_hash,
-             best_number: peer.best_number,
-         }
-     })
- }
-
- /// Returns sync status.
- pub(crate) fn status(&self) -> Status {
-     let best_seen = self.best_seen_block();
-     let state = self.state(&best_seen);
-     Status {
-         state: state,
-         best_seen_block: best_seen,
-         num_peers: self.peers.len() as u32,
-     }
- }
-
- /// Handle new connected peer.
- pub(crate) fn new_peer(&mut self, protocol: &mut Context, who: PeerId) {
-     // Initialize some variables to determine if
-     // is_offline or is_major_syncing should be updated
-     // after processing this new peer.
-     let previous_len = self.peers.len();
-     let previous_best_seen = self.best_seen_block();
-     let previous_state = self.state(&previous_best_seen);
-
-     if let Some(info) = protocol.peer_info(&who) {
-         let status = block_status(&*protocol.client(), &self.queue_blocks, info.best_hash);
-         match (status, info.best_number) {
-             (Err(e), _) => {
-                 debug!(target:"sync", "Error reading blockchain: {:?}", e);
-                 let reason = format!("Error legitimately reading blockchain status: {:?}", e);
-                 protocol.report_peer(who, Severity::Useless(reason));
-             },
-             (Ok(BlockStatus::KnownBad), _) => {
-                 let reason = format!("New peer with known bad best block {} ({}).", info.best_hash, info.best_number);
-                 protocol.report_peer(who, Severity::Bad(reason));
-             },
-             (Ok(BlockStatus::Unknown), b) if b == As::sa(0) => {
-                 let reason = format!("New peer with unknown genesis hash {} ({}).", info.best_hash, info.best_number);
-                 protocol.report_peer(who, Severity::Bad(reason));
-             },
-             (Ok(BlockStatus::Unknown), _) if self.queue_blocks.len() > MAJOR_SYNC_BLOCKS => {
-                 // when actively syncing the common point moves too fast.
- debug!(target:"sync", "New peer with unknown best hash {} ({}), assuming common block.", self.best_queued_hash, self.best_queued_number); - self.peers.insert(who, PeerSync { - common_number: self.best_queued_number, - best_hash: info.best_hash, - best_number: info.best_number, - state: PeerSyncState::Available, - recently_announced: Default::default(), - }); - } - (Ok(BlockStatus::Unknown), _) => { - let our_best = self.best_queued_number; - if our_best > As::sa(0) { - let common_best = ::std::cmp::min(our_best, info.best_number); - debug!(target:"sync", "New peer with unknown best hash {} ({}), searching for common ancestor.", info.best_hash, info.best_number); - self.peers.insert(who.clone(), PeerSync { - common_number: As::sa(0), - best_hash: info.best_hash, - best_number: info.best_number, - state: PeerSyncState::AncestorSearch(common_best, AncestorSearchState::ExponentialBackoff(As::sa(1))), - recently_announced: Default::default(), - }); - Self::request_ancestry(protocol, who, common_best) - } else { - // We are at genesis, just start downloading - debug!(target:"sync", "New peer with best hash {} ({}).", info.best_hash, info.best_number); - self.peers.insert(who.clone(), PeerSync { - common_number: As::sa(0), - best_hash: info.best_hash, - best_number: info.best_number, - state: PeerSyncState::Available, - recently_announced: Default::default(), - }); - self.download_new(protocol, who) - } - }, - (Ok(BlockStatus::Queued), _) | (Ok(BlockStatus::InChainWithState), _) | (Ok(BlockStatus::InChainPruned), _) => { - debug!(target:"sync", "New peer with known best hash {} ({}).", info.best_hash, info.best_number); - self.peers.insert(who.clone(), PeerSync { - common_number: info.best_number, - best_hash: info.best_hash, - best_number: info.best_number, - state: PeerSyncState::Available, - recently_announced: Default::default(), - }); - } - } - } - - let current_best_seen = self.best_seen_block(); - let current_state = self.state(¤t_best_seen); - let current_len = self.peers.len(); - if previous_len == 0 && current_len > 0 { - // We were offline, and now we're connected to at least one peer. - self.is_offline.store(false, Ordering::Relaxed); - } - if previous_len < current_len { - // We added a peer, let's see if major_syncing should be updated. - match (previous_state, current_state) { - (SyncState::Idle, SyncState::Downloading) => self.is_major_syncing.store(true, Ordering::Relaxed), - (SyncState::Downloading, SyncState::Idle) => self.is_major_syncing.store(false, Ordering::Relaxed), - _ => {}, - } - } - } - - fn handle_ancestor_search_state( - state: AncestorSearchState, - curr_block_num: NumberFor, - block_hash_match: bool, - ) -> Option<(AncestorSearchState, NumberFor)> { - match state { - AncestorSearchState::ExponentialBackoff(next_distance_to_tip) => { - if block_hash_match && next_distance_to_tip == As::sa(1) { - // We found the ancestor in the first step so there is no need to execute binary search. 
- return None; - } - if block_hash_match { - let left = curr_block_num; - let right = left + next_distance_to_tip / As::sa(2); - let middle = left + (right - left) / As::sa(2); - Some((AncestorSearchState::BinarySearch(left, right), middle)) - } else { - let next_block_num = curr_block_num.checked_sub(&next_distance_to_tip).unwrap_or(As::sa(0)); - let next_distance_to_tip = next_distance_to_tip * As::sa(2); - Some((AncestorSearchState::ExponentialBackoff(next_distance_to_tip), next_block_num)) - } - }, - AncestorSearchState::BinarySearch(mut left, mut right) => { - if left >= curr_block_num { - return None; - } - if block_hash_match { - left = curr_block_num; - } else { - right = curr_block_num; - } - assert!(right >= left); - let middle = left + (right - left) / As::sa(2); - Some((AncestorSearchState::BinarySearch(left, right), middle)) - }, - } - } - - /// Handle new block data. - pub(crate) fn on_block_data( - &mut self, - protocol: &mut Context, - who: PeerId, - request: message::BlockRequest, - response: message::BlockResponse - ) { - let new_blocks: Vec> = if let Some(ref mut peer) = self.peers.get_mut(&who) { - let mut blocks = response.blocks; - if request.direction == message::Direction::Descending { - trace!(target: "sync", "Reversing incoming block list"); - blocks.reverse(); - } - let peer_state = peer.state.clone(); - match peer_state { - PeerSyncState::DownloadingNew(start_block) => { - self.blocks.clear_peer_download(&who); - peer.state = PeerSyncState::Available; - self.blocks.insert(start_block, blocks, who); - self.blocks - .drain(self.best_queued_number + As::sa(1)) - .into_iter() - .map(|block_data| { - IncomingBlock { - hash: block_data.block.hash, - header: block_data.block.header, - body: block_data.block.body, - justification: block_data.block.justification, - origin: block_data.origin, - } - }).collect() - }, - PeerSyncState::DownloadingStale(_) => { - peer.state = PeerSyncState::Available; - blocks.into_iter().map(|b| { - IncomingBlock { - hash: b.hash, - header: b.header, - body: b.body, - justification: b.justification, - origin: Some(who.clone()), - } - }).collect() - }, - PeerSyncState::AncestorSearch(num, state) => { - let block_hash_match = match (blocks.get(0), protocol.client().block_hash(num)) { - (Some(ref block), Ok(maybe_our_block_hash)) => { - trace!(target: "sync", "Got ancestry block #{} ({}) from peer {}", num, block.hash, who); - maybe_our_block_hash.map_or(false, |x| x == block.hash) - }, - (None, _) => { - trace!(target:"sync", "Invalid response when searching for ancestor from {}", who); - protocol.report_peer(who, Severity::Bad("Invalid response when searching for ancestor".to_string())); - return; - }, - (_, Err(e)) => { - let reason = format!("Error answering legitimate blockchain query: {:?}", e); - protocol.report_peer(who, Severity::Useless(reason)); - return; - }, - }; - if block_hash_match && peer.common_number < num { - peer.common_number = num; - } - if !block_hash_match && num == As::sa(0) { - trace!(target:"sync", "Ancestry search: genesis mismatch for peer {}", who); - protocol.report_peer(who, Severity::Bad("Ancestry search: genesis mismatch for peer".to_string())); - return; - } - if let Some((next_state, next_block_num)) = Self::handle_ancestor_search_state(state, num, block_hash_match) { - peer.state = PeerSyncState::AncestorSearch(next_block_num, next_state); - Self::request_ancestry(protocol, who, next_block_num); - return; - } else { - peer.state = PeerSyncState::Available; - vec![] - } - }, - PeerSyncState::Available | 
PeerSyncState::DownloadingJustification(..) => Vec::new(), - } - } else { - Vec::new() - }; - - let is_recent = new_blocks - .first() - .map(|block| self.peers.iter().any(|(_, peer)| peer.recently_announced.contains(&block.hash))) - .unwrap_or(false); - let origin = if is_recent { BlockOrigin::NetworkBroadcast } else { BlockOrigin::NetworkInitialSync }; - - if let Some((hash, number)) = new_blocks.last() - .and_then(|b| b.header.as_ref().map(|h| (b.hash.clone(), *h.number()))) - { - trace!(target:"sync", "Accepted {} blocks ({:?}) with origin {:?}", new_blocks.len(), hash, origin); - self.block_queued(&hash, number); - } - self.maintain_sync(protocol); - let new_best_importing_number = new_blocks - .last() - .and_then(|b| b.header.as_ref().map(|h| h.number().clone())) - .unwrap_or_else(|| Zero::zero()); - self.queue_blocks - .extend(new_blocks.iter().map(|b| b.hash.clone())); - self.best_importing_number = max(new_best_importing_number, self.best_importing_number); - self.import_queue.import_blocks(origin, new_blocks); - } - - /// Handle new justification data. - pub(crate) fn on_block_justification_data( - &mut self, - protocol: &mut Context, - who: PeerId, - _request: message::BlockRequest, - response: message::BlockResponse, - ) { - if let Some(ref mut peer) = self.peers.get_mut(&who) { - if let PeerSyncState::DownloadingJustification(hash) = peer.state { - peer.state = PeerSyncState::Available; - - // we only request one justification at a time - match response.blocks.into_iter().next() { - Some(response) => { - if hash != response.hash { - let msg = format!( - "Invalid block justification provided: requested: {:?} got: {:?}", - hash, - response.hash, - ); - - protocol.report_peer(who, Severity::Bad(msg)); - return; - } - - self.justifications.on_response( - who, - response.justification, - &*self.import_queue, - ); - }, - None => { - // we might have asked the peer for a justification on a block that we thought it had - // (regardless of whether it had a justification for it or not). - trace!(target: "sync", "Peer {:?} provided empty response for justification request {:?}", - who, - hash, - ); - return; - }, - } - } - } - - self.maintain_sync(protocol); - } - - /// A batch of blocks have been processed, with or without errors. - pub fn blocks_processed(&mut self, processed_blocks: Vec, has_error: bool) { - for hash in processed_blocks { - self.queue_blocks.remove(&hash); - } - if has_error { - self.best_importing_number = Zero::zero(); - } - } - - /// Maintain the sync process (download new blocks, fetch justifications). - pub fn maintain_sync(&mut self, protocol: &mut Context) { - if self.is_stopping.load(Ordering::SeqCst) { - return - } - let peers: Vec = self.peers.keys().map(|p| p.clone()).collect(); - for peer in peers { - self.download_new(protocol, peer); - } - self.justifications.dispatch(&mut self.peers, protocol); - } - - /// Called periodically to perform any time-based actions. - pub fn tick(&mut self, protocol: &mut Context) { - self.justifications.dispatch(&mut self.peers, protocol); - } - - /// Request a justification for the given block. - /// - /// Queues a new justification request and tries to dispatch all pending requests. - pub fn request_justification(&mut self, hash: &B::Hash, number: NumberFor, protocol: &mut Context) { - self.justifications.queue_request( - &(*hash, number), - |base, block| protocol.client().is_descendent_of(base, block), - ); - - self.justifications.dispatch(&mut self.peers, protocol); - } - - /// Clears all pending justification requests. 
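`handle_ancestor_search_state` above is a two-phase search: exponential backoff from the tip until a probe lands on a common block, then binary search inside that bracket. A self-contained rendering over plain `u64` block numbers (same algorithm, simplified bounds handling; the real code also reports genesis mismatches separately):

    #[derive(Copy, Clone, Debug)]
    enum SearchState {
        ExponentialBackoff(u64), // distance to jump back next
        BinarySearch(u64, u64),  // (left, right) bounds
    }

    // Returns the next state and the next block number to probe, or None when done.
    fn next_step(state: SearchState, curr: u64, hash_match: bool) -> Option<(SearchState, u64)> {
        match state {
            SearchState::ExponentialBackoff(dist) => {
                if hash_match && dist == 1 {
                    return None; // found the ancestor on the first probe
                }
                if hash_match {
                    // Bracket found: the ancestor lies between curr and curr + dist/2.
                    let left = curr;
                    let right = left + dist / 2;
                    let middle = left + (right - left) / 2;
                    Some((SearchState::BinarySearch(left, right), middle))
                } else {
                    // Keep doubling the jump until we land on a common block.
                    let next = curr.saturating_sub(dist);
                    Some((SearchState::ExponentialBackoff(dist * 2), next))
                }
            }
            SearchState::BinarySearch(mut left, mut right) => {
                if left >= curr {
                    return None;
                }
                if hash_match {
                    left = curr;
                } else {
                    right = curr;
                }
                let middle = left + (right - left) / 2;
                Some((SearchState::BinarySearch(left, right), middle))
            }
        }
    }

    fn main() {
        // Their chain matches ours up to block 90; we probe from block 99.
        let ours_matches = |n: u64| n <= 90;
        let (mut state, mut probe) = (SearchState::ExponentialBackoff(1), 99);
        while let Some((next_state, next_probe)) = next_step(state, probe, ours_matches(probe)) {
            state = next_state;
            probe = next_probe;
        }
        assert!(ours_matches(probe), "search ended on a common block (#{})", probe);
    }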
- pub fn clear_justification_requests(&mut self) { - self.justifications.clear(); - } - - pub fn justification_import_result(&mut self, hash: B::Hash, number: NumberFor, success: bool) { - self.justifications.justification_import_result(hash, number, success); - } - - pub fn stop(&self) { - self.is_stopping.store(true, Ordering::SeqCst); - self.import_queue.stop(); - } - - /// Notify about successful import of the given block. - pub fn block_imported(&mut self, hash: &B::Hash, number: NumberFor) { - trace!(target: "sync", "Block imported successfully {} ({})", number, hash); - } - - /// Notify about finalization of the given block. - pub fn on_block_finalized(&mut self, hash: &B::Hash, number: NumberFor, protocol: &mut Context) { - if let Err(err) = self.justifications.on_block_finalized( - hash, - number, - |base, block| protocol.client().is_descendent_of(base, block), - ) { - warn!(target: "sync", "Error cleaning up pending justification requests: {:?}", err); - }; - } - - fn block_queued(&mut self, hash: &B::Hash, number: NumberFor) { - let best_seen = self.best_seen_block(); - let previous_state = self.state(&best_seen); - if number > self.best_queued_number { - self.best_queued_number = number; - self.best_queued_hash = *hash; - } - let current_state = self.state(&best_seen); - // If the latest queued block changed our state, update is_major_syncing. - match (previous_state, current_state) { - (SyncState::Idle, SyncState::Downloading) => self.is_major_syncing.store(true, Ordering::Relaxed), - (SyncState::Downloading, SyncState::Idle) => self.is_major_syncing.store(false, Ordering::Relaxed), - _ => {}, - } - // Update common blocks - for (n, peer) in self.peers.iter_mut() { - if let PeerSyncState::AncestorSearch(_, _) = peer.state { - // Abort search. - peer.state = PeerSyncState::Available; - } - trace!(target: "sync", "Updating peer {} info, ours={}, common={}, their best={}", n, number, peer.common_number, peer.best_number); - if peer.best_number >= number { - peer.common_number = number; - } else { - peer.common_number = peer.best_number; - } - } - } - - pub(crate) fn update_chain_info(&mut self, best_header: &B::Header) { - let hash = best_header.hash(); - self.block_queued(&hash, best_header.number().clone()) - } - - /// Handle new block announcement. 
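`block_queued` above, like `new_peer` earlier and `peer_disconnected` just below, updates the shared flags in an edge-triggered way: compute the `SyncState` before and after the change, and only store into the `AtomicBool` when the state actually flips. Factored out as a helper (illustrative; the real code inlines the match at each call site):

    use std::sync::atomic::{AtomicBool, Ordering};

    #[derive(Copy, Clone, PartialEq)]
    enum SyncState {
        Idle,
        Downloading,
    }

    // Store only on a transition, so unrelated updates never touch the flag.
    fn update_major_syncing(previous: SyncState, current: SyncState, flag: &AtomicBool) {
        match (previous, current) {
            (SyncState::Idle, SyncState::Downloading) => flag.store(true, Ordering::Relaxed),
            (SyncState::Downloading, SyncState::Idle) => flag.store(false, Ordering::Relaxed),
            _ => {}
        }
    }

    fn main() {
        let flag = AtomicBool::new(false);
        update_major_syncing(SyncState::Idle, SyncState::Downloading, &flag);
        assert!(flag.load(Ordering::Relaxed));
        update_major_syncing(SyncState::Downloading, SyncState::Downloading, &flag); // no edge, no store
        assert!(flag.load(Ordering::Relaxed));
    }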
- pub(crate) fn on_block_announce(&mut self, protocol: &mut Context, who: PeerId, hash: B::Hash, header: &B::Header) { - let number = *header.number(); - if number <= As::sa(0) { - trace!(target: "sync", "Ignored invalid block announcement from {}: {}", who, hash); - return; - } - let parent_status = block_status(&*protocol.client(), &self.queue_blocks, header.parent_hash().clone()).ok() - .unwrap_or(BlockStatus::Unknown); - let known_parent = parent_status != BlockStatus::Unknown; - let ancient_parent = parent_status == BlockStatus::InChainPruned; - - let known = self.is_known(protocol, &hash); - if let Some(ref mut peer) = self.peers.get_mut(&who) { - while peer.recently_announced.len() >= ANNOUNCE_HISTORY_SIZE { - peer.recently_announced.pop_front(); - } - peer.recently_announced.push_back(hash.clone()); - if number > peer.best_number { - // update their best block - peer.best_number = number; - peer.best_hash = hash; - } - if let PeerSyncState::AncestorSearch(_, _) = peer.state { - return; - } - if header.parent_hash() == &self.best_queued_hash || known_parent { - peer.common_number = number - As::sa(1); - } else if known { - peer.common_number = number - } - } else { - return; - } - - if !(known || self.is_already_downloading(&hash)) { - let stale = number <= self.best_queued_number; - if stale { - if !(known_parent || self.is_already_downloading(header.parent_hash())) { - if protocol.client().block_status(&BlockId::Number(*header.number())) - .unwrap_or(BlockStatus::Unknown) == BlockStatus::InChainPruned - { - trace!(target: "sync", "Ignored unknown ancient block announced from {}: {} {:?}", who, hash, header); - } else { - trace!(target: "sync", "Considering new unknown stale block announced from {}: {} {:?}", who, hash, header); - self.download_unknown_stale(protocol, who, &hash); - } - } else { - if ancient_parent { - trace!(target: "sync", "Ignored ancient stale block announced from {}: {} {:?}", who, hash, header); - } else { - self.download_stale(protocol, who, &hash); - } - } - } else { - if ancient_parent { - trace!(target: "sync", "Ignored ancient block announced from {}: {} {:?}", who, hash, header); - } else { - trace!(target: "sync", "Considering new block announced from {}: {} {:?}", who, hash, header); - self.download_new(protocol, who); - } - } - } else { - trace!(target: "sync", "Known block announce from {}: {}", who, hash); - } - } - - fn is_already_downloading(&self, hash: &B::Hash) -> bool { - self.peers.iter().any(|(_, p)| p.state == PeerSyncState::DownloadingStale(*hash)) - } - - fn is_known(&self, protocol: &mut Context, hash: &B::Hash) -> bool { - block_status(&*protocol.client(), &self.queue_blocks, *hash).ok().map_or(false, |s| s != BlockStatus::Unknown) - } - - /// Handle disconnected peer. - pub(crate) fn peer_disconnected(&mut self, protocol: &mut Context, who: PeerId) { - let previous_best_seen = self.best_seen_block(); - let previous_state = self.state(&previous_best_seen); - self.blocks.clear_peer_download(&who); - self.peers.remove(&who); - if self.peers.len() == 0 { - // We're not connected to any peer anymore. - self.is_offline.store(true, Ordering::Relaxed); - } - let current_best_seen = self.best_seen_block(); - let current_state = self.state(¤t_best_seen); - // We removed a peer, let's see if this put us in idle state and is_major_syncing should be updated. 
- match (previous_state, current_state) { - (SyncState::Downloading, SyncState::Idle) => self.is_major_syncing.store(false, Ordering::Relaxed), - _ => {}, - } - self.justifications.peer_disconnected(who); - self.maintain_sync(protocol); - } - - /// Restart the sync process. - pub(crate) fn restart(&mut self, protocol: &mut Context) { - self.queue_blocks.clear(); - self.best_importing_number = Zero::zero(); - self.blocks.clear(); - match protocol.client().info() { - Ok(info) => { - self.best_queued_hash = info.best_queued_hash.unwrap_or(info.chain.best_hash); - self.best_queued_number = info.best_queued_number.unwrap_or(info.chain.best_number); - debug!(target:"sync", "Restarted with {} ({})", self.best_queued_number, self.best_queued_hash); - }, - Err(e) => { - debug!(target:"sync", "Error reading blockchain: {:?}", e); - self.best_queued_hash = self.genesis_hash; - self.best_queued_number = As::sa(0); - } - } - let ids: Vec = self.peers.drain().map(|(id, _)| id).collect(); - for id in ids { - self.new_peer(protocol, id); - } - } - - /// Clear all sync data. - pub(crate) fn clear(&mut self) { - self.blocks.clear(); - self.peers.clear(); - } - - // Download old block with known parent. - fn download_stale(&mut self, protocol: &mut Context, who: PeerId, hash: &B::Hash) { - if let Some(ref mut peer) = self.peers.get_mut(&who) { - match peer.state { - PeerSyncState::Available => { - let request = message::generic::BlockRequest { - id: 0, - fields: self.required_block_attributes.clone(), - from: message::FromBlock::Hash(*hash), - to: None, - direction: message::Direction::Ascending, - max: Some(1), - }; - peer.state = PeerSyncState::DownloadingStale(*hash); - protocol.send_message(who, GenericMessage::BlockRequest(request)); - }, - _ => (), - } - } - } - - // Download old block with unknown parent. 
- fn download_unknown_stale(&mut self, protocol: &mut Context, who: PeerId, hash: &B::Hash) { - if let Some(ref mut peer) = self.peers.get_mut(&who) { - match peer.state { - PeerSyncState::Available => { - let request = message::generic::BlockRequest { - id: 0, - fields: self.required_block_attributes.clone(), - from: message::FromBlock::Hash(*hash), - to: None, - direction: message::Direction::Descending, - max: Some(MAX_UNKNOWN_FORK_DOWNLOAD_LEN), - }; - peer.state = PeerSyncState::DownloadingStale(*hash); - protocol.send_message(who, GenericMessage::BlockRequest(request)); - }, - _ => (), - } - } - } - - // Issue a request for a peer to download new blocks, if any are available - fn download_new(&mut self, protocol: &mut Context, who: PeerId) { - if let Some(ref mut peer) = self.peers.get_mut(&who) { - // when there are too many blocks in the queue => do not try to download new blocks - if self.queue_blocks.len() > MAX_IMPORTING_BLOCKS { - trace!(target: "sync", "Too many blocks in the queue."); - return; - } - match peer.state { - PeerSyncState::Available => { - trace!(target: "sync", "Considering new block download from {}, common block is {}, best is {:?}", who, peer.common_number, peer.best_number); - if let Some(range) = self.blocks.needed_blocks(who.clone(), MAX_BLOCKS_TO_REQUEST, peer.best_number, peer.common_number) { - trace!(target: "sync", "Requesting blocks from {}, ({} to {})", who, range.start, range.end); - let request = message::generic::BlockRequest { - id: 0, - fields: self.required_block_attributes.clone(), - from: message::FromBlock::Number(range.start), - to: None, - direction: message::Direction::Ascending, - max: Some((range.end - range.start).as_() as u32), - }; - peer.state = PeerSyncState::DownloadingNew(range.start); - protocol.send_message(who, GenericMessage::BlockRequest(request)); - } else { - trace!(target: "sync", "Nothing to request"); - } - }, - _ => trace!(target: "sync", "Peer {} is busy", who), - } - } - } - - fn request_ancestry(protocol: &mut Context, who: PeerId, block: NumberFor) { - trace!(target: "sync", "Requesting ancestry block #{} from {}", block, who); - let request = message::generic::BlockRequest { - id: 0, - fields: message::BlockAttributes::HEADER | message::BlockAttributes::JUSTIFICATION, - from: message::FromBlock::Number(block), - to: None, - direction: message::Direction::Ascending, - max: Some(1), - }; - protocol.send_message(who, GenericMessage::BlockRequest(request)); - } + /// Create a new instance. 
+ pub(crate) fn new( + is_offline: Arc, + is_major_syncing: Arc, + role: Roles, + info: &ClientInfo, + import_queue: Box>, + ) -> Self { + let mut required_block_attributes = + message::BlockAttributes::HEADER | message::BlockAttributes::JUSTIFICATION; + if role.intersects(Roles::FULL | Roles::AUTHORITY) { + required_block_attributes |= message::BlockAttributes::BODY; + } + + ChainSync { + genesis_hash: info.chain.genesis_hash, + peers: HashMap::new(), + blocks: BlockCollection::new(), + best_queued_hash: info.best_queued_hash.unwrap_or(info.chain.best_hash), + best_queued_number: info.best_queued_number.unwrap_or(info.chain.best_number), + justifications: PendingJustifications::new(), + required_block_attributes, + import_queue, + queue_blocks: Default::default(), + best_importing_number: Zero::zero(), + is_stopping: Default::default(), + is_offline, + is_major_syncing, + } + } + + fn best_seen_block(&self) -> Option> { + self.peers + .values() + .max_by_key(|p| p.best_number) + .map(|p| p.best_number) + } + + fn state(&self, best_seen: &Option>) -> SyncState { + match best_seen { + &Some(n) if n > self.best_queued_number && n - self.best_queued_number > As::sa(5) => { + SyncState::Downloading + } + _ => SyncState::Idle, + } + } + + /// Returns peer sync status (if any). + pub(crate) fn peer_info(&self, who: &PeerId) -> Option> { + self.peers.get(who).map(|peer| PeerInfo { + best_hash: peer.best_hash, + best_number: peer.best_number, + }) + } + + /// Returns sync status. + pub(crate) fn status(&self) -> Status { + let best_seen = self.best_seen_block(); + let state = self.state(&best_seen); + Status { + state: state, + best_seen_block: best_seen, + num_peers: self.peers.len() as u32, + } + } + + /// Handle new connected peer. + pub(crate) fn new_peer(&mut self, protocol: &mut Context, who: PeerId) { + // Initialize some variables to determine if + // is_offline or is_major_syncing should be updated + // after processing this new peer. + let previous_len = self.peers.len(); + let previous_best_seen = self.best_seen_block(); + let previous_state = self.state(&previous_best_seen); + + if let Some(info) = protocol.peer_info(&who) { + let status = block_status(&*protocol.client(), &self.queue_blocks, info.best_hash); + match (status, info.best_number) { + (Err(e), _) => { + debug!(target:"sync", "Error reading blockchain: {:?}", e); + let reason = format!("Error legitimately reading blockchain status: {:?}", e); + protocol.report_peer(who, Severity::Useless(reason)); + } + (Ok(BlockStatus::KnownBad), _) => { + let reason = format!( + "New peer with known bad best block {} ({}).", + info.best_hash, info.best_number + ); + protocol.report_peer(who, Severity::Bad(reason)); + } + (Ok(BlockStatus::Unknown), b) if b == As::sa(0) => { + let reason = format!( + "New peer with unknown genesis hash {} ({}).", + info.best_hash, info.best_number + ); + protocol.report_peer(who, Severity::Bad(reason)); + } + (Ok(BlockStatus::Unknown), _) if self.queue_blocks.len() > MAJOR_SYNC_BLOCKS => { + // when actively syncing the common point moves too fast.
+ debug!(target:"sync", "New peer with unknown best hash {} ({}), assuming common block.", self.best_queued_hash, self.best_queued_number); + self.peers.insert( + who, + PeerSync { + common_number: self.best_queued_number, + best_hash: info.best_hash, + best_number: info.best_number, + state: PeerSyncState::Available, + recently_announced: Default::default(), + }, + ); + } + (Ok(BlockStatus::Unknown), _) => { + let our_best = self.best_queued_number; + if our_best > As::sa(0) { + let common_best = ::std::cmp::min(our_best, info.best_number); + debug!(target:"sync", "New peer with unknown best hash {} ({}), searching for common ancestor.", info.best_hash, info.best_number); + self.peers.insert( + who.clone(), + PeerSync { + common_number: As::sa(0), + best_hash: info.best_hash, + best_number: info.best_number, + state: PeerSyncState::AncestorSearch( + common_best, + AncestorSearchState::ExponentialBackoff(As::sa(1)), + ), + recently_announced: Default::default(), + }, + ); + Self::request_ancestry(protocol, who, common_best) + } else { + // We are at genesis, just start downloading + debug!(target:"sync", "New peer with best hash {} ({}).", info.best_hash, info.best_number); + self.peers.insert( + who.clone(), + PeerSync { + common_number: As::sa(0), + best_hash: info.best_hash, + best_number: info.best_number, + state: PeerSyncState::Available, + recently_announced: Default::default(), + }, + ); + self.download_new(protocol, who) + } + } + (Ok(BlockStatus::Queued), _) + | (Ok(BlockStatus::InChainWithState), _) + | (Ok(BlockStatus::InChainPruned), _) => { + debug!(target:"sync", "New peer with known best hash {} ({}).", info.best_hash, info.best_number); + self.peers.insert( + who.clone(), + PeerSync { + common_number: info.best_number, + best_hash: info.best_hash, + best_number: info.best_number, + state: PeerSyncState::Available, + recently_announced: Default::default(), + }, + ); + } + } + } + + let current_best_seen = self.best_seen_block(); + let current_state = self.state(¤t_best_seen); + let current_len = self.peers.len(); + if previous_len == 0 && current_len > 0 { + // We were offline, and now we're connected to at least one peer. + self.is_offline.store(false, Ordering::Relaxed); + } + if previous_len < current_len { + // We added a peer, let's see if major_syncing should be updated. + match (previous_state, current_state) { + (SyncState::Idle, SyncState::Downloading) => { + self.is_major_syncing.store(true, Ordering::Relaxed) + } + (SyncState::Downloading, SyncState::Idle) => { + self.is_major_syncing.store(false, Ordering::Relaxed) + } + _ => {} + } + } + } + + fn handle_ancestor_search_state( + state: AncestorSearchState, + curr_block_num: NumberFor, + block_hash_match: bool, + ) -> Option<(AncestorSearchState, NumberFor)> { + match state { + AncestorSearchState::ExponentialBackoff(next_distance_to_tip) => { + if block_hash_match && next_distance_to_tip == As::sa(1) { + // We found the ancestor in the first step so there is no need to execute binary search. 
+ return None; + } + if block_hash_match { + let left = curr_block_num; + let right = left + next_distance_to_tip / As::sa(2); + let middle = left + (right - left) / As::sa(2); + Some((AncestorSearchState::BinarySearch(left, right), middle)) + } else { + let next_block_num = curr_block_num + .checked_sub(&next_distance_to_tip) + .unwrap_or(As::sa(0)); + let next_distance_to_tip = next_distance_to_tip * As::sa(2); + Some(( + AncestorSearchState::ExponentialBackoff(next_distance_to_tip), + next_block_num, + )) + } + } + AncestorSearchState::BinarySearch(mut left, mut right) => { + if left >= curr_block_num { + return None; + } + if block_hash_match { + left = curr_block_num; + } else { + right = curr_block_num; + } + assert!(right >= left); + let middle = left + (right - left) / As::sa(2); + Some((AncestorSearchState::BinarySearch(left, right), middle)) + } + } + } + + /// Handle new block data. + pub(crate) fn on_block_data( + &mut self, + protocol: &mut Context, + who: PeerId, + request: message::BlockRequest, + response: message::BlockResponse, + ) { + let new_blocks: Vec> = if let Some(ref mut peer) = self.peers.get_mut(&who) + { + let mut blocks = response.blocks; + if request.direction == message::Direction::Descending { + trace!(target: "sync", "Reversing incoming block list"); + blocks.reverse(); + } + let peer_state = peer.state.clone(); + match peer_state { + PeerSyncState::DownloadingNew(start_block) => { + self.blocks.clear_peer_download(&who); + peer.state = PeerSyncState::Available; + self.blocks.insert(start_block, blocks, who); + self.blocks + .drain(self.best_queued_number + As::sa(1)) + .into_iter() + .map(|block_data| IncomingBlock { + hash: block_data.block.hash, + header: block_data.block.header, + body: block_data.block.body, + justification: block_data.block.justification, + origin: block_data.origin, + }) + .collect() + } + PeerSyncState::DownloadingStale(_) => { + peer.state = PeerSyncState::Available; + blocks + .into_iter() + .map(|b| IncomingBlock { + hash: b.hash, + header: b.header, + body: b.body, + justification: b.justification, + origin: Some(who.clone()), + }) + .collect() + } + PeerSyncState::AncestorSearch(num, state) => { + let block_hash_match = match (blocks.get(0), protocol.client().block_hash(num)) + { + (Some(ref block), Ok(maybe_our_block_hash)) => { + trace!(target: "sync", "Got ancestry block #{} ({}) from peer {}", num, block.hash, who); + maybe_our_block_hash.map_or(false, |x| x == block.hash) + } + (None, _) => { + trace!(target:"sync", "Invalid response when searching for ancestor from {}", who); + protocol.report_peer( + who, + Severity::Bad( + "Invalid response when searching for ancestor".to_string(), + ), + ); + return; + } + (_, Err(e)) => { + let reason = + format!("Error answering legitimate blockchain query: {:?}", e); + protocol.report_peer(who, Severity::Useless(reason)); + return; + } + }; + if block_hash_match && peer.common_number < num { + peer.common_number = num; + } + if !block_hash_match && num == As::sa(0) { + trace!(target:"sync", "Ancestry search: genesis mismatch for peer {}", who); + protocol.report_peer( + who, + Severity::Bad("Ancestry search: genesis mismatch for peer".to_string()), + ); + return; + } + if let Some((next_state, next_block_num)) = + Self::handle_ancestor_search_state(state, num, block_hash_match) + { + peer.state = PeerSyncState::AncestorSearch(next_block_num, next_state); + Self::request_ancestry(protocol, who, next_block_num); + return; + } else { + peer.state = PeerSyncState::Available; + vec![] + 
} + } + PeerSyncState::Available | PeerSyncState::DownloadingJustification(..) => { + Vec::new() + } + } + } else { + Vec::new() + }; + + let is_recent = new_blocks + .first() + .map(|block| { + self.peers + .iter() + .any(|(_, peer)| peer.recently_announced.contains(&block.hash)) + }) + .unwrap_or(false); + let origin = if is_recent { + BlockOrigin::NetworkBroadcast + } else { + BlockOrigin::NetworkInitialSync + }; + + if let Some((hash, number)) = new_blocks + .last() + .and_then(|b| b.header.as_ref().map(|h| (b.hash.clone(), *h.number()))) + { + trace!(target:"sync", "Accepted {} blocks ({:?}) with origin {:?}", new_blocks.len(), hash, origin); + self.block_queued(&hash, number); + } + self.maintain_sync(protocol); + let new_best_importing_number = new_blocks + .last() + .and_then(|b| b.header.as_ref().map(|h| h.number().clone())) + .unwrap_or_else(|| Zero::zero()); + self.queue_blocks + .extend(new_blocks.iter().map(|b| b.hash.clone())); + self.best_importing_number = max(new_best_importing_number, self.best_importing_number); + self.import_queue.import_blocks(origin, new_blocks); + } + + /// Handle new justification data. + pub(crate) fn on_block_justification_data( + &mut self, + protocol: &mut Context, + who: PeerId, + _request: message::BlockRequest, + response: message::BlockResponse, + ) { + if let Some(ref mut peer) = self.peers.get_mut(&who) { + if let PeerSyncState::DownloadingJustification(hash) = peer.state { + peer.state = PeerSyncState::Available; + + // we only request one justification at a time + match response.blocks.into_iter().next() { + Some(response) => { + if hash != response.hash { + let msg = format!( + "Invalid block justification provided: requested: {:?} got: {:?}", + hash, response.hash, + ); + + protocol.report_peer(who, Severity::Bad(msg)); + return; + } + + self.justifications.on_response( + who, + response.justification, + &*self.import_queue, + ); + } + None => { + // we might have asked the peer for a justification on a block that we thought it had + // (regardless of whether it had a justification for it or not). + trace!(target: "sync", "Peer {:?} provided empty response for justification request {:?}", + who, + hash, + ); + return; + } + } + } + } + + self.maintain_sync(protocol); + } + + /// A batch of blocks have been processed, with or without errors. + pub fn blocks_processed(&mut self, processed_blocks: Vec, has_error: bool) { + for hash in processed_blocks { + self.queue_blocks.remove(&hash); + } + if has_error { + self.best_importing_number = Zero::zero(); + } + } + + /// Maintain the sync process (download new blocks, fetch justifications). + pub fn maintain_sync(&mut self, protocol: &mut Context) { + if self.is_stopping.load(Ordering::SeqCst) { + return; + } + let peers: Vec = self.peers.keys().map(|p| p.clone()).collect(); + for peer in peers { + self.download_new(protocol, peer); + } + self.justifications.dispatch(&mut self.peers, protocol); + } + + /// Called periodically to perform any time-based actions. + pub fn tick(&mut self, protocol: &mut Context) { + self.justifications.dispatch(&mut self.peers, protocol); + } + + /// Request a justification for the given block. + /// + /// Queues a new justification request and tries to dispatch all pending requests. 
+ pub fn request_justification( + &mut self, + hash: &B::Hash, + number: NumberFor, + protocol: &mut Context, + ) { + self.justifications + .queue_request(&(*hash, number), |base, block| { + protocol.client().is_descendent_of(base, block) + }); + + self.justifications.dispatch(&mut self.peers, protocol); + } + + /// Clears all pending justification requests. + pub fn clear_justification_requests(&mut self) { + self.justifications.clear(); + } + + pub fn justification_import_result( + &mut self, + hash: B::Hash, + number: NumberFor, + success: bool, + ) { + self.justifications + .justification_import_result(hash, number, success); + } + + pub fn stop(&self) { + self.is_stopping.store(true, Ordering::SeqCst); + self.import_queue.stop(); + } + + /// Notify about successful import of the given block. + pub fn block_imported(&mut self, hash: &B::Hash, number: NumberFor) { + trace!(target: "sync", "Block imported successfully {} ({})", number, hash); + } + + /// Notify about finalization of the given block. + pub fn on_block_finalized( + &mut self, + hash: &B::Hash, + number: NumberFor, + protocol: &mut Context, + ) { + if let Err(err) = self + .justifications + .on_block_finalized(hash, number, |base, block| { + protocol.client().is_descendent_of(base, block) + }) + { + warn!(target: "sync", "Error cleaning up pending justification requests: {:?}", err); + }; + } + + fn block_queued(&mut self, hash: &B::Hash, number: NumberFor) { + let best_seen = self.best_seen_block(); + let previous_state = self.state(&best_seen); + if number > self.best_queued_number { + self.best_queued_number = number; + self.best_queued_hash = *hash; + } + let current_state = self.state(&best_seen); + // If the latest queued block changed our state, update is_major_syncing. + match (previous_state, current_state) { + (SyncState::Idle, SyncState::Downloading) => { + self.is_major_syncing.store(true, Ordering::Relaxed) + } + (SyncState::Downloading, SyncState::Idle) => { + self.is_major_syncing.store(false, Ordering::Relaxed) + } + _ => {} + } + // Update common blocks + for (n, peer) in self.peers.iter_mut() { + if let PeerSyncState::AncestorSearch(_, _) = peer.state { + // Abort search. + peer.state = PeerSyncState::Available; + } + trace!(target: "sync", "Updating peer {} info, ours={}, common={}, their best={}", n, number, peer.common_number, peer.best_number); + if peer.best_number >= number { + peer.common_number = number; + } else { + peer.common_number = peer.best_number; + } + } + } + + pub(crate) fn update_chain_info(&mut self, best_header: &B::Header) { + let hash = best_header.hash(); + self.block_queued(&hash, best_header.number().clone()) + } + + /// Handle new block announcement. 
+ pub(crate) fn on_block_announce( + &mut self, + protocol: &mut Context, + who: PeerId, + hash: B::Hash, + header: &B::Header, + ) { + let number = *header.number(); + if number <= As::sa(0) { + trace!(target: "sync", "Ignored invalid block announcement from {}: {}", who, hash); + return; + } + let parent_status = block_status( + &*protocol.client(), + &self.queue_blocks, + header.parent_hash().clone(), + ) + .ok() + .unwrap_or(BlockStatus::Unknown); + let known_parent = parent_status != BlockStatus::Unknown; + let ancient_parent = parent_status == BlockStatus::InChainPruned; + + let known = self.is_known(protocol, &hash); + if let Some(ref mut peer) = self.peers.get_mut(&who) { + while peer.recently_announced.len() >= ANNOUNCE_HISTORY_SIZE { + peer.recently_announced.pop_front(); + } + peer.recently_announced.push_back(hash.clone()); + if number > peer.best_number { + // update their best block + peer.best_number = number; + peer.best_hash = hash; + } + if let PeerSyncState::AncestorSearch(_, _) = peer.state { + return; + } + if header.parent_hash() == &self.best_queued_hash || known_parent { + peer.common_number = number - As::sa(1); + } else if known { + peer.common_number = number + } + } else { + return; + } + + if !(known || self.is_already_downloading(&hash)) { + let stale = number <= self.best_queued_number; + if stale { + if !(known_parent || self.is_already_downloading(header.parent_hash())) { + if protocol + .client() + .block_status(&BlockId::Number(*header.number())) + .unwrap_or(BlockStatus::Unknown) + == BlockStatus::InChainPruned + { + trace!(target: "sync", "Ignored unknown ancient block announced from {}: {} {:?}", who, hash, header); + } else { + trace!(target: "sync", "Considering new unknown stale block announced from {}: {} {:?}", who, hash, header); + self.download_unknown_stale(protocol, who, &hash); + } + } else { + if ancient_parent { + trace!(target: "sync", "Ignored ancient stale block announced from {}: {} {:?}", who, hash, header); + } else { + self.download_stale(protocol, who, &hash); + } + } + } else { + if ancient_parent { + trace!(target: "sync", "Ignored ancient block announced from {}: {} {:?}", who, hash, header); + } else { + trace!(target: "sync", "Considering new block announced from {}: {} {:?}", who, hash, header); + self.download_new(protocol, who); + } + } + } else { + trace!(target: "sync", "Known block announce from {}: {}", who, hash); + } + } + + fn is_already_downloading(&self, hash: &B::Hash) -> bool { + self.peers + .iter() + .any(|(_, p)| p.state == PeerSyncState::DownloadingStale(*hash)) + } + + fn is_known(&self, protocol: &mut Context, hash: &B::Hash) -> bool { + block_status(&*protocol.client(), &self.queue_blocks, *hash) + .ok() + .map_or(false, |s| s != BlockStatus::Unknown) + } + + /// Handle disconnected peer. + pub(crate) fn peer_disconnected(&mut self, protocol: &mut Context, who: PeerId) { + let previous_best_seen = self.best_seen_block(); + let previous_state = self.state(&previous_best_seen); + self.blocks.clear_peer_download(&who); + self.peers.remove(&who); + if self.peers.len() == 0 { + // We're not connected to any peer anymore. + self.is_offline.store(true, Ordering::Relaxed); + } + let current_best_seen = self.best_seen_block(); + let current_state = self.state(&current_best_seen); + // We removed a peer, let's see if this put us in idle state and is_major_syncing should be updated.
+ match (previous_state, current_state) { + (SyncState::Downloading, SyncState::Idle) => { + self.is_major_syncing.store(false, Ordering::Relaxed) + } + _ => {} + } + self.justifications.peer_disconnected(who); + self.maintain_sync(protocol); + } + + /// Restart the sync process. + pub(crate) fn restart(&mut self, protocol: &mut Context) { + self.queue_blocks.clear(); + self.best_importing_number = Zero::zero(); + self.blocks.clear(); + match protocol.client().info() { + Ok(info) => { + self.best_queued_hash = info.best_queued_hash.unwrap_or(info.chain.best_hash); + self.best_queued_number = info.best_queued_number.unwrap_or(info.chain.best_number); + debug!(target:"sync", "Restarted with {} ({})", self.best_queued_number, self.best_queued_hash); + } + Err(e) => { + debug!(target:"sync", "Error reading blockchain: {:?}", e); + self.best_queued_hash = self.genesis_hash; + self.best_queued_number = As::sa(0); + } + } + let ids: Vec = self.peers.drain().map(|(id, _)| id).collect(); + for id in ids { + self.new_peer(protocol, id); + } + } + + /// Clear all sync data. + pub(crate) fn clear(&mut self) { + self.blocks.clear(); + self.peers.clear(); + } + + // Download old block with known parent. + fn download_stale(&mut self, protocol: &mut Context, who: PeerId, hash: &B::Hash) { + if let Some(ref mut peer) = self.peers.get_mut(&who) { + match peer.state { + PeerSyncState::Available => { + let request = message::generic::BlockRequest { + id: 0, + fields: self.required_block_attributes.clone(), + from: message::FromBlock::Hash(*hash), + to: None, + direction: message::Direction::Ascending, + max: Some(1), + }; + peer.state = PeerSyncState::DownloadingStale(*hash); + protocol.send_message(who, GenericMessage::BlockRequest(request)); + } + _ => (), + } + } + } + + // Download old block with unknown parent. 
+ fn download_unknown_stale(&mut self, protocol: &mut Context, who: PeerId, hash: &B::Hash) { + if let Some(ref mut peer) = self.peers.get_mut(&who) { + match peer.state { + PeerSyncState::Available => { + let request = message::generic::BlockRequest { + id: 0, + fields: self.required_block_attributes.clone(), + from: message::FromBlock::Hash(*hash), + to: None, + direction: message::Direction::Descending, + max: Some(MAX_UNKNOWN_FORK_DOWNLOAD_LEN), + }; + peer.state = PeerSyncState::DownloadingStale(*hash); + protocol.send_message(who, GenericMessage::BlockRequest(request)); + } + _ => (), + } + } + } + + // Issue a request for a peer to download new blocks, if any are available + fn download_new(&mut self, protocol: &mut Context, who: PeerId) { + if let Some(ref mut peer) = self.peers.get_mut(&who) { + // when there are too many blocks in the queue => do not try to download new blocks + if self.queue_blocks.len() > MAX_IMPORTING_BLOCKS { + trace!(target: "sync", "Too many blocks in the queue."); + return; + } + match peer.state { + PeerSyncState::Available => { + trace!(target: "sync", "Considering new block download from {}, common block is {}, best is {:?}", who, peer.common_number, peer.best_number); + if let Some(range) = self.blocks.needed_blocks( + who.clone(), + MAX_BLOCKS_TO_REQUEST, + peer.best_number, + peer.common_number, + ) { + trace!(target: "sync", "Requesting blocks from {}, ({} to {})", who, range.start, range.end); + let request = message::generic::BlockRequest { + id: 0, + fields: self.required_block_attributes.clone(), + from: message::FromBlock::Number(range.start), + to: None, + direction: message::Direction::Ascending, + max: Some((range.end - range.start).as_() as u32), + }; + peer.state = PeerSyncState::DownloadingNew(range.start); + protocol.send_message(who, GenericMessage::BlockRequest(request)); + } else { + trace!(target: "sync", "Nothing to request"); + } + } + _ => trace!(target: "sync", "Peer {} is busy", who), + } + } + } + + fn request_ancestry(protocol: &mut Context, who: PeerId, block: NumberFor) { + trace!(target: "sync", "Requesting ancestry block #{} from {}", block, who); + let request = message::generic::BlockRequest { + id: 0, + fields: message::BlockAttributes::HEADER | message::BlockAttributes::JUSTIFICATION, + from: message::FromBlock::Number(block), + to: None, + direction: message::Direction::Ascending, + max: Some(1), + }; + protocol.send_message(who, GenericMessage::BlockRequest(request)); + } } /// Get block status, taking into account import queue. fn block_status( - chain: &crate::chain::Client, - queue_blocks: &HashSet, - hash: B::Hash) -> Result -{ - if queue_blocks.contains(&hash) { - return Ok(BlockStatus::Queued); - } - - chain.block_status(&BlockId::Hash(hash)) + chain: &crate::chain::Client, + queue_blocks: &HashSet, + hash: B::Hash, +) -> Result { + if queue_blocks.contains(&hash) { + return Ok(BlockStatus::Queued); + } + + chain.block_status(&BlockId::Hash(hash)) } diff --git a/core/network/src/test/block_import.rs b/core/network/src/test/block_import.rs index 3b5e44cc47..1ea28ee1d1 100644 --- a/core/network/src/test/block_import.rs +++ b/core/network/src/test/block_import.rs @@ -16,69 +16,107 @@ //! Testing block import logic. 
-use consensus::import_queue::{import_single_block, BasicQueue, BlockImportError, BlockImportResult}; -use test_client::{self, TestClient}; -use test_client::runtime::{Block, Hash}; -use runtime_primitives::generic::BlockId; use super::*; +use consensus::import_queue::{ + import_single_block, BasicQueue, BlockImportError, BlockImportResult, +}; +use runtime_primitives::generic::BlockId; +use test_client::runtime::{Block, Hash}; +use test_client::{self, TestClient}; struct TestLink {} impl Link for TestLink {} -fn prepare_good_block() -> (client::Client, Hash, u64, PeerId, IncomingBlock) { - let client = test_client::new(); - let block = client.new_block().unwrap().bake().unwrap(); - client.import(BlockOrigin::File, block).unwrap(); +fn prepare_good_block() -> ( + client::Client< + test_client::Backend, + test_client::Executor, + Block, + test_client::runtime::RuntimeApi, + >, + Hash, + u64, + PeerId, + IncomingBlock, +) { + let client = test_client::new(); + let block = client.new_block().unwrap().bake().unwrap(); + client.import(BlockOrigin::File, block).unwrap(); - let (hash, number) = (client.block_hash(1).unwrap().unwrap(), 1); - let header = client.header(&BlockId::Number(1)).unwrap(); - let justification = client.justification(&BlockId::Number(1)).unwrap(); - let peer_id = PeerId::random(); - (client, hash, number, peer_id.clone(), IncomingBlock { - hash, - header, - body: None, - justification, - origin: Some(peer_id.clone()) - }) + let (hash, number) = (client.block_hash(1).unwrap().unwrap(), 1); + let header = client.header(&BlockId::Number(1)).unwrap(); + let justification = client.justification(&BlockId::Number(1)).unwrap(); + let peer_id = PeerId::random(); + ( + client, + hash, + number, + peer_id.clone(), + IncomingBlock { + hash, + header, + body: None, + justification, + origin: Some(peer_id.clone()), + }, + ) } #[test] fn import_single_good_block_works() { - let (_, _hash, number, peer_id, block) = prepare_good_block(); - assert_eq!( - import_single_block(&test_client::new(), BlockOrigin::File, block, Arc::new(PassThroughVerifier(true))), - Ok(BlockImportResult::ImportedUnknown(number, Default::default(), Some(peer_id))) - ); + let (_, _hash, number, peer_id, block) = prepare_good_block(); + assert_eq!( + import_single_block( + &test_client::new(), + BlockOrigin::File, + block, + Arc::new(PassThroughVerifier(true)) + ), + Ok(BlockImportResult::ImportedUnknown( + number, + Default::default(), + Some(peer_id) + )) + ); } #[test] fn import_single_good_known_block_is_ignored() { - let (client, _hash, number, _, block) = prepare_good_block(); - assert_eq!( - import_single_block(&client, BlockOrigin::File, block, Arc::new(PassThroughVerifier(true))), - Ok(BlockImportResult::ImportedKnown(number)) - ); + let (client, _hash, number, _, block) = prepare_good_block(); + assert_eq!( + import_single_block( + &client, + BlockOrigin::File, + block, + Arc::new(PassThroughVerifier(true)) + ), + Ok(BlockImportResult::ImportedKnown(number)) + ); } #[test] fn import_single_good_block_without_header_fails() { - let (_, _, _, peer_id, mut block) = prepare_good_block(); - block.header = None; - assert_eq!( - import_single_block(&test_client::new(), BlockOrigin::File, block, Arc::new(PassThroughVerifier(true))), - Err(BlockImportError::IncompleteHeader(Some(peer_id))) - ); + let (_, _, _, peer_id, mut block) = prepare_good_block(); + block.header = None; + assert_eq!( + import_single_block( + &test_client::new(), + BlockOrigin::File, + block, + Arc::new(PassThroughVerifier(true)) + ), + 
Err(BlockImportError::IncompleteHeader(Some(peer_id))) + ); } #[test] fn async_import_queue_drops() { - // Perform this test multiple times since it exhibits non-deterministic behavior. - for _ in 0..100 { - let verifier = Arc::new(PassThroughVerifier(true)); - let queue = BasicQueue::new(verifier, Arc::new(test_client::new()), None); - queue.start(Box::new(TestLink{})).unwrap(); - drop(queue); - } + // Perform this test multiple times since it exhibits non-deterministic behavior. + for _ in 0..100 { + let verifier = Arc::new(PassThroughVerifier(true)); + let queue = BasicQueue::new(verifier, Arc::new(test_client::new()), None); + queue.start(Box::new(TestLink {})).unwrap(); + drop(queue); + } } diff --git a/core/network/src/test/mod.rs b/core/network/src/test/mod.rs index b4b1c4f4e2..c2e6d1a52f 100644 --- a/core/network/src/test/mod.rs +++ b/core/network/src/test/mod.rs @@ -22,34 +22,38 @@ mod block_import; mod sync; use std::collections::{HashMap, HashSet}; -use std::sync::Arc; use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::Arc; use std::thread; use std::time::Duration; -use log::trace; +use crate::config::ProtocolConfig; +use crate::consensus_gossip::ConsensusGossip; +use crate::message::Message; +use crate::protocol::{ConnectedPeer, Context, FromNetworkMsg, Protocol, ProtocolMsg}; +use crate::service::{ + network_channel, NetworkChan, NetworkLink, NetworkMsg, NetworkPort, TransactionPool, +}; +use crate::specialization::NetworkSpecialization; use client; use client::block_builder::BlockBuilder; -use crate::config::ProtocolConfig; use consensus::import_queue::{BasicQueue, ImportQueue, IncomingBlock}; use consensus::import_queue::{Link, SharedBlockImport, SharedJustificationImport, Verifier}; -use consensus::{Error as ConsensusError, ErrorKind as ConsensusErrorKind}; use consensus::{BlockOrigin, ForkChoiceStrategy, ImportBlock, JustificationImport}; -use crate::consensus_gossip::ConsensusGossip; -use crossbeam_channel::{self as channel, Sender, select}; -use futures::Future; +use consensus::{Error as ConsensusError, ErrorKind as ConsensusErrorKind}; +use crossbeam_channel::{self as channel, select, Sender}; use futures::sync::{mpsc, oneshot}; -use crate::message::Message; +use futures::Future; +use log::trace; use network_libp2p::PeerId; use parity_codec::Encode; use parking_lot::{Mutex, RwLock}; -use primitives::{H256, ed25519::Public as AuthorityId}; -use crate::protocol::{ConnectedPeer, Context, FromNetworkMsg, Protocol, ProtocolMsg}; use primitives::{ed25519::Public as AuthorityId, H256}; use runtime_primitives::generic::BlockId; -use runtime_primitives::traits::{AuthorityIdFor, Block as BlockT, Digest, DigestItem, Header, NumberFor}; -use runtime_primitives::{Justification, ConsensusEngineId}; -use crate::service::{network_channel, NetworkChan, NetworkLink, NetworkMsg, NetworkPort, TransactionPool}; -use crate::specialization::NetworkSpecialization; +use runtime_primitives::traits::{ + AuthorityIdFor, Block as BlockT, Digest, DigestItem, Header, NumberFor, +}; +use runtime_primitives::{ConsensusEngineId, Justification}; use test_client::{self, AccountKeyring}; pub use test_client::runtime::{Block, Extrinsic, Hash, Transfer}; @@ -63,450 +67,511 @@ pub struct PassThroughVerifier(pub bool); #[cfg(any(test, feature = "test-helpers"))] /// This Verifier accepts all data as valid impl Verifier for PassThroughVerifier { - fn verify( - &self, - origin: BlockOrigin, - header: B::Header, - justification: Option, - body: Option> - ) -> Result<(ImportBlock, Option>>), String>
{ - let new_authorities = header.digest().log(DigestItem::as_authorities_change) - .map(|auth| auth.iter().cloned().collect()); - - Ok((ImportBlock { - origin, - header, - body, - finalized: self.0, - justification, - post_digests: vec![], - auxiliary: Vec::new(), - fork_choice: ForkChoiceStrategy::LongestChain, - }, new_authorities)) - } + fn verify( + &self, + origin: BlockOrigin, + header: B::Header, + justification: Option, + body: Option>, + ) -> Result<(ImportBlock, Option>>), String> { + let new_authorities = header + .digest() + .log(DigestItem::as_authorities_change) + .map(|auth| auth.iter().cloned().collect()); + + Ok(( + ImportBlock { + origin, + header, + body, + finalized: self.0, + justification, + post_digests: vec![], + auxiliary: Vec::new(), + fork_choice: ForkChoiceStrategy::LongestChain, + }, + new_authorities, + )) + } } /// A link implementation that does nothing. -pub struct NoopLink { } +pub struct NoopLink {} -impl Link for NoopLink { } +impl Link for NoopLink {} /// The test specialization. #[derive(Clone)] pub struct DummySpecialization; impl NetworkSpecialization for DummySpecialization { - fn status(&self) -> Vec { - vec![] - } - - fn on_connect(&mut self, _ctx: &mut Context, _peer_id: PeerId, _status: crate::message::Status) { - } - - fn on_disconnect(&mut self, _ctx: &mut Context, _peer_id: PeerId) { - } - - fn on_message( - &mut self, - _ctx: &mut Context, - _peer_id: PeerId, - _message: &mut Option>, - ) { - } + fn status(&self) -> Vec { + vec![] + } + + fn on_connect( + &mut self, + _ctx: &mut Context, + _peer_id: PeerId, + _status: crate::message::Status, + ) { + } + + fn on_disconnect(&mut self, _ctx: &mut Context, _peer_id: PeerId) {} + + fn on_message( + &mut self, + _ctx: &mut Context, + _peer_id: PeerId, + _message: &mut Option>, + ) { + } } -pub type PeersClient = client::Client; +pub type PeersClient = client::Client< + test_client::Backend, + test_client::Executor, + Block, + test_client::runtime::RuntimeApi, +>; #[derive(Clone)] /// A Link that can wait for a block to have been imported. pub struct TestLink + Clone> { - import_done: Arc, - hash: Arc>, - link: NetworkLink, + import_done: Arc, + hash: Arc>, + link: NetworkLink, } impl + Clone> TestLink { - fn new( - protocol_sender: Sender>, - network_sender: NetworkChan - ) -> TestLink { - TestLink { - import_done: Arc::new(AtomicBool::new(false)), - hash: Arc::new(Mutex::new(Default::default())), - link: NetworkLink { - protocol_sender, - network_sender, - } - } - } - - /// Set the hash which will be awaited for import. - fn with_hash(&self, hash: Hash) { - self.import_done.store(false, Ordering::SeqCst); - *self.hash.lock() = hash; - } - - /// Simulate a synchronous import. - fn wait_for_import(&self) { - while !self.import_done.load(Ordering::SeqCst) { - thread::sleep(Duration::from_millis(20)); - } - } + fn new( + protocol_sender: Sender>, + network_sender: NetworkChan, + ) -> TestLink { + TestLink { + import_done: Arc::new(AtomicBool::new(false)), + hash: Arc::new(Mutex::new(Default::default())), + link: NetworkLink { + protocol_sender, + network_sender, + }, + } + } + + /// Set the hash which will be awaited for import. + fn with_hash(&self, hash: Hash) { + self.import_done.store(false, Ordering::SeqCst); + *self.hash.lock() = hash; + } + + /// Simulate a synchronous import. 
+ fn wait_for_import(&self) { + while !self.import_done.load(Ordering::SeqCst) { + thread::sleep(Duration::from_millis(20)); + } + } } impl + Clone> Link for TestLink { - fn block_imported(&self, hash: &Hash, number: NumberFor) { - if hash == &*self.hash.lock() { - self.import_done.store(true, Ordering::SeqCst); - } - self.link.block_imported(hash, number); - } - - fn blocks_processed(&self, processed_blocks: Vec, has_error: bool) { - self.link.blocks_processed(processed_blocks, has_error); - } - - fn justification_imported(&self, who: PeerId, hash: &Hash, number:NumberFor, success: bool) { - self.link.justification_imported(who, hash, number, success); - } - - fn request_justification(&self, hash: &Hash, number: NumberFor) { - self.link.request_justification(hash, number); - } - - fn useless_peer(&self, who: PeerId, reason: &str) { - self.link.useless_peer(who, reason); - } - - fn note_useless_and_restart_sync(&self, who: PeerId, reason: &str) { - self.link.note_useless_and_restart_sync(who, reason); - } - - fn restart(&self) { - self.link.restart(); - } + fn block_imported(&self, hash: &Hash, number: NumberFor) { + if hash == &*self.hash.lock() { + self.import_done.store(true, Ordering::SeqCst); + } + self.link.block_imported(hash, number); + } + + fn blocks_processed(&self, processed_blocks: Vec, has_error: bool) { + self.link.blocks_processed(processed_blocks, has_error); + } + + fn justification_imported( + &self, + who: PeerId, + hash: &Hash, + number: NumberFor, + success: bool, + ) { + self.link.justification_imported(who, hash, number, success); + } + + fn request_justification(&self, hash: &Hash, number: NumberFor) { + self.link.request_justification(hash, number); + } + + fn useless_peer(&self, who: PeerId, reason: &str) { + self.link.useless_peer(who, reason); + } + + fn note_useless_and_restart_sync(&self, who: PeerId, reason: &str) { + self.link.note_useless_and_restart_sync(who, reason); + } + + fn restart(&self) { + self.link.restart(); + } } pub struct Peer + Clone> { - pub is_offline: Arc, - pub is_major_syncing: Arc, - pub peers: Arc>>>, - pub peer_id: PeerId, - client: Arc, - network_to_protocol_sender: Sender>, - pub protocol_sender: Sender>, - network_link: TestLink, - network_port: Arc>>, - pub import_queue: Box>, - pub data: D, - best_hash: Mutex>, - finalized_hash: Mutex>, + pub is_offline: Arc, + pub is_major_syncing: Arc, + pub peers: Arc>>>, + pub peer_id: PeerId, + client: Arc, + network_to_protocol_sender: Sender>, + pub protocol_sender: Sender>, + network_link: TestLink, + network_port: Arc>>, + pub import_queue: Box>, + pub data: D, + best_hash: Mutex>, + finalized_hash: Mutex>, } impl + Clone> Peer { - fn new( - is_offline: Arc, - is_major_syncing: Arc, - peers: Arc>>>, - client: Arc, - import_queue: Box>, - network_to_protocol_sender: Sender>, - protocol_sender: Sender>, - network_sender: NetworkChan, - network_port: NetworkPort, - data: D, - ) -> Self { - let network_port = Arc::new(Mutex::new(network_port)); - let network_link = TestLink::new(protocol_sender.clone(), network_sender.clone()); - import_queue.start(Box::new(network_link.clone())).expect("Test ImportQueue always starts"); - Peer { - is_offline, - is_major_syncing, - peers, - peer_id: PeerId::random(), - client, - network_to_protocol_sender, - protocol_sender, - import_queue, - network_link, - network_port, - data, - best_hash: Mutex::new(None), - finalized_hash: Mutex::new(None), - } - } - /// Called after blockchain has been populated to update current state.
- fn start(&self) { - // Update the sync state to the latest chain state. - let info = self.client.info().expect("In-mem client does not fail"); - let header = self - .client - .header(&BlockId::Hash(info.chain.best_hash)) - .unwrap() - .unwrap(); - let _ = self - .protocol_sender - .send(ProtocolMsg::BlockImported(info.chain.best_hash, header)); - } - - pub fn on_block_imported( - &self, - hash: ::Hash, - header: &::Header, - ) { - let _ = self - .protocol_sender - .send(ProtocolMsg::BlockImported(hash, header.clone())); - } - - // SyncOracle: are we connected to any peer? - #[cfg(test)] - fn is_offline(&self) -> bool { - self.is_offline.load(Ordering::Relaxed) - } - - // SyncOracle: are we in the process of catching-up with the chain? - #[cfg(test)] - fn is_major_syncing(&self) -> bool { - self.is_major_syncing.load(Ordering::Relaxed) - } - - /// Called on connection to other indicated peer. - fn on_connect(&self, other: &Self) { - let _ = self.network_to_protocol_sender.send(FromNetworkMsg::PeerConnected(other.peer_id.clone(), String::new())); - } - - /// Called on disconnect from other indicated peer. - fn on_disconnect(&self, other: &Self) { - let _ = self - .network_to_protocol_sender - .send(FromNetworkMsg::PeerDisconnected(other.peer_id.clone(), String::new())); - } - - /// Receive a message from another peer. Return a set of peers to disconnect. - fn receive_message(&self, from: &Self, msg: Message) { - let _ = self - .network_to_protocol_sender - .send(FromNetworkMsg::CustomMessage(from.peer_id.clone(), msg)); - } - - /// Produce the next pending message to send to another peer. - fn pending_message(&self) -> Option> { - select! { - recv(self.network_port.lock().receiver()) -> msg => return msg.ok(), - // If there are no messages ready, give protocol a chance to send one. - recv(channel::after(Duration::from_millis(100))) -> _ => return None, - } - } - - /// Produce the next pending message to send to another peer, without waiting. - fn pending_message_fast(&self) -> Option> { - self.network_port.lock().receiver().try_recv().ok() - } - - /// Whether this peer is done syncing (has no messages to send). - fn is_done(&self) -> bool { - self.network_port.lock().receiver().is_empty() - } - - /// Execute a "sync step". This is called for each peer after it sends a packet. - fn sync_step(&self) { - let _ = self.protocol_sender.send(ProtocolMsg::Tick); - } - - /// Send block import notifications. - fn send_import_notifications(&self) { - let info = self.client.info().expect("In-mem client does not fail"); - - let mut best_hash = self.best_hash.lock(); - match *best_hash { - None => {}, - Some(hash) if hash != info.chain.best_hash => {}, - _ => return, - } - - let header = self.client.header(&BlockId::Hash(info.chain.best_hash)).unwrap().unwrap(); - let _ = self - .protocol_sender - .send(ProtocolMsg::BlockImported(info.chain.best_hash, header)); - - *best_hash = Some(info.chain.best_hash); - } - - /// Send block finalization notifications.
- pub fn send_finality_notifications(&self) { - let info = self.client.info().expect("In-mem client does not fail"); - - let mut finalized_hash = self.finalized_hash.lock(); - match *finalized_hash { - None => {}, - Some(hash) if hash != info.chain.finalized_hash => {}, - _ => return, - } - - let header = self.client.header(&BlockId::Hash(info.chain.finalized_hash)).unwrap().unwrap(); - let _ = self - .protocol_sender - .send(ProtocolMsg::BlockFinalized(info.chain.finalized_hash, header.clone())); - - *finalized_hash = Some(info.chain.finalized_hash); - } - - /// Restart sync for a peer. - fn restart_sync(&self) { - let _ = self.protocol_sender.send(ProtocolMsg::Abort); - } - - /// Push a message into the gossip network and relay to peers. - /// `TestNet::sync_step` needs to be called to ensure it's propagated. - pub fn gossip_message( - &self, - topic: ::Hash, - engine_id: ConsensusEngineId, - data: Vec, - force: bool, - ) { - let _ = self - .protocol_sender - .send(ProtocolMsg::GossipConsensusMessage(topic, engine_id, data, force)); - } - - pub fn consensus_gossip_collect_garbage_for_topic(&self, _topic: ::Hash) { - self.with_gossip(move |gossip, _| gossip.collect_garbage()) - } - - /// access the underlying consensus gossip handler - pub fn consensus_gossip_messages_for( - &self, - engine_id: ConsensusEngineId, - topic: ::Hash, - ) -> mpsc::UnboundedReceiver> { - let (tx, rx) = oneshot::channel(); - self.with_gossip(move |gossip, _| { - let inner_rx = gossip.messages_for(engine_id, topic); - let _ = tx.send(inner_rx); - }); - rx.wait().ok().expect("1. Network is running, 2. it should handle the above closure successfully") - } - - /// Execute a closure with the consensus gossip. - pub fn with_gossip(&self, f: F) - where F: FnOnce(&mut ConsensusGossip, &mut Context) + Send + 'static - { - let _ = self - .protocol_sender - .send(ProtocolMsg::ExecuteWithGossip(Box::new(f))); - } - - /// Announce a block to peers. - pub fn announce_block(&self, block: Hash) { - let _ = self.protocol_sender.send(ProtocolMsg::AnnounceBlock(block)); - } - - /// Request a justification for the given block. - #[cfg(test)] - fn request_justification(&self, hash: &::primitives::H256, number: NumberFor) { - let _ = self - .protocol_sender - .send(ProtocolMsg::RequestJustification(hash.clone(), number)); - } - - /// Add blocks to the peer -- edit the block before adding - pub fn generate_blocks(&self, count: usize, origin: BlockOrigin, edit_block: F) -> H256 - where F: FnMut(BlockBuilder) -> Block - { - let best_hash = self.client.info().unwrap().chain.best_hash; - self.generate_blocks_at(BlockId::Hash(best_hash), count, origin, edit_block) - } - - /// Add blocks to the peer -- edit the block before adding. The chain will - /// start at the given block ID.
- pub fn generate_blocks_at(&self, at: BlockId, count: usize, origin: BlockOrigin, mut edit_block: F) -> H256 - where F: FnMut(BlockBuilder) -> Block - { - let mut at = self.client.header(&at).unwrap().unwrap().hash(); - for _ in 0..count { - let builder = self.client.new_block_at(&BlockId::Hash(at)).unwrap(); - let block = edit_block(builder); - let hash = block.header.hash(); - trace!( - "Generating {}, (#{}, parent={})", - hash, - block.header.number, - block.header.parent_hash - ); - let header = block.header.clone(); - at = hash; - self.network_link.with_hash(hash); - self.import_queue.import_blocks( - origin, - vec![IncomingBlock { - origin: None, - hash, - header: Some(header), - body: Some(block.extrinsics), - justification: None, - }], - ); - // Simulate a sync import. - self.network_link.wait_for_import(); - } - at - } - - /// Push blocks to the peer (simplified: with or without a TX) - pub fn push_blocks(&self, count: usize, with_tx: bool) -> H256 { - let best_hash = self.client.info().unwrap().chain.best_hash; - self.push_blocks_at(BlockId::Hash(best_hash), count, with_tx) - } - - /// Push blocks to the peer (simplified: with or without a TX) starting from - /// given hash. - pub fn push_blocks_at(&self, at: BlockId, count: usize, with_tx: bool) -> H256 { - let mut nonce = 0; - if with_tx { - self.generate_blocks_at(at, count, BlockOrigin::File, |mut builder| { - let transfer = Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Alice.into(), - amount: 1, - nonce, - }; - let signature = AccountKeyring::from_public(&transfer.from).unwrap().sign(&transfer.encode()).into(); - builder.push(Extrinsic::Transfer(transfer, signature)).unwrap(); - nonce = nonce + 1; - builder.bake().unwrap() - }) - } else { - self.generate_blocks_at(at, count, BlockOrigin::File, |builder| builder.bake().unwrap()) - } - } - - pub fn push_authorities_change_block(&self, new_authorities: Vec) -> H256 { - self.generate_blocks(1, BlockOrigin::File, |mut builder| { - builder.push(Extrinsic::AuthoritiesChange(new_authorities.clone())).unwrap(); - builder.bake().unwrap() - }) - } - - /// Get a reference to the client. - pub fn client(&self) -> &Arc { - &self.client - } + fn new( + is_offline: Arc, + is_major_syncing: Arc, + peers: Arc>>>, + client: Arc, + import_queue: Box>, + network_to_protocol_sender: Sender>, + protocol_sender: Sender>, + network_sender: NetworkChan, + network_port: NetworkPort, + data: D, + ) -> Self { + let network_port = Arc::new(Mutex::new(network_port)); + let network_link = TestLink::new(protocol_sender.clone(), network_sender.clone()); + import_queue + .start(Box::new(network_link.clone())) + .expect("Test ImportQueue always starts"); + Peer { + is_offline, + is_major_syncing, + peers, + peer_id: PeerId::random(), + client, + network_to_protocol_sender, + protocol_sender, + import_queue, + network_link, + network_port, + data, + best_hash: Mutex::new(None), + finalized_hash: Mutex::new(None), + } + } + /// Called after blockchain has been populated to update current state. + fn start(&self) { + // Update the sync state to the latest chain state.
+ let info = self.client.info().expect("In-mem client does not fail"); + let header = self + .client + .header(&BlockId::Hash(info.chain.best_hash)) + .unwrap() + .unwrap(); + let _ = self + .protocol_sender + .send(ProtocolMsg::BlockImported(info.chain.best_hash, header)); + } + + pub fn on_block_imported( + &self, + hash: ::Hash, + header: &::Header, + ) { + let _ = self + .protocol_sender + .send(ProtocolMsg::BlockImported(hash, header.clone())); + } + + // SyncOracle: are we connected to any peer? + #[cfg(test)] + fn is_offline(&self) -> bool { + self.is_offline.load(Ordering::Relaxed) + } + + // SyncOracle: are we in the process of catching-up with the chain? + #[cfg(test)] + fn is_major_syncing(&self) -> bool { + self.is_major_syncing.load(Ordering::Relaxed) + } + + /// Called on connection to other indicated peer. + fn on_connect(&self, other: &Self) { + let _ = self + .network_to_protocol_sender + .send(FromNetworkMsg::PeerConnected( + other.peer_id.clone(), + String::new(), + )); + } + + /// Called on disconnect from other indicated peer. + fn on_disconnect(&self, other: &Self) { + let _ = self + .network_to_protocol_sender + .send(FromNetworkMsg::PeerDisconnected( + other.peer_id.clone(), + String::new(), + )); + } + + /// Receive a message from another peer. Return a set of peers to disconnect. + fn receive_message(&self, from: &Self, msg: Message) { + let _ = self + .network_to_protocol_sender + .send(FromNetworkMsg::CustomMessage(from.peer_id.clone(), msg)); + } + + /// Produce the next pending message to send to another peer. + fn pending_message(&self) -> Option> { + select! { + recv(self.network_port.lock().receiver()) -> msg => return msg.ok(), + // If there are no messages ready, give protocol a chance to send one. + recv(channel::after(Duration::from_millis(100))) -> _ => return None, + } + } + + /// Produce the next pending message to send to another peer, without waiting. + fn pending_message_fast(&self) -> Option> { + self.network_port.lock().receiver().try_recv().ok() + } + + /// Whether this peer is done syncing (has no messages to send). + fn is_done(&self) -> bool { + self.network_port.lock().receiver().is_empty() + } + + /// Execute a "sync step". This is called for each peer after it sends a packet. + fn sync_step(&self) { + let _ = self.protocol_sender.send(ProtocolMsg::Tick); + } + + /// Send block import notifications. + fn send_import_notifications(&self) { + let info = self.client.info().expect("In-mem client does not fail"); + + let mut best_hash = self.best_hash.lock(); + match *best_hash { + None => {} + Some(hash) if hash != info.chain.best_hash => {} + _ => return, + } + + let header = self + .client + .header(&BlockId::Hash(info.chain.best_hash)) + .unwrap() + .unwrap(); + let _ = self + .protocol_sender + .send(ProtocolMsg::BlockImported(info.chain.best_hash, header)); + + *best_hash = Some(info.chain.best_hash); + } + + /// Send block finalization notifications.
+ pub fn send_finality_notifications(&self) { + let info = self.client.info().expect("In-mem client does not fail"); + + let mut finalized_hash = self.finalized_hash.lock(); + match *finalized_hash { + None => {} + Some(hash) if hash != info.chain.finalized_hash => {} + _ => return, + } + + let header = self + .client + .header(&BlockId::Hash(info.chain.finalized_hash)) + .unwrap() + .unwrap(); + let _ = self.protocol_sender.send(ProtocolMsg::BlockFinalized( + info.chain.finalized_hash, + header.clone(), + )); + + *finalized_hash = Some(info.chain.finalized_hash); + } + + /// Restart sync for a peer. + fn restart_sync(&self) { + let _ = self.protocol_sender.send(ProtocolMsg::Abort); + } + + /// Push a message into the gossip network and relay to peers. + /// `TestNet::sync_step` needs to be called to ensure it's propagated. + pub fn gossip_message( + &self, + topic: ::Hash, + engine_id: ConsensusEngineId, + data: Vec, + force: bool, + ) { + let _ = self + .protocol_sender + .send(ProtocolMsg::GossipConsensusMessage( + topic, engine_id, data, force, + )); + } + + pub fn consensus_gossip_collect_garbage_for_topic(&self, _topic: ::Hash) { + self.with_gossip(move |gossip, _| gossip.collect_garbage()) + } + + /// access the underlying consensus gossip handler + pub fn consensus_gossip_messages_for( + &self, + engine_id: ConsensusEngineId, + topic: ::Hash, + ) -> mpsc::UnboundedReceiver> { + let (tx, rx) = oneshot::channel(); + self.with_gossip(move |gossip, _| { + let inner_rx = gossip.messages_for(engine_id, topic); + let _ = tx.send(inner_rx); + }); + rx.wait() + .ok() + .expect("1. Network is running, 2. it should handle the above closure successfully") + } + + /// Execute a closure with the consensus gossip. + pub fn with_gossip(&self, f: F) + where + F: FnOnce(&mut ConsensusGossip, &mut Context) + Send + 'static, + { + let _ = self + .protocol_sender + .send(ProtocolMsg::ExecuteWithGossip(Box::new(f))); + } + + /// Announce a block to peers. + pub fn announce_block(&self, block: Hash) { + let _ = self.protocol_sender.send(ProtocolMsg::AnnounceBlock(block)); + } + + /// Request a justification for the given block. + #[cfg(test)] + fn request_justification(&self, hash: &::primitives::H256, number: NumberFor) { + let _ = self + .protocol_sender + .send(ProtocolMsg::RequestJustification(hash.clone(), number)); + } + + /// Add blocks to the peer -- edit the block before adding + pub fn generate_blocks(&self, count: usize, origin: BlockOrigin, edit_block: F) -> H256 + where + F: FnMut(BlockBuilder) -> Block, + { + let best_hash = self.client.info().unwrap().chain.best_hash; + self.generate_blocks_at(BlockId::Hash(best_hash), count, origin, edit_block) + } + + /// Add blocks to the peer -- edit the block before adding. The chain will + /// start at the given block ID.
+ pub fn generate_blocks_at( + &self, + at: BlockId, + count: usize, + origin: BlockOrigin, + mut edit_block: F, + ) -> H256 + where + F: FnMut(BlockBuilder) -> Block, + { + let mut at = self.client.header(&at).unwrap().unwrap().hash(); + for _ in 0..count { + let builder = self.client.new_block_at(&BlockId::Hash(at)).unwrap(); + let block = edit_block(builder); + let hash = block.header.hash(); + trace!( + "Generating {}, (#{}, parent={})", + hash, + block.header.number, + block.header.parent_hash + ); + let header = block.header.clone(); + at = hash; + self.network_link.with_hash(hash); + self.import_queue.import_blocks( + origin, + vec![IncomingBlock { + origin: None, + hash, + header: Some(header), + body: Some(block.extrinsics), + justification: None, + }], + ); + // Simulate a sync import. + self.network_link.wait_for_import(); + } + at + } + + /// Push blocks to the peer (simplified: with or without a TX) + pub fn push_blocks(&self, count: usize, with_tx: bool) -> H256 { + let best_hash = self.client.info().unwrap().chain.best_hash; + self.push_blocks_at(BlockId::Hash(best_hash), count, with_tx) + } + + /// Push blocks to the peer (simplified: with or without a TX) starting from + /// given hash. + pub fn push_blocks_at(&self, at: BlockId, count: usize, with_tx: bool) -> H256 { + let mut nonce = 0; + if with_tx { + self.generate_blocks_at(at, count, BlockOrigin::File, |mut builder| { + let transfer = Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Alice.into(), + amount: 1, + nonce, + }; + let signature = AccountKeyring::from_public(&transfer.from) + .unwrap() + .sign(&transfer.encode()) + .into(); + builder + .push(Extrinsic::Transfer(transfer, signature)) + .unwrap(); + nonce = nonce + 1; + builder.bake().unwrap() + }) + } else { + self.generate_blocks_at(at, count, BlockOrigin::File, |builder| { + builder.bake().unwrap() + }) + } + } + + pub fn push_authorities_change_block(&self, new_authorities: Vec) -> H256 { + self.generate_blocks(1, BlockOrigin::File, |mut builder| { + builder + .push(Extrinsic::AuthoritiesChange(new_authorities.clone())) + .unwrap(); + builder.bake().unwrap() + }) + } + + /// Get a reference to the client. + pub fn client(&self) -> &Arc { + &self.client + } } pub struct EmptyTransactionPool; impl TransactionPool for EmptyTransactionPool { - fn transactions(&self) -> Vec<(Hash, Extrinsic)> { - Vec::new() - } + fn transactions(&self) -> Vec<(Hash, Extrinsic)> { + Vec::new() + } - fn import(&self, _transaction: &Extrinsic) -> Option { - None - } + fn import(&self, _transaction: &Extrinsic) -> Option { + None + } - fn on_broadcasted(&self, _: HashMap>) {} + fn on_broadcasted(&self, _: HashMap>) {} } pub trait SpecializationFactory { @@ -514,347 +579,384 @@ pub trait SpecializationFactory { } impl SpecializationFactory for DummySpecialization { - fn create() -> DummySpecialization { - DummySpecialization - } + fn create() -> DummySpecialization { + DummySpecialization + } } pub trait TestNetFactory: Sized { - type Specialization: NetworkSpecialization + Clone + SpecializationFactory; - type Verifier: 'static + Verifier; - type PeerData: Default; - - /// These two need to be implemented! - fn from_config(config: &ProtocolConfig) -> Self; - fn make_verifier(&self, client: Arc, config: &ProtocolConfig) -> Arc; - - /// Get reference to peer. 
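`push_blocks_at` above relies on `edit_block` being `FnMut` so the closure can mutate captured state (the transfer nonce) across repeated block builds. A reduced sketch of that closure shape, with illustrative types in place of the block builder:

// Build `count` items, letting the caller mutate captured state each time.
fn generate<F>(count: usize, mut edit: F) -> Vec<String>
where
    F: FnMut() -> String,
{
    (0..count).map(|_| edit()).collect()
}

fn main() {
    let mut nonce = 0u64;
    let blocks = generate(3, || {
        let b = format!("block with nonce {}", nonce);
        nonce += 1; // carried across calls, like the transfer nonce above
        b
    });
    assert_eq!(blocks[2], "block with nonce 2");
}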
- fn peer(&self, i: usize) -> &Peer; - fn peers(&self) -> &Vec>>; - fn mut_peers>>)>(&mut self, closure: F); - - fn started(&self) -> bool; - fn set_started(&mut self, now: bool); - - /// Get custom block import handle for fresh client, along with peer data. - fn make_block_import(&self, client: Arc) - -> (SharedBlockImport, Option>, Self::PeerData) - { - (client, None, Default::default()) - } - - fn default_config() -> ProtocolConfig { - ProtocolConfig::default() - } - - /// Create new test network with this many peers. - fn new(n: usize) -> Self { - let config = Self::default_config(); - let mut net = Self::from_config(&config); - - for _ in 0..n { - net.add_peer(&config); - } - net - } - - /// Add a peer. - fn add_peer(&mut self, config: &ProtocolConfig) { - let client = Arc::new(test_client::new()); - let tx_pool = Arc::new(EmptyTransactionPool); - let verifier = self.make_verifier(client.clone(), config); - let (block_import, justification_import, data) = self.make_block_import(client.clone()); - let (network_sender, network_port) = network_channel(); - - let import_queue = Box::new(BasicQueue::new(verifier, block_import, justification_import)); - let status_sinks = Arc::new(Mutex::new(Vec::new())); - let is_offline = Arc::new(AtomicBool::new(true)); - let is_major_syncing = Arc::new(AtomicBool::new(false)); - let specialization = self::SpecializationFactory::create(); - let peers: Arc>>> = Arc::new(Default::default()); - - let (protocol_sender, network_to_protocol_sender) = Protocol::new( - status_sinks, - is_offline.clone(), - is_major_syncing.clone(), - peers.clone(), - network_sender.clone(), - config.clone(), - client.clone(), - import_queue.clone(), - None, - tx_pool, - specialization, - ).unwrap(); - - let peer = Arc::new(Peer::new( - is_offline, - is_major_syncing, - peers, - client, - import_queue, - network_to_protocol_sender, - protocol_sender, - network_sender, - network_port, - data, - )); - - self.mut_peers(|peers| { - peers.push(peer) - }); - } - - /// Start network. - fn start(&mut self) { - if self.started() { - return; - } - for peer in self.peers() { - peer.start(); - for client in self.peers() { - if peer.peer_id != client.peer_id { - peer.on_connect(client); - } - } - } - self.route(None); - self.set_started(true); - } - - /// Do one step of routing. - fn route(&mut self, disconnected: Option>) { - self.mut_peers(move |peers| { - let mut to_disconnect = HashSet::new(); - for (peer_pos, peer) in peers.iter().enumerate() { - let packet = peer.pending_message(); - match packet { - None => continue, - Some(NetworkMsg::Outgoing(recipient, packet)) => { - let recipient = peers.iter().position(|p| p.peer_id == recipient).unwrap(); - if let Some(disconnected) = disconnected.as_ref() { - let mut current = HashSet::new(); - current.insert(peer_pos); - current.insert(recipient); - // Not routing message between "disconnected" nodes. - if disconnected.is_subset(¤t) { - continue; - } - } - peers[recipient].receive_message(peer, packet) - } - Some(NetworkMsg::ReportPeer(who, _)) => { - to_disconnect.insert(who); - } - } - } - for d in to_disconnect { - if let Some(d) = peers.iter().find(|p| p.peer_id == d) { - for peer in 0..peers.len() { - peers[peer].on_disconnect(d); - } - } - } - }); - } - - /// Route all pending outgoing messages, without waiting or disconnecting. 
- fn route_fast(&mut self) { - self.mut_peers(move |peers| { - for peer in 0..peers.len() { - while let Some(NetworkMsg::Outgoing(recipient, packet)) = peers[peer].pending_message_fast() { - if let Some(p) = peers.iter().find(|p| p.peer_id == recipient) { - p.receive_message(&peers[peer], packet) - } - } - } - }); - } - - /// Do a step of synchronization. - fn sync_step(&mut self) { - self.route(None); - - self.mut_peers(|peers| { - for peer in peers { - peer.sync_step(); - } - }) - } - - /// Send block import notifications for all peers. - fn send_import_notifications(&mut self) { - self.mut_peers(|peers| { - for peer in peers { - peer.send_import_notifications(); - } - }) - } - - /// Send block finalization notifications for all peers. - fn send_finality_notifications(&mut self) { - self.mut_peers(|peers| { - for peer in peers { - peer.send_finality_notifications(); - } - }) - } - - /// Restart sync for a peer. - fn restart_peer(&mut self, i: usize) { - self.peers()[i].restart_sync(); - } - - /// Perform synchronization until complete, if provided the - /// given nodes set are excluded from sync. - fn sync_with(&mut self, disconnected: Option>) -> u32 { - self.start(); - let mut total_steps = 0; - let mut done = 0; - - loop { - if done > 3 { break; } - if self.done() { - done += 1; - } else { - done = 0; - } - - self.sync_step(); - self.route(disconnected.clone()); - - total_steps += 1; - } - - total_steps - } - - /// Perform synchronization until complete. - fn sync(&mut self) -> u32 { - self.sync_with(None) - } - - /// Perform synchronization until complete, - /// excluding sync between certain nodes. - fn sync_with_disconnected(&mut self, disconnected: HashSet) -> u32 { - self.sync_with(Some(disconnected)) - } - - /// Do the given amount of sync steps. - fn sync_steps(&mut self, count: usize) { - self.start(); - for _ in 0..count { - self.sync_step(); - } - } - - /// Whether all peers have synced. - fn done(&self) -> bool { - self.peers().iter().all(|p| p.is_done()) - } + type Specialization: NetworkSpecialization + Clone + SpecializationFactory; + type Verifier: 'static + Verifier; + type PeerData: Default; + + /// These two need to be implemented! + fn from_config(config: &ProtocolConfig) -> Self; + fn make_verifier( + &self, + client: Arc, + config: &ProtocolConfig, + ) -> Arc; + + /// Get reference to peer. + fn peer(&self, i: usize) -> &Peer; + fn peers(&self) -> &Vec>>; + fn mut_peers>>)>( + &mut self, + closure: F, + ); + + fn started(&self) -> bool; + fn set_started(&mut self, now: bool); + + /// Get custom block import handle for fresh client, along with peer data. + fn make_block_import( + &self, + client: Arc, + ) -> ( + SharedBlockImport, + Option>, + Self::PeerData, + ) { + (client, None, Default::default()) + } + + fn default_config() -> ProtocolConfig { + ProtocolConfig::default() + } + + /// Create new test network with this many peers. + fn new(n: usize) -> Self { + let config = Self::default_config(); + let mut net = Self::from_config(&config); + + for _ in 0..n { + net.add_peer(&config); + } + net + } + + /// Add a peer. 
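`TestNetFactory` pairs a few required accessors (`from_config`, `make_verifier`, `peer`, `peers`, `mut_peers`, `started`, `set_started`) with provided methods (`new`, `add_peer`, `sync`, `done`, ...) built on top of them. A toy sketch of that trait layout (trait and types are illustrative):

trait Net: Sized {
    // Required accessors, like `peers`/`mut_peers` above.
    fn peers(&self) -> &Vec<u32>;
    fn peers_mut(&mut self) -> &mut Vec<u32>;

    // Provided behaviour built on the accessors, like `new`/`sync`/`done`.
    fn add_peer(&mut self, id: u32) {
        self.peers_mut().push(id);
    }
    fn count(&self) -> usize {
        self.peers().len()
    }
}

struct MyNet(Vec<u32>);

impl Net for MyNet {
    fn peers(&self) -> &Vec<u32> {
        &self.0
    }
    fn peers_mut(&mut self) -> &mut Vec<u32> {
        &mut self.0
    }
}

fn main() {
    let mut net = MyNet(Vec::new());
    net.add_peer(1);
    net.add_peer(2);
    assert_eq!(net.count(), 2);
}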
+ fn add_peer(&mut self, config: &ProtocolConfig) { + let client = Arc::new(test_client::new()); + let tx_pool = Arc::new(EmptyTransactionPool); + let verifier = self.make_verifier(client.clone(), config); + let (block_import, justification_import, data) = self.make_block_import(client.clone()); + let (network_sender, network_port) = network_channel(); + + let import_queue = Box::new(BasicQueue::new( + verifier, + block_import, + justification_import, + )); + let status_sinks = Arc::new(Mutex::new(Vec::new())); + let is_offline = Arc::new(AtomicBool::new(true)); + let is_major_syncing = Arc::new(AtomicBool::new(false)); + let specialization = self::SpecializationFactory::create(); + let peers: Arc>>> = + Arc::new(Default::default()); + + let (protocol_sender, network_to_protocol_sender) = Protocol::new( + status_sinks, + is_offline.clone(), + is_major_syncing.clone(), + peers.clone(), + network_sender.clone(), + config.clone(), + client.clone(), + import_queue.clone(), + None, + tx_pool, + specialization, + ) + .unwrap(); + + let peer = Arc::new(Peer::new( + is_offline, + is_major_syncing, + peers, + client, + import_queue, + network_to_protocol_sender, + protocol_sender, + network_sender, + network_port, + data, + )); + + self.mut_peers(|peers| peers.push(peer)); + } + + /// Start network. + fn start(&mut self) { + if self.started() { + return; + } + for peer in self.peers() { + peer.start(); + for client in self.peers() { + if peer.peer_id != client.peer_id { + peer.on_connect(client); + } + } + } + self.route(None); + self.set_started(true); + } + + /// Do one step of routing. + fn route(&mut self, disconnected: Option>) { + self.mut_peers(move |peers| { + let mut to_disconnect = HashSet::new(); + for (peer_pos, peer) in peers.iter().enumerate() { + let packet = peer.pending_message(); + match packet { + None => continue, + Some(NetworkMsg::Outgoing(recipient, packet)) => { + let recipient = peers.iter().position(|p| p.peer_id == recipient).unwrap(); + if let Some(disconnected) = disconnected.as_ref() { + let mut current = HashSet::new(); + current.insert(peer_pos); + current.insert(recipient); + // Not routing message between "disconnected" nodes. + if disconnected.is_subset(¤t) { + continue; + } + } + peers[recipient].receive_message(peer, packet) + } + Some(NetworkMsg::ReportPeer(who, _)) => { + to_disconnect.insert(who); + } + } + } + for d in to_disconnect { + if let Some(d) = peers.iter().find(|p| p.peer_id == d) { + for peer in 0..peers.len() { + peers[peer].on_disconnect(d); + } + } + } + }); + } + + /// Route all pending outgoing messages, without waiting or disconnecting. + fn route_fast(&mut self) { + self.mut_peers(move |peers| { + for peer in 0..peers.len() { + while let Some(NetworkMsg::Outgoing(recipient, packet)) = + peers[peer].pending_message_fast() + { + if let Some(p) = peers.iter().find(|p| p.peer_id == recipient) { + p.receive_message(&peers[peer], packet) + } + } + } + }); + } + + /// Do a step of synchronization. + fn sync_step(&mut self) { + self.route(None); + + self.mut_peers(|peers| { + for peer in peers { + peer.sync_step(); + } + }) + } + + /// Send block import notifications for all peers. + fn send_import_notifications(&mut self) { + self.mut_peers(|peers| { + for peer in peers { + peer.send_import_notifications(); + } + }) + } + + /// Send block finalization notifications for all peers. 
+ fn send_finality_notifications(&mut self) {
+ self.mut_peers(|peers| {
+ for peer in peers {
+ peer.send_finality_notifications();
+ }
+ })
+ }
+
+ /// Restart sync for a peer.
+ fn restart_peer(&mut self, i: usize) {
+ self.peers()[i].restart_sync();
+ }
+
+ /// Perform synchronization until complete. If a set of disconnected
+ /// nodes is provided, sync between those nodes is skipped.
+ fn sync_with(&mut self, disconnected: Option>) -> u32 {
+ self.start();
+ let mut total_steps = 0;
+ let mut done = 0;
+
+ loop {
+ if done > 3 {
+ break;
+ }
+ if self.done() {
+ done += 1;
+ } else {
+ done = 0;
+ }
+
+ self.sync_step();
+ self.route(disconnected.clone());
+
+ total_steps += 1;
+ }
+
+ total_steps
+ }
+
+ /// Perform synchronization until complete.
+ fn sync(&mut self) -> u32 {
+ self.sync_with(None)
+ }
+
+ /// Perform synchronization until complete,
+ /// excluding sync between certain nodes.
+ fn sync_with_disconnected(&mut self, disconnected: HashSet) -> u32 {
+ self.sync_with(Some(disconnected))
+ }
+
+ /// Do the given number of sync steps.
+ fn sync_steps(&mut self, count: usize) {
+ self.start();
+ for _ in 0..count {
+ self.sync_step();
+ }
+ }
+
+ /// Whether all peers have synced.
+ fn done(&self) -> bool {
+ self.peers().iter().all(|p| p.is_done())
+ }
 }

 pub struct TestNet {
- peers: Vec>>,
- started: bool,
+ peers: Vec>>,
+ started: bool,
 }

 impl TestNetFactory for TestNet {
- type Specialization = DummySpecialization;
- type Verifier = PassThroughVerifier;
- type PeerData = ();
-
- /// Create new test network with peers and given config.
- fn from_config(_config: &ProtocolConfig) -> Self {
- TestNet {
- peers: Vec::new(),
- started: false
- }
- }
-
- fn make_verifier(&self, _client: Arc, _config: &ProtocolConfig)
- -> Arc
- {
- Arc::new(PassThroughVerifier(false))
- }
-
- fn peer(&self, i: usize) -> &Peer<(), Self::Specialization> {
- &self.peers[i]
- }
-
- fn peers(&self) -> &Vec>> {
- &self.peers
- }
-
- fn mut_peers>>)>(&mut self, closure: F) {
- closure(&mut self.peers);
- }
-
- fn started(&self) -> bool {
- self.started
- }
-
- fn set_started(&mut self, new: bool) {
- self.started = new;
- }
+ type Specialization = DummySpecialization;
+ type Verifier = PassThroughVerifier;
+ type PeerData = ();
+
+ /// Create new test network with peers and given config.
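The `sync_with` loop above declares the network quiescent only after `done()` has held for several consecutive iterations, guarding against a transiently empty message queue. The same heuristic in isolation (a simplified sketch; the step closure is illustrative and folds `sync_step`/`route` into one call):

// Step until `step()` reports quiescence more than three times in a row.
fn run_until_quiescent(mut step: impl FnMut() -> bool) -> u32 {
    let mut total_steps = 0;
    let mut done = 0;
    loop {
        if done > 3 {
            break;
        }
        if step() {
            done += 1;
        } else {
            done = 0;
        }
        total_steps += 1;
    }
    total_steps
}

fn main() {
    let mut work_left = 5;
    let steps = run_until_quiescent(|| {
        if work_left > 0 {
            work_left -= 1;
            false
        } else {
            true
        }
    });
    assert_eq!(steps, 9); // five busy steps, then four quiet confirmations
}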
+ fn from_config(_config: &ProtocolConfig) -> Self { + TestNet { + peers: Vec::new(), + started: false, + } + } + + fn make_verifier( + &self, + _client: Arc, + _config: &ProtocolConfig, + ) -> Arc { + Arc::new(PassThroughVerifier(false)) + } + + fn peer(&self, i: usize) -> &Peer<(), Self::Specialization> { + &self.peers[i] + } + + fn peers(&self) -> &Vec>> { + &self.peers + } + + fn mut_peers>>)>(&mut self, closure: F) { + closure(&mut self.peers); + } + + fn started(&self) -> bool { + self.started + } + + fn set_started(&mut self, new: bool) { + self.started = new; + } } pub struct ForceFinalized(Arc); impl JustificationImport for ForceFinalized { - type Error = ConsensusError; - - fn import_justification( - &self, - hash: H256, - _number: NumberFor, - justification: Justification, - ) -> Result<(), Self::Error> { - self.0.finalize_block(BlockId::Hash(hash), Some(justification), true) - .map_err(|_| ConsensusErrorKind::InvalidJustification.into()) - } + type Error = ConsensusError; + + fn import_justification( + &self, + hash: H256, + _number: NumberFor, + justification: Justification, + ) -> Result<(), Self::Error> { + self.0 + .finalize_block(BlockId::Hash(hash), Some(justification), true) + .map_err(|_| ConsensusErrorKind::InvalidJustification.into()) + } } pub struct JustificationTestNet(TestNet); impl TestNetFactory for JustificationTestNet { - type Specialization = DummySpecialization; - type Verifier = PassThroughVerifier; - type PeerData = (); - - fn from_config(config: &ProtocolConfig) -> Self { - JustificationTestNet(TestNet::from_config(config)) - } - - fn make_verifier(&self, client: Arc, config: &ProtocolConfig) - -> Arc - { - self.0.make_verifier(client, config) - } - - fn peer(&self, i: usize) -> &Peer { - self.0.peer(i) - } - - fn peers(&self) -> &Vec>> { - self.0.peers() - } - - fn mut_peers>>)>(&mut self, closure: F ) { - self.0.mut_peers(closure) - } - - fn started(&self) -> bool { - self.0.started() - } - - fn set_started(&mut self, new: bool) { - self.0.set_started(new) - } - - fn make_block_import(&self, client: Arc) - -> (SharedBlockImport, Option>, Self::PeerData) - { - (client.clone(), Some(Arc::new(ForceFinalized(client))), Default::default()) - } + type Specialization = DummySpecialization; + type Verifier = PassThroughVerifier; + type PeerData = (); + + fn from_config(config: &ProtocolConfig) -> Self { + JustificationTestNet(TestNet::from_config(config)) + } + + fn make_verifier( + &self, + client: Arc, + config: &ProtocolConfig, + ) -> Arc { + self.0.make_verifier(client, config) + } + + fn peer(&self, i: usize) -> &Peer { + self.0.peer(i) + } + + fn peers(&self) -> &Vec>> { + self.0.peers() + } + + fn mut_peers>>)>( + &mut self, + closure: F, + ) { + self.0.mut_peers(closure) + } + + fn started(&self) -> bool { + self.0.started() + } + + fn set_started(&mut self, new: bool) { + self.0.set_started(new) + } + + fn make_block_import( + &self, + client: Arc, + ) -> ( + SharedBlockImport, + Option>, + Self::PeerData, + ) { + ( + client.clone(), + Some(Arc::new(ForceFinalized(client))), + Default::default(), + ) + } } diff --git a/core/network/src/test/sync.rs b/core/network/src/test/sync.rs index 9bbf0a32b7..c091b6a26d 100644 --- a/core/network/src/test/sync.rs +++ b/core/network/src/test/sync.rs @@ -14,400 +14,637 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
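`JustificationTestNet` above is a plain newtype over `TestNet` that forwards every trait method and overrides only `make_block_import`. The delegation pattern in miniature (trait and types here are illustrative):

trait Counter {
    fn get(&self) -> u32;
    fn bump(&mut self);
}

struct Basic(u32);

impl Counter for Basic {
    fn get(&self) -> u32 {
        self.0
    }
    fn bump(&mut self) {
        self.0 += 1;
    }
}

// Newtype forwarding to the inner value, like JustificationTestNet(TestNet);
// a real wrapper would override just the methods it needs to specialize.
struct Wrapped(Basic);

impl Counter for Wrapped {
    fn get(&self) -> u32 {
        self.0.get()
    }
    fn bump(&mut self) {
        self.0.bump()
    }
}

fn main() {
    let mut w = Wrapped(Basic(0));
    w.bump();
    assert_eq!(w.get(), 1);
}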
+use super::*; +use crate::config::Roles; use client::backend::Backend; use client::blockchain::HeaderBackend as BlockchainHeaderBackend; -use crate::config::Roles; use consensus::BlockOrigin; use std::collections::HashSet; use std::thread; use std::time::Duration; -use super::*; fn test_ancestor_search_when_common_is(n: usize) { - let _ = ::env_logger::try_init(); - let mut net = TestNet::new(3); - - net.peer(0).push_blocks(n, false); - net.peer(1).push_blocks(n, false); - net.peer(2).push_blocks(n, false); - - net.peer(0).push_blocks(10, true); - net.peer(1).push_blocks(100, false); - net.peer(2).push_blocks(100, false); - - net.restart_peer(0); - net.sync(); - assert!(net.peer(0).client.backend().as_in_memory().blockchain() - .canon_equals_to(net.peer(1).client.backend().as_in_memory().blockchain())); + let _ = ::env_logger::try_init(); + let mut net = TestNet::new(3); + + net.peer(0).push_blocks(n, false); + net.peer(1).push_blocks(n, false); + net.peer(2).push_blocks(n, false); + + net.peer(0).push_blocks(10, true); + net.peer(1).push_blocks(100, false); + net.peer(2).push_blocks(100, false); + + net.restart_peer(0); + net.sync(); + assert!(net + .peer(0) + .client + .backend() + .as_in_memory() + .blockchain() + .canon_equals_to(net.peer(1).client.backend().as_in_memory().blockchain())); } #[test] fn sync_peers_works() { - let _ = ::env_logger::try_init(); - let mut net = TestNet::new(3); - net.sync(); - for peer in 0..3 { - // Assert peers is up to date. - let peers = net.peer(peer).peers.read(); - assert_eq!(peers.len(), 2); - // And then disconnect. - for other in 0..3 { - if other != peer { - net.peer(peer).on_disconnect(net.peer(other)); - } - } - } - net.sync(); - // Now peers are disconnected. - for peer in 0..3 { - let peers = net.peer(peer).peers.read(); - assert_eq!(peers.len(), 0); - } + let _ = ::env_logger::try_init(); + let mut net = TestNet::new(3); + net.sync(); + for peer in 0..3 { + // Assert peers is up to date. + let peers = net.peer(peer).peers.read(); + assert_eq!(peers.len(), 2); + // And then disconnect. + for other in 0..3 { + if other != peer { + net.peer(peer).on_disconnect(net.peer(other)); + } + } + } + net.sync(); + // Now peers are disconnected. + for peer in 0..3 { + let peers = net.peer(peer).peers.read(); + assert_eq!(peers.len(), 0); + } } #[test] fn sync_cycle_from_offline_to_syncing_to_offline() { - let _ = ::env_logger::try_init(); - let mut net = TestNet::new(3); - for peer in 0..3 { - // Offline, and not major syncing. - assert!(net.peer(peer).is_offline()); - assert!(!net.peer(peer).is_major_syncing()); - } - - // Generate blocks. - net.peer(2).push_blocks(100, false); - net.start(); - net.route_fast(); - thread::sleep(Duration::from_millis(100)); - net.route_fast(); - for peer in 0..3 { - // Online - assert!(!net.peer(peer).is_offline()); - if peer < 2 { - // Major syncing. - assert!(net.peer(peer).is_major_syncing()); - } - } - net.sync(); - for peer in 0..3 { - // All done syncing. - assert!(!net.peer(peer).is_major_syncing()); - } - - // Now disconnect them all. - for peer in 0..3 { - for other in 0..3 { - if other != peer { - net.peer(peer).on_disconnect(net.peer(other)); - } - } - thread::sleep(Duration::from_millis(100)); - assert!(net.peer(peer).is_offline()); - assert!(!net.peer(peer).is_major_syncing()); - } + let _ = ::env_logger::try_init(); + let mut net = TestNet::new(3); + for peer in 0..3 { + // Offline, and not major syncing. 
+ assert!(net.peer(peer).is_offline());
+ assert!(!net.peer(peer).is_major_syncing());
+ }
+
+ // Generate blocks.
+ net.peer(2).push_blocks(100, false);
+ net.start();
+ net.route_fast();
+ thread::sleep(Duration::from_millis(100));
+ net.route_fast();
+ for peer in 0..3 {
+ // Online
+ assert!(!net.peer(peer).is_offline());
+ if peer < 2 {
+ // Major syncing.
+ assert!(net.peer(peer).is_major_syncing());
+ }
+ }
+ net.sync();
+ for peer in 0..3 {
+ // All done syncing.
+ assert!(!net.peer(peer).is_major_syncing());
+ }
+
+ // Now disconnect them all.
+ for peer in 0..3 {
+ for other in 0..3 {
+ if other != peer {
+ net.peer(peer).on_disconnect(net.peer(other));
+ }
+ }
+ thread::sleep(Duration::from_millis(100));
+ assert!(net.peer(peer).is_offline());
+ assert!(!net.peer(peer).is_major_syncing());
+ }
}

#[test]
fn syncing_node_not_major_syncing_when_disconnected() {
- let _ = ::env_logger::try_init();
- let mut net = TestNet::new(3);
-
- // Generate blocks.
- net.peer(2).push_blocks(100, false);
- net.start();
- net.route_fast();
- thread::sleep(Duration::from_millis(100));
- net.route_fast();
-
- // Peer 1 is major-syncing.
- assert!(net.peer(1).is_major_syncing());
-
- // Disconnect peer 1 form everyone else.
- net.peer(1).on_disconnect(net.peer(0));
- net.peer(1).on_disconnect(net.peer(2));
- thread::sleep(Duration::from_millis(100));
-
- // Peer 1 is not major-syncing.
- assert!(!net.peer(1).is_major_syncing());
+ let _ = ::env_logger::try_init();
+ let mut net = TestNet::new(3);
+
+ // Generate blocks.
+ net.peer(2).push_blocks(100, false);
+ net.start();
+ net.route_fast();
+ thread::sleep(Duration::from_millis(100));
+ net.route_fast();
+
+ // Peer 1 is major-syncing.
+ assert!(net.peer(1).is_major_syncing());
+
+ // Disconnect peer 1 from everyone else.
+ net.peer(1).on_disconnect(net.peer(0));
+ net.peer(1).on_disconnect(net.peer(2));
+ thread::sleep(Duration::from_millis(100));
+
+ // Peer 1 is not major-syncing.
+ assert!(!net.peer(1).is_major_syncing()); } #[test] fn sync_from_two_peers_works() { - let _ = ::env_logger::try_init(); - let mut net = TestNet::new(3); - net.peer(1).push_blocks(100, false); - net.peer(2).push_blocks(100, false); - net.sync(); - assert!(net.peer(0).client.backend().as_in_memory().blockchain() - .equals_to(net.peer(1).client.backend().as_in_memory().blockchain())); - assert!(!net.peer(0).is_major_syncing()); + let _ = ::env_logger::try_init(); + let mut net = TestNet::new(3); + net.peer(1).push_blocks(100, false); + net.peer(2).push_blocks(100, false); + net.sync(); + assert!(net + .peer(0) + .client + .backend() + .as_in_memory() + .blockchain() + .equals_to(net.peer(1).client.backend().as_in_memory().blockchain())); + assert!(!net.peer(0).is_major_syncing()); } #[test] fn sync_from_two_peers_with_ancestry_search_works() { - let _ = ::env_logger::try_init(); - let mut net = TestNet::new(3); - net.peer(0).push_blocks(10, true); - net.peer(1).push_blocks(100, false); - net.peer(2).push_blocks(100, false); - net.restart_peer(0); - net.sync(); - assert!(net.peer(0).client.backend().as_in_memory().blockchain() - .canon_equals_to(net.peer(1).client.backend().as_in_memory().blockchain())); + let _ = ::env_logger::try_init(); + let mut net = TestNet::new(3); + net.peer(0).push_blocks(10, true); + net.peer(1).push_blocks(100, false); + net.peer(2).push_blocks(100, false); + net.restart_peer(0); + net.sync(); + assert!(net + .peer(0) + .client + .backend() + .as_in_memory() + .blockchain() + .canon_equals_to(net.peer(1).client.backend().as_in_memory().blockchain())); } #[test] fn ancestry_search_works_when_backoff_is_one() { - let _ = ::env_logger::try_init(); - let mut net = TestNet::new(3); - - net.peer(0).push_blocks(1, false); - net.peer(1).push_blocks(2, false); - net.peer(2).push_blocks(2, false); - - net.restart_peer(0); - net.sync(); - assert!(net.peer(0).client.backend().as_in_memory().blockchain() - .canon_equals_to(net.peer(1).client.backend().as_in_memory().blockchain())); + let _ = ::env_logger::try_init(); + let mut net = TestNet::new(3); + + net.peer(0).push_blocks(1, false); + net.peer(1).push_blocks(2, false); + net.peer(2).push_blocks(2, false); + + net.restart_peer(0); + net.sync(); + assert!(net + .peer(0) + .client + .backend() + .as_in_memory() + .blockchain() + .canon_equals_to(net.peer(1).client.backend().as_in_memory().blockchain())); } #[test] fn ancestry_search_works_when_ancestor_is_genesis() { - let _ = ::env_logger::try_init(); - let mut net = TestNet::new(3); - - net.peer(0).push_blocks(13, true); - net.peer(1).push_blocks(100, false); - net.peer(2).push_blocks(100, false); - - net.restart_peer(0); - net.sync(); - assert!(net.peer(0).client.backend().as_in_memory().blockchain() - .canon_equals_to(net.peer(1).client.backend().as_in_memory().blockchain())); + let _ = ::env_logger::try_init(); + let mut net = TestNet::new(3); + + net.peer(0).push_blocks(13, true); + net.peer(1).push_blocks(100, false); + net.peer(2).push_blocks(100, false); + + net.restart_peer(0); + net.sync(); + assert!(net + .peer(0) + .client + .backend() + .as_in_memory() + .blockchain() + .canon_equals_to(net.peer(1).client.backend().as_in_memory().blockchain())); } #[test] fn ancestry_search_works_when_common_is_one() { - test_ancestor_search_when_common_is(1); + test_ancestor_search_when_common_is(1); } #[test] fn ancestry_search_works_when_common_is_two() { - test_ancestor_search_when_common_is(2); + test_ancestor_search_when_common_is(2); } #[test] fn 
ancestry_search_works_when_common_is_hundred() { - test_ancestor_search_when_common_is(100); + test_ancestor_search_when_common_is(100); } #[test] fn sync_long_chain_works() { - let mut net = TestNet::new(2); - net.peer(1).push_blocks(500, false); - net.sync(); - assert!(net.peer(0).client.backend().as_in_memory().blockchain() - .equals_to(net.peer(1).client.backend().as_in_memory().blockchain())); + let mut net = TestNet::new(2); + net.peer(1).push_blocks(500, false); + net.sync(); + assert!(net + .peer(0) + .client + .backend() + .as_in_memory() + .blockchain() + .equals_to(net.peer(1).client.backend().as_in_memory().blockchain())); } #[test] fn sync_no_common_longer_chain_fails() { - let _ = ::env_logger::try_init(); - let mut net = TestNet::new(3); - net.peer(0).push_blocks(20, true); - net.peer(1).push_blocks(20, false); - net.sync(); - assert!(!net.peer(0).client.backend().as_in_memory().blockchain() - .canon_equals_to(net.peer(1).client.backend().as_in_memory().blockchain())); + let _ = ::env_logger::try_init(); + let mut net = TestNet::new(3); + net.peer(0).push_blocks(20, true); + net.peer(1).push_blocks(20, false); + net.sync(); + assert!(!net + .peer(0) + .client + .backend() + .as_in_memory() + .blockchain() + .canon_equals_to(net.peer(1).client.backend().as_in_memory().blockchain())); } #[test] fn sync_justifications() { - let _ = ::env_logger::try_init(); - let mut net = JustificationTestNet::new(3); - net.peer(0).push_blocks(20, false); - net.sync(); - - // there's currently no justification for block #10 - assert_eq!(net.peer(0).client().justification(&BlockId::Number(10)).unwrap(), None); - assert_eq!(net.peer(1).client().justification(&BlockId::Number(10)).unwrap(), None); - - // we finalize block #10, #15 and #20 for peer 0 with a justification - net.peer(0).client().finalize_block(BlockId::Number(10), Some(Vec::new()), true).unwrap(); - net.peer(0).client().finalize_block(BlockId::Number(15), Some(Vec::new()), true).unwrap(); - net.peer(0).client().finalize_block(BlockId::Number(20), Some(Vec::new()), true).unwrap(); - - let h1 = net.peer(1).client().header(&BlockId::Number(10)).unwrap().unwrap(); - let h2 = net.peer(1).client().header(&BlockId::Number(15)).unwrap().unwrap(); - let h3 = net.peer(1).client().header(&BlockId::Number(20)).unwrap().unwrap(); - - // peer 1 should get the justifications from the network - net.peer(1).request_justification(&h1.hash().into(), 10); - net.peer(1).request_justification(&h2.hash().into(), 15); - net.peer(1).request_justification(&h3.hash().into(), 20); - - net.sync(); - - for height in (10..21).step_by(5) { - assert_eq!(net.peer(0).client().justification(&BlockId::Number(height)).unwrap(), Some(Vec::new())); - assert_eq!(net.peer(1).client().justification(&BlockId::Number(height)).unwrap(), Some(Vec::new())); - } + let _ = ::env_logger::try_init(); + let mut net = JustificationTestNet::new(3); + net.peer(0).push_blocks(20, false); + net.sync(); + + // there's currently no justification for block #10 + assert_eq!( + net.peer(0) + .client() + .justification(&BlockId::Number(10)) + .unwrap(), + None + ); + assert_eq!( + net.peer(1) + .client() + .justification(&BlockId::Number(10)) + .unwrap(), + None + ); + + // we finalize block #10, #15 and #20 for peer 0 with a justification + net.peer(0) + .client() + .finalize_block(BlockId::Number(10), Some(Vec::new()), true) + .unwrap(); + net.peer(0) + .client() + .finalize_block(BlockId::Number(15), Some(Vec::new()), true) + .unwrap(); + net.peer(0) + .client() + 
.finalize_block(BlockId::Number(20), Some(Vec::new()), true) + .unwrap(); + + let h1 = net + .peer(1) + .client() + .header(&BlockId::Number(10)) + .unwrap() + .unwrap(); + let h2 = net + .peer(1) + .client() + .header(&BlockId::Number(15)) + .unwrap() + .unwrap(); + let h3 = net + .peer(1) + .client() + .header(&BlockId::Number(20)) + .unwrap() + .unwrap(); + + // peer 1 should get the justifications from the network + net.peer(1).request_justification(&h1.hash().into(), 10); + net.peer(1).request_justification(&h2.hash().into(), 15); + net.peer(1).request_justification(&h3.hash().into(), 20); + + net.sync(); + + for height in (10..21).step_by(5) { + assert_eq!( + net.peer(0) + .client() + .justification(&BlockId::Number(height)) + .unwrap(), + Some(Vec::new()) + ); + assert_eq!( + net.peer(1) + .client() + .justification(&BlockId::Number(height)) + .unwrap(), + Some(Vec::new()) + ); + } } #[test] fn sync_justifications_across_forks() { - let _ = ::env_logger::try_init(); - let mut net = JustificationTestNet::new(3); - // we push 5 blocks - net.peer(0).push_blocks(5, false); - // and then two forks 5 and 6 blocks long - let f1_best = net.peer(0).push_blocks_at(BlockId::Number(5), 5, false); - let f2_best = net.peer(0).push_blocks_at(BlockId::Number(5), 6, false); - - // peer 1 will only see the longer fork. but we'll request justifications - // for both and finalize the small fork instead. - net.sync(); - - net.peer(0).client().finalize_block(BlockId::Hash(f1_best), Some(Vec::new()), true).unwrap(); - - net.peer(1).request_justification(&f1_best, 10); - net.peer(1).request_justification(&f2_best, 11); - - net.sync(); - - assert_eq!(net.peer(0).client().justification(&BlockId::Number(10)).unwrap(), Some(Vec::new())); - assert_eq!(net.peer(1).client().justification(&BlockId::Number(10)).unwrap(), Some(Vec::new())); + let _ = ::env_logger::try_init(); + let mut net = JustificationTestNet::new(3); + // we push 5 blocks + net.peer(0).push_blocks(5, false); + // and then two forks 5 and 6 blocks long + let f1_best = net.peer(0).push_blocks_at(BlockId::Number(5), 5, false); + let f2_best = net.peer(0).push_blocks_at(BlockId::Number(5), 6, false); + + // peer 1 will only see the longer fork. but we'll request justifications + // for both and finalize the small fork instead. 
+ net.sync(); + + net.peer(0) + .client() + .finalize_block(BlockId::Hash(f1_best), Some(Vec::new()), true) + .unwrap(); + + net.peer(1).request_justification(&f1_best, 10); + net.peer(1).request_justification(&f2_best, 11); + + net.sync(); + + assert_eq!( + net.peer(0) + .client() + .justification(&BlockId::Number(10)) + .unwrap(), + Some(Vec::new()) + ); + assert_eq!( + net.peer(1) + .client() + .justification(&BlockId::Number(10)) + .unwrap(), + Some(Vec::new()) + ); } #[test] fn sync_after_fork_works() { - let _ = ::env_logger::try_init(); - let mut net = TestNet::new(3); - net.sync_step(); - net.peer(0).push_blocks(30, false); - net.peer(1).push_blocks(30, false); - net.peer(2).push_blocks(30, false); - - net.peer(0).push_blocks(10, true); - net.peer(1).push_blocks(20, false); - net.peer(2).push_blocks(20, false); - - net.peer(1).push_blocks(10, true); - net.peer(2).push_blocks(1, false); - - // peer 1 has the best chain - let peer1_chain = net.peer(1).client.backend().as_in_memory().blockchain().clone(); - net.sync(); - assert!(net.peer(0).client.backend().as_in_memory().blockchain().canon_equals_to(&peer1_chain)); - assert!(net.peer(1).client.backend().as_in_memory().blockchain().canon_equals_to(&peer1_chain)); - assert!(net.peer(2).client.backend().as_in_memory().blockchain().canon_equals_to(&peer1_chain)); + let _ = ::env_logger::try_init(); + let mut net = TestNet::new(3); + net.sync_step(); + net.peer(0).push_blocks(30, false); + net.peer(1).push_blocks(30, false); + net.peer(2).push_blocks(30, false); + + net.peer(0).push_blocks(10, true); + net.peer(1).push_blocks(20, false); + net.peer(2).push_blocks(20, false); + + net.peer(1).push_blocks(10, true); + net.peer(2).push_blocks(1, false); + + // peer 1 has the best chain + let peer1_chain = net + .peer(1) + .client + .backend() + .as_in_memory() + .blockchain() + .clone(); + net.sync(); + assert!(net + .peer(0) + .client + .backend() + .as_in_memory() + .blockchain() + .canon_equals_to(&peer1_chain)); + assert!(net + .peer(1) + .client + .backend() + .as_in_memory() + .blockchain() + .canon_equals_to(&peer1_chain)); + assert!(net + .peer(2) + .client + .backend() + .as_in_memory() + .blockchain() + .canon_equals_to(&peer1_chain)); } #[test] fn syncs_all_forks() { - let _ = ::env_logger::try_init(); - let mut net = TestNet::new(4); - net.sync_step(); - net.peer(0).push_blocks(2, false); - net.peer(1).push_blocks(2, false); - - net.peer(0).push_blocks(2, true); - net.peer(1).push_blocks(4, false); - - net.sync(); - // Check that all peers have all of the blocks. - assert_eq!(9, net.peer(0).client.backend().as_in_memory().blockchain().blocks_count()); - assert_eq!(9, net.peer(1).client.backend().as_in_memory().blockchain().blocks_count()); + let _ = ::env_logger::try_init(); + let mut net = TestNet::new(4); + net.sync_step(); + net.peer(0).push_blocks(2, false); + net.peer(1).push_blocks(2, false); + + net.peer(0).push_blocks(2, true); + net.peer(1).push_blocks(4, false); + + net.sync(); + // Check that all peers have all of the blocks. 
+ assert_eq!( + 9, + net.peer(0) + .client + .backend() + .as_in_memory() + .blockchain() + .blocks_count() + ); + assert_eq!( + 9, + net.peer(1) + .client + .backend() + .as_in_memory() + .blockchain() + .blocks_count() + ); } #[test] fn own_blocks_are_announced() { - let _ = ::env_logger::try_init(); - let mut net = TestNet::new(3); - net.sync(); // connect'em - net.peer(0).generate_blocks(1, BlockOrigin::Own, |builder| builder.bake().unwrap()); - - let header = net.peer(0).client().header(&BlockId::Number(1)).unwrap().unwrap(); - net.peer(0).on_block_imported(header.hash(), &header); - net.sync(); - - assert_eq!(net.peer(0).client.backend().blockchain().info().unwrap().best_number, 1); - assert_eq!(net.peer(1).client.backend().blockchain().info().unwrap().best_number, 1); - let peer0_chain = net.peer(0).client.backend().as_in_memory().blockchain().clone(); - assert!(net.peer(1).client.backend().as_in_memory().blockchain().canon_equals_to(&peer0_chain)); - assert!(net.peer(2).client.backend().as_in_memory().blockchain().canon_equals_to(&peer0_chain)); + let _ = ::env_logger::try_init(); + let mut net = TestNet::new(3); + net.sync(); // connect'em + net.peer(0) + .generate_blocks(1, BlockOrigin::Own, |builder| builder.bake().unwrap()); + + let header = net + .peer(0) + .client() + .header(&BlockId::Number(1)) + .unwrap() + .unwrap(); + net.peer(0).on_block_imported(header.hash(), &header); + net.sync(); + + assert_eq!( + net.peer(0) + .client + .backend() + .blockchain() + .info() + .unwrap() + .best_number, + 1 + ); + assert_eq!( + net.peer(1) + .client + .backend() + .blockchain() + .info() + .unwrap() + .best_number, + 1 + ); + let peer0_chain = net + .peer(0) + .client + .backend() + .as_in_memory() + .blockchain() + .clone(); + assert!(net + .peer(1) + .client + .backend() + .as_in_memory() + .blockchain() + .canon_equals_to(&peer0_chain)); + assert!(net + .peer(2) + .client + .backend() + .as_in_memory() + .blockchain() + .canon_equals_to(&peer0_chain)); } #[test] fn blocks_are_not_announced_by_light_nodes() { - let _ = ::env_logger::try_init(); - let mut net = TestNet::new(0); - - // full peer0 is connected to light peer - // light peer1 is connected to full peer2 - let mut light_config = ProtocolConfig::default(); - light_config.roles = Roles::LIGHT; - net.add_peer(&ProtocolConfig::default()); - net.add_peer(&light_config); - net.add_peer(&ProtocolConfig::default()); - - net.peer(0).push_blocks(1, false); - net.peer(0).start(); - net.peer(1).start(); - net.peer(2).start(); - net.peer(0).on_connect(net.peer(1)); - net.peer(1).on_connect(net.peer(2)); - - // Only sync between 0 -> 1, and 1 -> 2 - let mut disconnected = HashSet::new(); - disconnected.insert(0); - disconnected.insert(2); - net.sync_with_disconnected(disconnected); - - // peer 0 has the best chain - // peer 1 has the best chain - // peer 2 has genesis-chain only - assert_eq!(net.peer(0).client.backend().blockchain().info().unwrap().best_number, 1); - assert_eq!(net.peer(1).client.backend().blockchain().info().unwrap().best_number, 1); - assert_eq!(net.peer(2).client.backend().blockchain().info().unwrap().best_number, 0); + let _ = ::env_logger::try_init(); + let mut net = TestNet::new(0); + + // full peer0 is connected to light peer + // light peer1 is connected to full peer2 + let mut light_config = ProtocolConfig::default(); + light_config.roles = Roles::LIGHT; + net.add_peer(&ProtocolConfig::default()); + net.add_peer(&light_config); + net.add_peer(&ProtocolConfig::default()); + + net.peer(0).push_blocks(1, false); 
+ net.peer(0).start(); + net.peer(1).start(); + net.peer(2).start(); + net.peer(0).on_connect(net.peer(1)); + net.peer(1).on_connect(net.peer(2)); + + // Only sync between 0 -> 1, and 1 -> 2 + let mut disconnected = HashSet::new(); + disconnected.insert(0); + disconnected.insert(2); + net.sync_with_disconnected(disconnected); + + // peer 0 has the best chain + // peer 1 has the best chain + // peer 2 has genesis-chain only + assert_eq!( + net.peer(0) + .client + .backend() + .blockchain() + .info() + .unwrap() + .best_number, + 1 + ); + assert_eq!( + net.peer(1) + .client + .backend() + .blockchain() + .info() + .unwrap() + .best_number, + 1 + ); + assert_eq!( + net.peer(2) + .client + .backend() + .blockchain() + .info() + .unwrap() + .best_number, + 0 + ); } #[test] fn can_sync_small_non_best_forks() { - let _ = ::env_logger::try_init(); - let mut net = TestNet::new(2); - net.sync_step(); - net.peer(0).push_blocks(30, false); - net.peer(1).push_blocks(30, false); - - // small fork + reorg on peer 1. - net.peer(0).push_blocks_at(BlockId::Number(30), 2, true); - let small_hash = net.peer(0).client().info().unwrap().chain.best_hash; - net.peer(0).push_blocks_at(BlockId::Number(30), 10, false); - assert_eq!(net.peer(0).client().info().unwrap().chain.best_number, 40); - - // peer 1 only ever had the long fork. - net.peer(1).push_blocks(10, false); - assert_eq!(net.peer(1).client().info().unwrap().chain.best_number, 40); - - assert!(net.peer(0).client().header(&BlockId::Hash(small_hash)).unwrap().is_some()); - assert!(net.peer(1).client().header(&BlockId::Hash(small_hash)).unwrap().is_none()); - - net.sync(); - - // synchronization: 0 synced to longer chain and 1 didn't sync to small chain. - - assert_eq!(net.peer(0).client().info().unwrap().chain.best_number, 40); - - assert!(net.peer(0).client().header(&BlockId::Hash(small_hash)).unwrap().is_some()); - assert!(!net.peer(1).client().header(&BlockId::Hash(small_hash)).unwrap().is_some()); - - net.peer(0).announce_block(small_hash); - net.sync(); - - // after announcing, peer 1 downloads the block. - - assert!(net.peer(0).client().header(&BlockId::Hash(small_hash)).unwrap().is_some()); - assert!(net.peer(1).client().header(&BlockId::Hash(small_hash)).unwrap().is_some()); + let _ = ::env_logger::try_init(); + let mut net = TestNet::new(2); + net.sync_step(); + net.peer(0).push_blocks(30, false); + net.peer(1).push_blocks(30, false); + + // small fork + reorg on peer 1. + net.peer(0).push_blocks_at(BlockId::Number(30), 2, true); + let small_hash = net.peer(0).client().info().unwrap().chain.best_hash; + net.peer(0).push_blocks_at(BlockId::Number(30), 10, false); + assert_eq!(net.peer(0).client().info().unwrap().chain.best_number, 40); + + // peer 1 only ever had the long fork. + net.peer(1).push_blocks(10, false); + assert_eq!(net.peer(1).client().info().unwrap().chain.best_number, 40); + + assert!(net + .peer(0) + .client() + .header(&BlockId::Hash(small_hash)) + .unwrap() + .is_some()); + assert!(net + .peer(1) + .client() + .header(&BlockId::Hash(small_hash)) + .unwrap() + .is_none()); + + net.sync(); + + // synchronization: 0 synced to longer chain and 1 didn't sync to small chain. 
+ + assert_eq!(net.peer(0).client().info().unwrap().chain.best_number, 40); + + assert!(net + .peer(0) + .client() + .header(&BlockId::Hash(small_hash)) + .unwrap() + .is_some()); + assert!(!net + .peer(1) + .client() + .header(&BlockId::Hash(small_hash)) + .unwrap() + .is_some()); + + net.peer(0).announce_block(small_hash); + net.sync(); + + // after announcing, peer 1 downloads the block. + + assert!(net + .peer(0) + .client() + .header(&BlockId::Hash(small_hash)) + .unwrap() + .is_some()); + assert!(net + .peer(1) + .client() + .header(&BlockId::Hash(small_hash)) + .unwrap() + .is_some()); } diff --git a/core/network/src/util.rs b/core/network/src/util.rs index 5e9e64aae7..10667d2029 100644 --- a/core/network/src/util.rs +++ b/core/network/src/util.rs @@ -22,55 +22,58 @@ use std::{hash::Hash, num::NonZeroUsize}; /// In the limit, for each element inserted the oldest existing element will be removed. #[derive(Debug, Clone)] pub(crate) struct LruHashSet { - set: LinkedHashSet, - limit: NonZeroUsize + set: LinkedHashSet, + limit: NonZeroUsize, } impl LruHashSet { - /// Create a new `LruHashSet` with the given (exclusive) limit. - pub(crate) fn new(limit: NonZeroUsize) -> Self { - Self { set: LinkedHashSet::new(), limit } - } + /// Create a new `LruHashSet` with the given (exclusive) limit. + pub(crate) fn new(limit: NonZeroUsize) -> Self { + Self { + set: LinkedHashSet::new(), + limit, + } + } - /// Insert element into the set. - /// - /// Returns `true` if this is a new element to the set, `false` otherwise. - /// Maintains the limit of the set by removing the oldest entry if necessary. - /// Inserting the same element will update its LRU position. - pub(crate) fn insert(&mut self, e: T) -> bool { - if self.set.insert(e) { - if self.set.len() == usize::from(self.limit) { - self.set.pop_front(); // remove oldest entry - } - return true - } - false - } + /// Insert element into the set. + /// + /// Returns `true` if this is a new element to the set, `false` otherwise. + /// Maintains the limit of the set by removing the oldest entry if necessary. + /// Inserting the same element will update its LRU position. + pub(crate) fn insert(&mut self, e: T) -> bool { + if self.set.insert(e) { + if self.set.len() == usize::from(self.limit) { + self.set.pop_front(); // remove oldest entry + } + return true; + } + false + } } #[cfg(test)] mod tests { - use super::*; + use super::*; - #[test] - fn maintains_limit() { - let three = NonZeroUsize::new(3).unwrap(); - let mut set = LruHashSet::::new(three); + #[test] + fn maintains_limit() { + let three = NonZeroUsize::new(3).unwrap(); + let mut set = LruHashSet::::new(three); - // First element. - assert!(set.insert(1)); - assert_eq!(vec![&1], set.set.iter().collect::>()); + // First element. + assert!(set.insert(1)); + assert_eq!(vec![&1], set.set.iter().collect::>()); - // Second element. - assert!(set.insert(2)); - assert_eq!(vec![&1, &2], set.set.iter().collect::>()); + // Second element. + assert!(set.insert(2)); + assert_eq!(vec![&1, &2], set.set.iter().collect::>()); - // Inserting the same element updates its LRU position. - assert!(!set.insert(1)); - assert_eq!(vec![&2, &1], set.set.iter().collect::>()); + // Inserting the same element updates its LRU position. + assert!(!set.insert(1)); + assert_eq!(vec![&2, &1], set.set.iter().collect::>()); - // We reached the limit. The next element forces the oldest one out. - assert!(set.insert(3)); - assert_eq!(vec![&1, &3], set.set.iter().collect::>()); - } + // We reached the limit. 
The next element forces the oldest one out. + assert!(set.insert(3)); + assert_eq!(vec![&1, &3], set.set.iter().collect::>()); + } } diff --git a/core/offchain/primitives/src/lib.rs b/core/offchain/primitives/src/lib.rs index c05e8dceb9..01f0a4bd9f 100644 --- a/core/offchain/primitives/src/lib.rs +++ b/core/offchain/primitives/src/lib.rs @@ -23,9 +23,9 @@ use client::decl_runtime_apis; use runtime_primitives::traits::NumberFor; decl_runtime_apis! { - /// The offchain worker api. - pub trait OffchainWorkerApi { - /// Starts the off-chain task for given block number. - fn offchain_worker(number: NumberFor); - } + /// The offchain worker api. + pub trait OffchainWorkerApi { + /// Starts the off-chain task for given block number. + fn offchain_worker(number: NumberFor); + } } diff --git a/core/offchain/src/api.rs b/core/offchain/src/api.rs index 5d2a636be3..fe3e4fb470 100644 --- a/core/offchain/src/api.rs +++ b/core/offchain/src/api.rs @@ -14,21 +14,21 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use std::sync::Arc; -use futures::{Stream, Future, sync::mpsc}; +use futures::{sync::mpsc, Future, Stream}; use inherents::pool::InherentsPool; -use log::{info, debug, warn}; +use log::{debug, info, warn}; use parity_codec::Decode; use primitives::OffchainExt; use runtime_primitives::{ - generic::BlockId, - traits::{self, Extrinsic}, + generic::BlockId, + traits::{self, Extrinsic}, }; -use transaction_pool::txpool::{Pool, ChainApi}; +use std::sync::Arc; +use transaction_pool::txpool::{ChainApi, Pool}; /// A message between the offchain extension and the processing thread. enum ExtMessage { - SubmitExtrinsic(Vec), + SubmitExtrinsic(Vec), } /// Asynchronous offchain API. @@ -37,63 +37,67 @@ enum ExtMessage { pub(crate) struct AsyncApi(mpsc::UnboundedSender); impl OffchainExt for AsyncApi { - fn submit_extrinsic(&mut self, ext: Vec) { - let _ = self.0.unbounded_send(ExtMessage::SubmitExtrinsic(ext)); - } + fn submit_extrinsic(&mut self, ext: Vec) { + let _ = self.0.unbounded_send(ExtMessage::SubmitExtrinsic(ext)); + } } /// Offchain extensions implementation API pub(crate) struct Api { - receiver: Option>, - transaction_pool: Arc>, - inherents_pool: Arc::Extrinsic>>, - at: BlockId, + receiver: Option>, + transaction_pool: Arc>, + inherents_pool: Arc::Extrinsic>>, + at: BlockId, } impl Api { - pub fn new( - transaction_pool: Arc>, - inherents_pool: Arc::Extrinsic>>, - at: BlockId, - ) -> (AsyncApi, Self) { - let (tx, rx) = mpsc::unbounded(); - let api = Self { - receiver: Some(rx), - transaction_pool, - inherents_pool, - at, - }; - (AsyncApi(tx), api) - } + pub fn new( + transaction_pool: Arc>, + inherents_pool: Arc::Extrinsic>>, + at: BlockId, + ) -> (AsyncApi, Self) { + let (tx, rx) = mpsc::unbounded(); + let api = Self { + receiver: Some(rx), + transaction_pool, + inherents_pool, + at, + }; + (AsyncApi(tx), api) + } - /// Run a processing task for the API - pub fn process(mut self) -> impl Future { - let receiver = self.receiver.take().expect("Take invoked only once."); + /// Run a processing task for the API + pub fn process(mut self) -> impl Future { + let receiver = self.receiver.take().expect("Take invoked only once."); - receiver.for_each(move |msg| { - match msg { - ExtMessage::SubmitExtrinsic(ext) => self.submit_extrinsic(ext), - } - Ok(()) - }) - } + receiver.for_each(move |msg| { + match msg { + ExtMessage::SubmitExtrinsic(ext) => self.submit_extrinsic(ext), + } + Ok(()) + }) + } - fn submit_extrinsic(&mut self, ext: Vec) { - 
let xt = match ::Extrinsic::decode(&mut &*ext) { - Some(xt) => xt, - None => { - warn!("Unable to decode extrinsic: {:?}", ext); - return - }, - }; + fn submit_extrinsic(&mut self, ext: Vec) { + let xt = match ::Extrinsic::decode(&mut &*ext) { + Some(xt) => xt, + None => { + warn!("Unable to decode extrinsic: {:?}", ext); + return; + } + }; - info!("Submitting to the pool: {:?} (isSigned: {:?})", xt, xt.is_signed()); - match self.transaction_pool.submit_one(&self.at, xt.clone()) { - Ok(hash) => debug!("[{:?}] Offchain transaction added to the pool.", hash), - Err(_) => { - debug!("Offchain inherent added to the pool."); - self.inherents_pool.add(xt); - }, - } - } + info!( + "Submitting to the pool: {:?} (isSigned: {:?})", + xt, + xt.is_signed() + ); + match self.transaction_pool.submit_one(&self.at, xt.clone()) { + Ok(hash) => debug!("[{:?}] Offchain transaction added to the pool.", hash), + Err(_) => { + debug!("Offchain inherent added to the pool."); + self.inherents_pool.add(xt); + } + } + } } diff --git a/core/offchain/src/lib.rs b/core/offchain/src/lib.rs index cac960f250..99977f4dd9 100644 --- a/core/offchain/src/lib.rs +++ b/core/offchain/src/lib.rs @@ -33,21 +33,18 @@ #![warn(missing_docs)] -use std::{ - marker::PhantomData, - sync::Arc, -}; +use std::{marker::PhantomData, sync::Arc}; use client::runtime_api::ApiExt; use inherents::pool::InherentsPool; use log::{debug, warn}; use primitives::ExecutionContext; use runtime_primitives::{ - generic::BlockId, - traits::{self, ProvideRuntimeApi}, + generic::BlockId, + traits::{self, ProvideRuntimeApi}, }; use tokio::runtime::TaskExecutor; -use transaction_pool::txpool::{Pool, ChainApi}; +use transaction_pool::txpool::{ChainApi, Pool}; mod api; @@ -56,77 +53,84 @@ pub use offchain_primitives::OffchainWorkerApi; /// An offchain workers manager. #[derive(Debug)] pub struct OffchainWorkers { - client: Arc, - inherents_pool: Arc::Extrinsic>>, - executor: TaskExecutor, - _block: PhantomData, + client: Arc, + inherents_pool: Arc::Extrinsic>>, + executor: TaskExecutor, + _block: PhantomData, } impl OffchainWorkers { - /// Creates new `OffchainWorkers`. - pub fn new( - client: Arc, - inherents_pool: Arc::Extrinsic>>, - executor: TaskExecutor, - ) -> Self { - Self { - client, - inherents_pool, - executor, - _block: PhantomData, - } - } + /// Creates new `OffchainWorkers`. + pub fn new( + client: Arc, + inherents_pool: Arc::Extrinsic>>, + executor: TaskExecutor, + ) -> Self { + Self { + client, + inherents_pool, + executor, + _block: PhantomData, + } + } } -impl OffchainWorkers where - Block: traits::Block, - C: ProvideRuntimeApi, - C::Api: OffchainWorkerApi, +impl OffchainWorkers +where + Block: traits::Block, + C: ProvideRuntimeApi, + C::Api: OffchainWorkerApi, { - /// Start the offchain workers after given block. - pub fn on_block_imported( - &self, - number: &::Number, - pool: &Arc>, - ) where - A: ChainApi + 'static, - { - let runtime = self.client.runtime_api(); - let at = BlockId::number(*number); - let has_api = runtime.has_api::>(&at); - debug!("Checking offchain workers at {:?}: {:?}", at, has_api); - - if has_api.unwrap_or(false) { - let (api, runner) = api::Api::new(pool.clone(), self.inherents_pool.clone(), at.clone()); - self.executor.spawn(runner.process()); - - debug!("Running offchain workers at {:?}", at); - let api = Box::new(api); - runtime.offchain_worker_with_context(&at, ExecutionContext::OffchainWorker(api), *number).unwrap(); - } - } + /// Start the offchain workers after given block. 
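The offchain `Api::process` above drains an unbounded futures 0.1 channel with `Stream::for_each`; the loop completes once every sender has been dropped. The same skeleton in isolation (the message type is illustrative):

use futures::{sync::mpsc, Future, Stream};

enum Msg {
    Add(u32),
}

fn main() {
    let (tx, rx) = mpsc::unbounded::<Msg>();
    tx.unbounded_send(Msg::Add(3)).unwrap();
    tx.unbounded_send(Msg::Add(4)).unwrap();
    drop(tx); // closing the last sender lets `for_each` complete

    let mut sum = 0;
    rx.for_each(|msg| {
        match msg {
            Msg::Add(n) => sum += n,
        }
        Ok(())
    })
    .wait()
    .unwrap();
    assert_eq!(sum, 7);
}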
+ pub fn on_block_imported( + &self, + number: &::Number, + pool: &Arc>, + ) where + A: ChainApi + 'static, + { + let runtime = self.client.runtime_api(); + let at = BlockId::number(*number); + let has_api = runtime.has_api::>(&at); + debug!("Checking offchain workers at {:?}: {:?}", at, has_api); + + if has_api.unwrap_or(false) { + let (api, runner) = + api::Api::new(pool.clone(), self.inherents_pool.clone(), at.clone()); + self.executor.spawn(runner.process()); + + debug!("Running offchain workers at {:?}", at); + let api = Box::new(api); + runtime + .offchain_worker_with_context(&at, ExecutionContext::OffchainWorker(api), *number) + .unwrap(); + } + } } #[cfg(test)] mod tests { - use super::*; - use futures::Future; - - #[test] - fn should_call_into_runtime_and_produce_extrinsic() { - // given - let _ = env_logger::try_init(); - let runtime = tokio::runtime::Runtime::new().unwrap(); - let client = Arc::new(test_client::new()); - let pool = Arc::new(Pool::new(Default::default(), ::transaction_pool::ChainApi::new(client.clone()))); - let inherents = Arc::new(InherentsPool::default()); - - // when - let offchain = OffchainWorkers::new(client, inherents.clone(), runtime.executor()); - offchain.on_block_imported(&0u64, &pool); - - // then - runtime.shutdown_on_idle().wait().unwrap(); - assert_eq!(inherents.drain().len(), 1); - } + use super::*; + use futures::Future; + + #[test] + fn should_call_into_runtime_and_produce_extrinsic() { + // given + let _ = env_logger::try_init(); + let runtime = tokio::runtime::Runtime::new().unwrap(); + let client = Arc::new(test_client::new()); + let pool = Arc::new(Pool::new( + Default::default(), + ::transaction_pool::ChainApi::new(client.clone()), + )); + let inherents = Arc::new(InherentsPool::default()); + + // when + let offchain = OffchainWorkers::new(client, inherents.clone(), runtime.executor()); + offchain.on_block_imported(&0u64, &pool); + + // then + runtime.shutdown_on_idle().wait().unwrap(); + assert_eq!(inherents.drain().len(), 1); + } } diff --git a/core/panic-handler/src/lib.rs b/core/panic-handler/src/lib.rs index b2fd7238e0..5ae69e03e1 100644 --- a/core/panic-handler/src/lib.rs +++ b/core/panic-handler/src/lib.rs @@ -17,94 +17,97 @@ //! Custom panic hook with bug report link use backtrace::Backtrace; +use std::cell::Cell; use std::io::{self, Write}; use std::panic::{self, PanicInfo}; -use std::cell::Cell; use std::thread; thread_local! { - pub static ABORT: Cell = Cell::new(true); + pub static ABORT: Cell = Cell::new(true); } /// Set the panic hook pub fn set(bug_url: &'static str) { - panic::set_hook(Box::new(move |c| panic_hook(c, bug_url))); + panic::set_hook(Box::new(move |c| panic_hook(c, bug_url))); } macro_rules! ABOUT_PANIC { - () => (" + () => { + " This is a bug. Please report it at: {} -")} +" + }; +} /// Set aborting flag. Returns previous value of the flag. pub fn set_abort(enabled: bool) -> bool { - ABORT.with(|flag| { - let prev = flag.get(); - flag.set(enabled); - prev - }) + ABORT.with(|flag| { + let prev = flag.get(); + flag.set(enabled); + prev + }) } /// Abort flag guard. Sets abort on construction and reverts to previous setting when dropped. pub struct AbortGuard(bool); impl AbortGuard { - /// Create a new guard and set abort flag to specified value. - pub fn new(enable: bool) -> AbortGuard { - AbortGuard(set_abort(enable)) - } + /// Create a new guard and set abort flag to specified value. 
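`AbortGuard` below is the classic save-and-restore RAII guard over a thread-local flag: construction records the previous value and `Drop` writes it back, so the setting is restored even on early return or panic. A self-contained sketch of that shape (names mirror the code but the snippet is standalone):

use std::cell::Cell;

thread_local! {
    static FLAG: Cell<bool> = Cell::new(true);
}

// Set the flag and return its previous value.
fn set_flag(enabled: bool) -> bool {
    FLAG.with(|flag| {
        let prev = flag.get();
        flag.set(enabled);
        prev
    })
}

struct Guard(bool); // remembers the value to restore

impl Guard {
    fn new(enable: bool) -> Guard {
        Guard(set_flag(enable))
    }
}

impl Drop for Guard {
    fn drop(&mut self) {
        set_flag(self.0);
    }
}

fn main() {
    {
        let _guard = Guard::new(false);
        assert!(!FLAG.with(|f| f.get()));
    } // guard dropped: previous value restored
    assert!(FLAG.with(|f| f.get()));
}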
 impl AbortGuard {
-	/// Create a new guard and set abort flag to specified value.
-	pub fn new(enable: bool) -> AbortGuard {
-		AbortGuard(set_abort(enable))
-	}
+    /// Create a new guard and set abort flag to specified value.
+    pub fn new(enable: bool) -> AbortGuard {
+        AbortGuard(set_abort(enable))
+    }
 }

 impl Drop for AbortGuard {
-	fn drop(&mut self) {
-		set_abort(self.0);
-	}
+    fn drop(&mut self) {
+        set_abort(self.0);
+    }
 }

 fn panic_hook(info: &PanicInfo, report_url: &'static str) {
-	let location = info.location();
-	let file = location.as_ref().map(|l| l.file()).unwrap_or("<unknown>");
-	let line = location.as_ref().map(|l| l.line()).unwrap_or(0);
-
-	let msg = match info.payload().downcast_ref::<&'static str>() {
-		Some(s) => *s,
-		None => match info.payload().downcast_ref::<String>() {
-			Some(s) => &s[..],
-			None => "Box<Any>",
-		}
-	};
-
-	let thread = thread::current();
-	let name = thread.name().unwrap_or("<unnamed>");
-
-	let backtrace = Backtrace::new();
-
-	let mut stderr = io::stderr();
-
-	let _ = writeln!(stderr, "");
-	let _ = writeln!(stderr, "====================");
-	let _ = writeln!(stderr, "");
-	let _ = writeln!(stderr, "{:?}", backtrace);
-	let _ = writeln!(stderr, "");
-	let _ = writeln!(
-		stderr,
-		"Thread '{}' panicked at '{}', {}:{}",
-		name, msg, file, line
-	);
-
-	let _ = writeln!(stderr, ABOUT_PANIC!(), report_url);
-	ABORT.with(|flag| {
-		if flag.get() {
-			::std::process::exit(1);
-		}
-	})
+    let location = info.location();
+    let file = location.as_ref().map(|l| l.file()).unwrap_or("<unknown>");
+    let line = location.as_ref().map(|l| l.line()).unwrap_or(0);
+
+    let msg = match info.payload().downcast_ref::<&'static str>() {
+        Some(s) => *s,
+        None => match info.payload().downcast_ref::<String>() {
+            Some(s) => &s[..],
+            None => "Box<Any>",
+        },
+    };
+
+    let thread = thread::current();
+    let name = thread.name().unwrap_or("<unnamed>");
+
+    let backtrace = Backtrace::new();
+
+    let mut stderr = io::stderr();
+
+    let _ = writeln!(stderr, "");
+    let _ = writeln!(stderr, "====================");
+    let _ = writeln!(stderr, "");
+    let _ = writeln!(stderr, "{:?}", backtrace);
+    let _ = writeln!(stderr, "");
+    let _ = writeln!(
+        stderr,
+        "Thread '{}' panicked at '{}', {}:{}",
+        name, msg, file, line
+    );
+
+    let _ = writeln!(stderr, ABOUT_PANIC!(), report_url);
+    ABORT.with(|flag| {
+        if flag.get() {
+            ::std::process::exit(1);
+        }
+    })
 }

 #[test]
 fn does_not_abort() {
-	set("test");
-	let _guard = AbortGuard::new(false);
-	::std::panic::catch_unwind(|| panic!()).ok();
+    set("test");
+    let _guard = AbortGuard::new(false);
+    ::std::panic::catch_unwind(|| panic!()).ok();
 }
diff --git a/core/peerset/src/lib.rs b/core/peerset/src/lib.rs
index b64cafc3e5..44cfc38335 100644
--- a/core/peerset/src/lib.rs
+++ b/core/peerset/src/lib.rs
@@ -17,51 +17,51 @@
 //! Peer Set Manager (PSM). Contains the strategy for choosing which nodes the network should be
 //! connected to.

-use std::collections::HashSet;
 use futures::{prelude::*, sync::mpsc};
 use libp2p::PeerId;
 use parking_lot::Mutex;
+use std::collections::HashSet;
 use std::sync::Arc;

 pub use serde_json::Value;

 /// Shared part of the peer set manager (PSM). Distributed around the code.
 pub struct Peerset {
-	tx: mpsc::UnboundedSender<Message>,
-	inner: Mutex<Inner>,
+    tx: mpsc::UnboundedSender<Message>,
+    inner: Mutex<Inner>,
 }

 struct Inner {
-	/// List of nodes that we know exist but we are not connected to.
-	/// Elements in this list must never be in `out_slots` or `in_slots`.
-	discovered: Vec<PeerId>,
-	/// List of reserved nodes.
-	reserved: HashSet<PeerId>,
-	/// If true, we only accept reserved nodes.
-	reserved_only: bool,
-	/// Node slots for outgoing connections. Each slot contains either `None` if the node is free,
-	/// or `Some` if it is assigned to a peer.
-	out_slots: Vec<Option<PeerId>>,
-	/// Node slots for incoming connections. Each slot contains either `None` if the node is free,
-	/// or `Some` if it is assigned to a peer.
-	in_slots: Vec<Option<PeerId>>,
+    /// List of nodes that we know exist but we are not connected to.
+    /// Elements in this list must never be in `out_slots` or `in_slots`.
+    discovered: Vec<PeerId>,
+    /// List of reserved nodes.
+    reserved: HashSet<PeerId>,
+    /// If true, we only accept reserved nodes.
+    reserved_only: bool,
+    /// Node slots for outgoing connections. Each slot contains either `None` if the node is free,
+    /// or `Some` if it is assigned to a peer.
+    out_slots: Vec<Option<PeerId>>,
+    /// Node slots for incoming connections. Each slot contains either `None` if the node is free,
+    /// or `Some` if it is assigned to a peer.
+    in_slots: Vec<Option<PeerId>>,
 }

 /// Message that can be sent by the peer set manager (PSM).
 #[derive(Debug)]
 pub enum Message {
-	/// Request to open a connection to the given peer. From the point of view of the PSM, we are
-	/// immediately connected.
-	Connect(PeerId),
+    /// Request to open a connection to the given peer. From the point of view of the PSM, we are
+    /// immediately connected.
+    Connect(PeerId),

-	/// Drop the connection to the given peer, or cancel the connection attempt after a `Connect`.
-	Drop(PeerId),
+    /// Drop the connection to the given peer, or cancel the connection attempt after a `Connect`.
+    Drop(PeerId),

-	/// Equivalent to `Connect` for the peer corresponding to this incoming index.
-	Accept(IncomingIndex),
+    /// Equivalent to `Connect` for the peer corresponding to this incoming index.
+    Accept(IncomingIndex),

-	/// Equivalent to `Drop` for the peer corresponding to this incoming index.
-	Reject(IncomingIndex),
+    /// Equivalent to `Drop` for the peer corresponding to this incoming index.
+    Reject(IncomingIndex),
 }

 /// Opaque identifier for an incoming connection. Allocated by the network.
@@ -69,34 +69,34 @@ pub enum Message {
 pub struct IncomingIndex(pub u64);

 impl From<u64> for IncomingIndex {
-	fn from(val: u64) -> IncomingIndex {
-		IncomingIndex(val)
-	}
+    fn from(val: u64) -> IncomingIndex {
+        IncomingIndex(val)
+    }
 }

 /// Configuration to pass when creating the peer set manager.
 #[derive(Debug)]
 pub struct PeersetConfig {
-	/// Maximum number of incoming links to peers.
-	pub in_peers: u32,
-
-	/// Maximum number of outgoing links to peers.
-	pub out_peers: u32,
-
-	/// List of bootstrap nodes to initialize the peer with.
-	///
-	/// > **Note**: Keep in mind that the networking has to know an address for these nodes,
-	/// > otherwise it will not be able to connect to them.
-	pub bootnodes: Vec<PeerId>,
-
-	/// If true, we only accept reserved nodes.
-	pub reserved_only: bool,
-
-	/// List of nodes that we should always be connected to.
-	///
-	/// > **Note**: Keep in mind that the networking has to know an address for these nodes,
-	/// > otherwise it will not be able to connect to them.
-	pub reserved_nodes: Vec<PeerId>,
+    /// Maximum number of incoming links to peers.
+    pub in_peers: u32,
+
+    /// Maximum number of outgoing links to peers.
+    pub out_peers: u32,
+
+    /// List of bootstrap nodes to initialize the peer with.
+    ///
+    /// > **Note**: Keep in mind that the networking has to know an address for these nodes,
+    /// > otherwise it will not be able to connect to them.
+    pub bootnodes: Vec<PeerId>,
+
+    /// If true, we only accept reserved nodes.
+    pub reserved_only: bool,
+
+    /// List of nodes that we should always be connected to.
+    ///
+    /// > **Note**: Keep in mind that the networking has to know an address for these nodes,
+    /// > otherwise it will not be able to connect to them.
+    pub reserved_nodes: Vec<PeerId>,
 }
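// Example (not part of this patch): constructing a peerset from this config;
// the slot counts below are arbitrary, and `Arc`/`PeerId` are the types
// already imported in this module.
//
//     let (peerset, rx) = Peerset::from_config(PeersetConfig {
//         in_peers: 25,
//         out_peers: 25,
//         bootnodes: Vec::new(), // would normally contain known `PeerId`s
//         reserved_only: false,
//         reserved_nodes: Vec::new(),
//     });
//     // `rx` is a `Stream` of `Message`s (`Connect`, `Drop`, `Accept`,
//     // `Reject`) that the network layer is expected to act upon.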
 /// Side of the peer set manager owned by the network. In other words, the "receiving" side.
@@ -104,213 +104,234 @@ pub struct PeersetConfig {
 /// Implements the `Stream` trait and can be polled for messages. The `Stream` never ends and never
 /// errors.
 pub struct PeersetMut {
-	parent: Arc<Peerset>,
-	rx: mpsc::UnboundedReceiver<Message>,
+    parent: Arc<Peerset>,
+    rx: mpsc::UnboundedReceiver<Message>,
 }

 impl Peerset {
-	/// Builds a new peerset from the given configuration.
-	pub fn from_config(config: PeersetConfig) -> (Arc<Peerset>, PeersetMut) {
-		let (tx, rx) = mpsc::unbounded();
-
-		let mut inner = Inner {
-			discovered: config.bootnodes.into_iter().collect(),
-			reserved: Default::default(),
-			reserved_only: config.reserved_only,
-			out_slots: (0 .. config.out_peers).map(|_| None).collect(),
-			in_slots: (0 .. config.in_peers).map(|_| None).collect(),
-		};
-
-		alloc_slots(&mut inner, &tx);
-
-		let peerset = Arc::new(Peerset {
-			tx,
-			inner: Mutex::new(inner),
-		});
-
-		let rx = PeersetMut {
-			parent: peerset.clone(),
-			rx,
-		};
-
-		for reserved in config.reserved_nodes {
-			peerset.add_reserved_peer(reserved);
-		}
-
-		(peerset, rx)
-	}
-
-	/// Adds a new reserved peer. The peerset will make an effort to always remain connected to
-	/// this peer.
-	///
-	/// Has no effect if the node was already a reserved peer.
-	///
-	/// > **Note**: Keep in mind that the networking has to know an address for this node,
-	/// > otherwise it will not be able to connect to it.
-	pub fn add_reserved_peer(&self, peer_id: PeerId) {
-		let mut inner = self.inner.lock();
-		if !inner.reserved.insert(peer_id.clone()) {
-			// Immediately return if this peer was already in the list.
-			return;
-		}
-
-		// Nothing more to do if we're already connected.
-		if inner.out_slots.iter().chain(inner.in_slots.iter()).any(|s| s.as_ref() == Some(&peer_id)) {
-			return;
-		}
-
-		// Assign a slot for this reserved peer.
-		if let Some(pos) = inner.out_slots.iter().position(|s| s.as_ref().map(|n| !inner.reserved.contains(n)).unwrap_or(true)) {
-			let _ = self.tx.unbounded_send(Message::Connect(peer_id.clone()));
-			inner.out_slots[pos] = Some(peer_id);
-
-		} else {
-			// All slots are filled with reserved peers.
-			if inner.discovered.iter().all(|p| *p != peer_id) {
-				inner.discovered.push(peer_id);
-			}
-		}
-	}
-
-	/// Remove a previously-added reserved peer.
-	///
-	/// Has no effect if the node was not a reserved peer.
-	pub fn remove_reserved_peer(&self, peer_id: &PeerId) {
-		let mut inner = self.inner.lock();
-		inner.reserved.remove(peer_id);
-	}
-
-	/// Sets whether or not the peerset only accepts reserved nodes.
-	pub fn set_reserved_only(&self, reserved_only: bool) {
-		let mut inner = self.inner.lock();
-		let inner = &mut *inner; // Fixes a borrowing issue.
-		inner.reserved_only = reserved_only;
-
-		// Disconnect non-reserved nodes.
-		if reserved_only {
-			for slot in inner.out_slots.iter_mut().chain(inner.in_slots.iter_mut()) {
-				if let Some(peer) = slot.as_ref() {
-					if inner.reserved.contains(peer) {
-						continue;
-					}
-
-					let _ = self.tx.unbounded_send(Message::Drop(peer.clone()));
-				}
-
-				*slot = None;
-			}
-		}
-	}
-
-	/// Reports an adjustment to the reputation of the given peer.
-	pub fn report_peer(&self, _peer_id: &PeerId, _score_diff: i32) {
-		// This is not implemented in this dummy implementation.
-	}
+    /// Builds a new peerset from the given configuration.
+    pub fn from_config(config: PeersetConfig) -> (Arc<Peerset>, PeersetMut) {
+        let (tx, rx) = mpsc::unbounded();
+
+        let mut inner = Inner {
+            discovered: config.bootnodes.into_iter().collect(),
+            reserved: Default::default(),
+            reserved_only: config.reserved_only,
+            out_slots: (0..config.out_peers).map(|_| None).collect(),
+            in_slots: (0..config.in_peers).map(|_| None).collect(),
+        };
+
+        alloc_slots(&mut inner, &tx);
+
+        let peerset = Arc::new(Peerset {
+            tx,
+            inner: Mutex::new(inner),
+        });
+
+        let rx = PeersetMut {
+            parent: peerset.clone(),
+            rx,
+        };
+
+        for reserved in config.reserved_nodes {
+            peerset.add_reserved_peer(reserved);
+        }
+
+        (peerset, rx)
+    }
+
+    /// Adds a new reserved peer. The peerset will make an effort to always remain connected to
+    /// this peer.
+    ///
+    /// Has no effect if the node was already a reserved peer.
+    ///
+    /// > **Note**: Keep in mind that the networking has to know an address for this node,
+    /// > otherwise it will not be able to connect to it.
+    pub fn add_reserved_peer(&self, peer_id: PeerId) {
+        let mut inner = self.inner.lock();
+        if !inner.reserved.insert(peer_id.clone()) {
+            // Immediately return if this peer was already in the list.
+            return;
+        }
+
+        // Nothing more to do if we're already connected.
+        if inner
+            .out_slots
+            .iter()
+            .chain(inner.in_slots.iter())
+            .any(|s| s.as_ref() == Some(&peer_id))
+        {
+            return;
+        }
+
+        // Assign a slot for this reserved peer.
+        if let Some(pos) = inner.out_slots.iter().position(|s| {
+            s.as_ref()
+                .map(|n| !inner.reserved.contains(n))
+                .unwrap_or(true)
+        }) {
+            let _ = self.tx.unbounded_send(Message::Connect(peer_id.clone()));
+            inner.out_slots[pos] = Some(peer_id);
+        } else {
+            // All slots are filled with reserved peers.
+            if inner.discovered.iter().all(|p| *p != peer_id) {
+                inner.discovered.push(peer_id);
+            }
+        }
+    }
+
+    /// Remove a previously-added reserved peer.
+    ///
+    /// Has no effect if the node was not a reserved peer.
+    pub fn remove_reserved_peer(&self, peer_id: &PeerId) {
+        let mut inner = self.inner.lock();
+        inner.reserved.remove(peer_id);
+    }
+
+    /// Sets whether or not the peerset only accepts reserved nodes.
+    pub fn set_reserved_only(&self, reserved_only: bool) {
+        let mut inner = self.inner.lock();
+        let inner = &mut *inner; // Fixes a borrowing issue.
+        inner.reserved_only = reserved_only;
+
+        // Disconnect non-reserved nodes.
+        if reserved_only {
+            for slot in inner.out_slots.iter_mut().chain(inner.in_slots.iter_mut()) {
+                if let Some(peer) = slot.as_ref() {
+                    if inner.reserved.contains(peer) {
+                        continue;
+                    }
+
+                    let _ = self.tx.unbounded_send(Message::Drop(peer.clone()));
+                }
+
+                *slot = None;
+            }
+        }
+    }
+
+    /// Reports an adjustment to the reputation of the given peer.
+    pub fn report_peer(&self, _peer_id: &PeerId, _score_diff: i32) {
+        // This is not implemented in this dummy implementation.
+    }
 }
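// Example (not part of this patch): the reserved-peer flow defined above,
// assuming `peerset` and `peer` are already in scope.
//
//     peerset.add_reserved_peer(peer.clone()); // emits Connect(peer) if an out-slot
//                                              // is empty or held by a non-reserved peer
//     peerset.set_reserved_only(true);         // emits Drop(..) for every non-reserved slot
//     peerset.remove_reserved_peer(&peer);     // only unmarks the peer; no Drop is sent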

 fn alloc_slots(inner: &mut Inner, tx: &mpsc::UnboundedSender<Message>) {
-	if inner.reserved_only {
-		return;
-	}
-
-	for slot in inner.out_slots.iter_mut() {
-		if slot.is_some() {
-			continue;
-		}
-
-		if !inner.discovered.is_empty() {
-			let elem = inner.discovered.remove(0);
-			*slot = Some(elem.clone());
-			let _ = tx.unbounded_send(Message::Connect(elem));
-		}
-	}
+    if inner.reserved_only {
+        return;
+    }
+
+    for slot in inner.out_slots.iter_mut() {
+        if slot.is_some() {
+            continue;
+        }
+
+        if !inner.discovered.is_empty() {
+            let elem = inner.discovered.remove(0);
+            *slot = Some(elem.clone());
+            let _ = tx.unbounded_send(Message::Connect(elem));
+        }
+    }
 }
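// Example (not part of this patch): a worked trace of `alloc_slots`. With two
// empty out-slots and `discovered = [a, b, c]`, the function drains the front
// of the queue, assigns `a` and `b` to the slots, and emits
// `Message::Connect(a)` and `Message::Connect(b)`; `c` stays queued until a
// slot is freed (e.g. via `dropped`). With `reserved_only` set, it is a no-op.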

 impl PeersetMut {
-	/// Indicate that we received an incoming connection. Must be answered either with
-	/// a corresponding `Accept` or `Reject`, except if we were already connected to this peer.
-	///
-	/// Note that this mechanism is orthogonal to `Connect`/`Drop`. Accepting an incoming
-	/// connection implicitly means `Accept`, but incoming connections aren't cancelled by
-	/// `dropped`.
-	///
-	/// Because of concurrency issues, it is acceptable to call `incoming` with a `PeerId` the
-	/// peerset is already connected to, in which case it must not answer.
-	pub fn incoming(&self, peer_id: PeerId, index: IncomingIndex) {
-		let mut inner = self.parent.inner.lock();
-		if inner.out_slots.iter().chain(inner.in_slots.iter()).any(|s| s.as_ref() == Some(&peer_id)) {
-			return
-		}
-
-		if let Some(pos) = inner.in_slots.iter().position(|s| s.is_none()) {
-			inner.in_slots[pos] = Some(peer_id);
-			let _ = self.parent.tx.unbounded_send(Message::Accept(index));
-		} else {
-			if inner.discovered.iter().all(|p| *p != peer_id) {
-				inner.discovered.push(peer_id);
-			}
-			let _ = self.parent.tx.unbounded_send(Message::Reject(index));
-		}
-	}
-
-	/// Indicate that we dropped an active connection with a peer, or that we failed to connect.
-	///
-	/// Must only be called after the PSM has either generated a `Connect` message with this
-	/// `PeerId`, or accepted an incoming connection with this `PeerId`.
-	pub fn dropped(&self, peer_id: &PeerId) {
-		let mut inner = self.parent.inner.lock();
-		let inner = &mut *inner; // Fixes a borrowing issue.
-
-		// Automatically connect back if reserved.
-		if inner.reserved.contains(peer_id) {
-			let _ = self.parent.tx.unbounded_send(Message::Connect(peer_id.clone()));
-			return
-		}
-
-		// Otherwise, free the slot.
-		for slot in inner.out_slots.iter_mut().chain(inner.in_slots.iter_mut()) {
-			if slot.as_ref() == Some(peer_id) {
-				*slot = None;
-				break;
-			}
-		}
-
-		// Note: in this dummy implementation we consider that peers never expire. As soon as we
-		// are disconnected from a peer, we try again.
-		if inner.discovered.iter().all(|p| p != peer_id) {
-			inner.discovered.push(peer_id.clone());
-		}
-		alloc_slots(inner, &self.parent.tx);
-	}
-
-	/// Adds a discovered peer id to the PSM.
-	///
-	/// > **Note**: There is no equivalent "expired" message, meaning that it is the responsibility
-	/// > of the PSM to remove `PeerId`s that fail to dial too often.
-	pub fn discovered(&self, peer_id: PeerId) {
-		let mut inner = self.parent.inner.lock();
-
-		if inner.out_slots.iter().chain(inner.in_slots.iter()).any(|p| p.as_ref() == Some(&peer_id)) {
-			return;
-		}
-
-		if inner.discovered.iter().all(|p| *p != peer_id) {
-			inner.discovered.push(peer_id);
-		}
-		alloc_slots(&mut inner, &self.parent.tx);
-	}
-
-	/// Produces a JSON object containing the state of the peerset manager, for debugging purposes.
-	pub fn debug_info(&self) -> serde_json::Value {
-		serde_json::Value::Null
-	}
+    /// Indicate that we received an incoming connection. Must be answered either with
+    /// a corresponding `Accept` or `Reject`, except if we were already connected to this peer.
+    ///
+    /// Note that this mechanism is orthogonal to `Connect`/`Drop`. Accepting an incoming
+    /// connection implicitly means `Accept`, but incoming connections aren't cancelled by
+    /// `dropped`.
+    ///
+    /// Because of concurrency issues, it is acceptable to call `incoming` with a `PeerId` the
+    /// peerset is already connected to, in which case it must not answer.
+    pub fn incoming(&self, peer_id: PeerId, index: IncomingIndex) {
+        let mut inner = self.parent.inner.lock();
+        if inner
+            .out_slots
+            .iter()
+            .chain(inner.in_slots.iter())
+            .any(|s| s.as_ref() == Some(&peer_id))
+        {
+            return;
+        }
+
+        if let Some(pos) = inner.in_slots.iter().position(|s| s.is_none()) {
+            inner.in_slots[pos] = Some(peer_id);
+            let _ = self.parent.tx.unbounded_send(Message::Accept(index));
+        } else {
+            if inner.discovered.iter().all(|p| *p != peer_id) {
+                inner.discovered.push(peer_id);
+            }
+            let _ = self.parent.tx.unbounded_send(Message::Reject(index));
+        }
+    }
+
+    /// Indicate that we dropped an active connection with a peer, or that we failed to connect.
+    ///
+    /// Must only be called after the PSM has either generated a `Connect` message with this
+    /// `PeerId`, or accepted an incoming connection with this `PeerId`.
+    pub fn dropped(&self, peer_id: &PeerId) {
+        let mut inner = self.parent.inner.lock();
+        let inner = &mut *inner; // Fixes a borrowing issue.
+
+        // Automatically connect back if reserved.
+        if inner.reserved.contains(peer_id) {
+            let _ = self
+                .parent
+                .tx
+                .unbounded_send(Message::Connect(peer_id.clone()));
+            return;
+        }
+
+        // Otherwise, free the slot.
+        for slot in inner.out_slots.iter_mut().chain(inner.in_slots.iter_mut()) {
+            if slot.as_ref() == Some(peer_id) {
+                *slot = None;
+                break;
+            }
+        }
+
+        // Note: in this dummy implementation we consider that peers never expire. As soon as we
+        // are disconnected from a peer, we try again.
+        if inner.discovered.iter().all(|p| p != peer_id) {
+            inner.discovered.push(peer_id.clone());
+        }
+        alloc_slots(inner, &self.parent.tx);
+    }
+
+    /// Adds a discovered peer id to the PSM.
+    ///
+    /// > **Note**: There is no equivalent "expired" message, meaning that it is the responsibility
+    /// > of the PSM to remove `PeerId`s that fail to dial too often.
+    pub fn discovered(&self, peer_id: PeerId) {
+        let mut inner = self.parent.inner.lock();
+
+        if inner
+            .out_slots
+            .iter()
+            .chain(inner.in_slots.iter())
+            .any(|p| p.as_ref() == Some(&peer_id))
+        {
+            return;
+        }
+
+        if inner.discovered.iter().all(|p| *p != peer_id) {
+            inner.discovered.push(peer_id);
+        }
+        alloc_slots(&mut inner, &self.parent.tx);
+    }
+
+    /// Produces a JSON object containing the state of the peerset manager, for debugging purposes.
+    pub fn debug_info(&self) -> serde_json::Value {
+        serde_json::Value::Null
+    }
 }

 impl Stream for PeersetMut {
-	type Item = Message;
-	type Error = ();
+    type Item = Message;
+    type Error = ();

-	fn poll(&mut self) -> Poll<Option<Message>, Self::Error> {
-		self.rx.poll()
-	}
+    fn poll(&mut self) -> Poll<Option<Message>, Self::Error> {
+        self.rx.poll()
+    }
 }
diff --git a/core/primitives/src/changes_trie.rs b/core/primitives/src/changes_trie.rs
index c8776a6f08..8d91d2af5d 100644
--- a/core/primitives/src/changes_trie.rs
+++ b/core/primitives/src/changes_trie.rs
@@ -16,143 +16,145 @@
 //! Substrate changes trie configuration.
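// Example (not part of this patch): with `digest_interval = 8` and
// `digest_levels = 4` below, level-1 digests are built every 8 blocks,
// level-2 every 64, level-3 every 512 and level-4 every 4096; so
// `digest_level_at_block(64) == Some((2, 64, 8))`: a level-2 digest whose
// interval is 64 blocks, built from every 8th block. The tests further down
// assert exactly these values.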
+use parity_codec::{Decode, Encode}; #[cfg(any(feature = "std", test))] -use serde_derive::{Serialize, Deserialize}; -use parity_codec::{Encode, Decode}; +use serde_derive::{Deserialize, Serialize}; /// Substrate changes trie configuration. #[cfg_attr(any(feature = "std", test), derive(Serialize, Deserialize))] #[derive(Debug, Clone, PartialEq, Eq, Default, Encode, Decode)] pub struct ChangesTrieConfiguration { - /// Interval (in blocks) at which level1-digests are created. Digests are not - /// created when this is less or equal to 1. - pub digest_interval: u64, - /// Maximal number of digest levels in hierarchy. 0 means that digests are not - /// created at all (even level1 digests). 1 means only level1-digests are created. - /// 2 means that every digest_interval^2 there will be a level2-digest, and so on. - pub digest_levels: u32, + /// Interval (in blocks) at which level1-digests are created. Digests are not + /// created when this is less or equal to 1. + pub digest_interval: u64, + /// Maximal number of digest levels in hierarchy. 0 means that digests are not + /// created at all (even level1 digests). 1 means only level1-digests are created. + /// 2 means that every digest_interval^2 there will be a level2-digest, and so on. + pub digest_levels: u32, } impl ChangesTrieConfiguration { - /// Is digest build enabled? - pub fn is_digest_build_enabled(&self) -> bool { - self.digest_interval > 1 && self.digest_levels > 0 - } - - /// Do we need to build digest at given block? - pub fn is_digest_build_required_at_block(&self, block: u64) -> bool { - block != 0 - && self.is_digest_build_enabled() - && block % self.digest_interval == 0 - } - - /// Returns max digest interval. One if digests are not created at all. - /// Returns ::std::u64::MAX instead of panic in the case of overflow. - pub fn max_digest_interval(&self) -> u64 { - if !self.is_digest_build_enabled() { - return 1; - } - - // FIXME: use saturating_pow once stabilized - https://github.com/rust-lang/rust/issues/48320 - let mut max_digest_interval = self.digest_interval; - for _ in 1..self.digest_levels { - max_digest_interval = match max_digest_interval.checked_mul(self.digest_interval) { - Some(max_digest_interval) => max_digest_interval, - None => return u64::max_value(), - } - } - - max_digest_interval - } - - /// Returns Some if digest must be built at given block number. - /// The tuple is: - /// ( - /// digest level - /// digest interval (in blocks) - /// step between blocks we're interested in when digest is built - /// ) - pub fn digest_level_at_block(&self, block: u64) -> Option<(u32, u64, u64)> { - if !self.is_digest_build_required_at_block(block) { - return None; - } - - let mut digest_interval = self.digest_interval; - let mut current_level = 1u32; - let mut digest_step = 1u64; - while current_level < self.digest_levels { - let new_digest_interval = match digest_interval.checked_mul(self.digest_interval) { - Some(new_digest_interval) if block % new_digest_interval == 0 => new_digest_interval, - _ => break, - }; - - digest_step = digest_interval; - digest_interval = new_digest_interval; - current_level = current_level + 1; - } - - Some(( - current_level, - digest_interval, - digest_step, - )) - } + /// Is digest build enabled? + pub fn is_digest_build_enabled(&self) -> bool { + self.digest_interval > 1 && self.digest_levels > 0 + } + + /// Do we need to build digest at given block? 
+ pub fn is_digest_build_required_at_block(&self, block: u64) -> bool { + block != 0 && self.is_digest_build_enabled() && block % self.digest_interval == 0 + } + + /// Returns max digest interval. One if digests are not created at all. + /// Returns ::std::u64::MAX instead of panic in the case of overflow. + pub fn max_digest_interval(&self) -> u64 { + if !self.is_digest_build_enabled() { + return 1; + } + + // FIXME: use saturating_pow once stabilized - https://github.com/rust-lang/rust/issues/48320 + let mut max_digest_interval = self.digest_interval; + for _ in 1..self.digest_levels { + max_digest_interval = match max_digest_interval.checked_mul(self.digest_interval) { + Some(max_digest_interval) => max_digest_interval, + None => return u64::max_value(), + } + } + + max_digest_interval + } + + /// Returns Some if digest must be built at given block number. + /// The tuple is: + /// ( + /// digest level + /// digest interval (in blocks) + /// step between blocks we're interested in when digest is built + /// ) + pub fn digest_level_at_block(&self, block: u64) -> Option<(u32, u64, u64)> { + if !self.is_digest_build_required_at_block(block) { + return None; + } + + let mut digest_interval = self.digest_interval; + let mut current_level = 1u32; + let mut digest_step = 1u64; + while current_level < self.digest_levels { + let new_digest_interval = match digest_interval.checked_mul(self.digest_interval) { + Some(new_digest_interval) if block % new_digest_interval == 0 => { + new_digest_interval + } + _ => break, + }; + + digest_step = digest_interval; + digest_interval = new_digest_interval; + current_level = current_level + 1; + } + + Some((current_level, digest_interval, digest_step)) + } } #[cfg(test)] mod tests { - use super::ChangesTrieConfiguration; - - fn config(interval: u64, levels: u32) -> ChangesTrieConfiguration { - ChangesTrieConfiguration { - digest_interval: interval, - digest_levels: levels, - } - } - - #[test] - fn is_digest_build_enabled_works() { - assert!(!config(0, 100).is_digest_build_enabled()); - assert!(!config(1, 100).is_digest_build_enabled()); - assert!(config(2, 100).is_digest_build_enabled()); - assert!(!config(100, 0).is_digest_build_enabled()); - assert!(config(100, 1).is_digest_build_enabled()); - } - - #[test] - fn is_digest_build_required_at_block_works() { - assert!(!config(8, 4).is_digest_build_required_at_block(0)); - assert!(!config(8, 4).is_digest_build_required_at_block(1)); - assert!(!config(8, 4).is_digest_build_required_at_block(2)); - assert!(!config(8, 4).is_digest_build_required_at_block(4)); - assert!(config(8, 4).is_digest_build_required_at_block(8)); - assert!(!config(8, 4).is_digest_build_required_at_block(9)); - assert!(config(8, 4).is_digest_build_required_at_block(64)); - assert!(config(8, 4).is_digest_build_required_at_block(64)); - assert!(config(8, 4).is_digest_build_required_at_block(512)); - assert!(config(8, 4).is_digest_build_required_at_block(4096)); - assert!(!config(8, 4).is_digest_build_required_at_block(4103)); - assert!(config(8, 4).is_digest_build_required_at_block(4104)); - assert!(!config(8, 4).is_digest_build_required_at_block(4108)); - } - - #[test] - fn digest_level_at_block_works() { - assert_eq!(config(8, 4).digest_level_at_block(0), None); - assert_eq!(config(8, 4).digest_level_at_block(7), None); - assert_eq!(config(8, 4).digest_level_at_block(63), None); - assert_eq!(config(8, 4).digest_level_at_block(8), Some((1, 8, 1))); - assert_eq!(config(8, 4).digest_level_at_block(64), Some((2, 64, 8))); - assert_eq!(config(8, 
4).digest_level_at_block(512), Some((3, 512, 64))); - assert_eq!(config(8, 4).digest_level_at_block(4096), Some((4, 4096, 512))); - assert_eq!(config(8, 4).digest_level_at_block(4112), Some((1, 8, 1))); - } - - #[test] - fn max_digest_interval_works() { - assert_eq!(config(0, 0).max_digest_interval(), 1); - assert_eq!(config(2, 2).max_digest_interval(), 4); - assert_eq!(config(8, 4).max_digest_interval(), 4096); - assert_eq!(config(::std::u64::MAX, 1024).max_digest_interval(), ::std::u64::MAX); - } + use super::ChangesTrieConfiguration; + + fn config(interval: u64, levels: u32) -> ChangesTrieConfiguration { + ChangesTrieConfiguration { + digest_interval: interval, + digest_levels: levels, + } + } + + #[test] + fn is_digest_build_enabled_works() { + assert!(!config(0, 100).is_digest_build_enabled()); + assert!(!config(1, 100).is_digest_build_enabled()); + assert!(config(2, 100).is_digest_build_enabled()); + assert!(!config(100, 0).is_digest_build_enabled()); + assert!(config(100, 1).is_digest_build_enabled()); + } + + #[test] + fn is_digest_build_required_at_block_works() { + assert!(!config(8, 4).is_digest_build_required_at_block(0)); + assert!(!config(8, 4).is_digest_build_required_at_block(1)); + assert!(!config(8, 4).is_digest_build_required_at_block(2)); + assert!(!config(8, 4).is_digest_build_required_at_block(4)); + assert!(config(8, 4).is_digest_build_required_at_block(8)); + assert!(!config(8, 4).is_digest_build_required_at_block(9)); + assert!(config(8, 4).is_digest_build_required_at_block(64)); + assert!(config(8, 4).is_digest_build_required_at_block(64)); + assert!(config(8, 4).is_digest_build_required_at_block(512)); + assert!(config(8, 4).is_digest_build_required_at_block(4096)); + assert!(!config(8, 4).is_digest_build_required_at_block(4103)); + assert!(config(8, 4).is_digest_build_required_at_block(4104)); + assert!(!config(8, 4).is_digest_build_required_at_block(4108)); + } + + #[test] + fn digest_level_at_block_works() { + assert_eq!(config(8, 4).digest_level_at_block(0), None); + assert_eq!(config(8, 4).digest_level_at_block(7), None); + assert_eq!(config(8, 4).digest_level_at_block(63), None); + assert_eq!(config(8, 4).digest_level_at_block(8), Some((1, 8, 1))); + assert_eq!(config(8, 4).digest_level_at_block(64), Some((2, 64, 8))); + assert_eq!(config(8, 4).digest_level_at_block(512), Some((3, 512, 64))); + assert_eq!( + config(8, 4).digest_level_at_block(4096), + Some((4, 4096, 512)) + ); + assert_eq!(config(8, 4).digest_level_at_block(4112), Some((1, 8, 1))); + } + + #[test] + fn max_digest_interval_works() { + assert_eq!(config(0, 0).max_digest_interval(), 1); + assert_eq!(config(2, 2).max_digest_interval(), 4); + assert_eq!(config(8, 4).max_digest_interval(), 4096); + assert_eq!( + config(::std::u64::MAX, 1024).max_digest_interval(), + ::std::u64::MAX + ); + } } diff --git a/core/primitives/src/crypto.rs b/core/primitives/src/crypto.rs index e0ecd4ce42..ddc63201fb 100644 --- a/core/primitives/src/crypto.rs +++ b/core/primitives/src/crypto.rs @@ -19,14 +19,15 @@ // end::description[] #[cfg(feature = "std")] -use parity_codec::{Encode, Decode}; +use base58::{FromBase58, ToBase58}; #[cfg(feature = "std")] -use regex::Regex; +use parity_codec::{Decode, Encode}; #[cfg(feature = "std")] -use base58::{FromBase58, ToBase58}; +use regex::Regex; /// The root phrase for our publicly known keys. 
-pub const DEV_PHRASE: &str = "bottom drive obey lake curtain smoke basket hold race lonely fit walk"; +pub const DEV_PHRASE: &str = + "bottom drive obey lake curtain smoke basket hold race lonely fit walk"; /// The address of the associated root phrase for our publicly known keys. pub const DEV_ADDRESS: &str = "5DfhGyQdFobKM8NsWvEeAKk5EQQgYe9AydgJ7rMB6E1EqRzV"; @@ -44,40 +45,40 @@ pub const JUNCTION_ID_LEN: usize = 32; /// that data passed in makes sense. Basically, you're not guaranteed to get anything /// sensible out. pub trait UncheckedFrom { - /// Convert from an instance of `T` to Self. This is not guaranteed to be - /// whatever counts as a valid instance of `T` and it's up to the caller to - /// ensure that it makes sense. - fn unchecked_from(t: T) -> Self; + /// Convert from an instance of `T` to Self. This is not guaranteed to be + /// whatever counts as a valid instance of `T` and it's up to the caller to + /// ensure that it makes sense. + fn unchecked_from(t: T) -> Self; } /// The counterpart to `UncheckedFrom`. pub trait UncheckedInto { - /// The counterpart to `unchecked_from`. - fn unchecked_into(self) -> T; + /// The counterpart to `unchecked_from`. + fn unchecked_into(self) -> T; } impl> UncheckedInto for S { - fn unchecked_into(self) -> T { - T::unchecked_from(self) - } + fn unchecked_into(self) -> T { + T::unchecked_from(self) + } } /// An error with the interpretation of a secret. #[derive(Debug, Clone, PartialEq, Eq)] #[cfg(feature = "std")] pub enum SecretStringError { - /// The overall format was invalid (e.g. the seed phrase contained symbols). - InvalidFormat, - /// The seed phrase provided is not a valid BIP39 phrase. - InvalidPhrase, - /// The supplied password was invalid. - InvalidPassword, - /// The seed is invalid (bad content). - InvalidSeed, - /// The seed has an invalid length. - InvalidSeedLength, - /// The derivation path was invalid (e.g. contains soft junctions when they are not supported). - InvalidPath, + /// The overall format was invalid (e.g. the seed phrase contained symbols). + InvalidFormat, + /// The seed phrase provided is not a valid BIP39 phrase. + InvalidPhrase, + /// The supplied password was invalid. + InvalidPassword, + /// The seed is invalid (bad content). + InvalidSeed, + /// The seed has an invalid length. + InvalidSeedLength, + /// The derivation path was invalid (e.g. contains soft junctions when they are not supported). + InvalidPath, } /// A since derivation junction description. It is the single parameter used when creating @@ -86,136 +87,146 @@ pub enum SecretStringError { #[derive(Copy, Clone, Eq, PartialEq, Hash, Debug, Encode, Decode)] #[cfg(feature = "std")] pub enum DeriveJunction { - /// Soft (vanilla) derivation. Public keys have a correspondent derivation. - Soft([u8; JUNCTION_ID_LEN]), - /// Hard ("hardened") derivation. Public keys do not have a correspondent derivation. - Hard([u8; JUNCTION_ID_LEN]), + /// Soft (vanilla) derivation. Public keys have a correspondent derivation. + Soft([u8; JUNCTION_ID_LEN]), + /// Hard ("hardened") derivation. Public keys do not have a correspondent derivation. + Hard([u8; JUNCTION_ID_LEN]), } #[cfg(feature = "std")] impl DeriveJunction { - /// Consume self to return a soft derive junction with the same chain code. - pub fn soften(self) -> Self { DeriveJunction::Soft(self.unwrap_inner()) } - - /// Consume self to return a hard derive junction with the same chain code. 
- pub fn harden(self) -> Self { DeriveJunction::Hard(self.unwrap_inner()) } - - /// Create a new soft (vanilla) DeriveJunction from a given, encodable, value. - /// - /// If you need a hard junction, use `hard()`. - pub fn soft(index: T) -> Self { - let mut cc: [u8; JUNCTION_ID_LEN] = Default::default(); - index.using_encoded(|data| if data.len() > JUNCTION_ID_LEN { - let hash_result = blake2_rfc::blake2b::blake2b(JUNCTION_ID_LEN, &[], data); - let hash = hash_result.as_bytes(); - cc.copy_from_slice(hash); - } else { - cc[0..data.len()].copy_from_slice(data); - }); - DeriveJunction::Soft(cc) - } - - /// Create a new hard (hardened) DeriveJunction from a given, encodable, value. - /// - /// If you need a soft junction, use `soft()`. - pub fn hard(index: T) -> Self { - Self::soft(index).harden() - } - - /// Consume self to return the chain code. - pub fn unwrap_inner(self) -> [u8; JUNCTION_ID_LEN] { - match self { - DeriveJunction::Hard(c) | DeriveJunction::Soft(c) => c, - } - } - - /// Get a reference to the inner junction id. - pub fn inner(&self) -> &[u8; JUNCTION_ID_LEN] { - match self { - DeriveJunction::Hard(ref c) | DeriveJunction::Soft(ref c) => c, - } - } - - /// Return `true` if the junction is soft. - pub fn is_soft(&self) -> bool { - match *self { - DeriveJunction::Soft(_) => true, - _ => false, - } - } - - /// Return `true` if the junction is hard. - pub fn is_hard(&self) -> bool { - match *self { - DeriveJunction::Hard(_) => true, - _ => false, - } - } + /// Consume self to return a soft derive junction with the same chain code. + pub fn soften(self) -> Self { + DeriveJunction::Soft(self.unwrap_inner()) + } + + /// Consume self to return a hard derive junction with the same chain code. + pub fn harden(self) -> Self { + DeriveJunction::Hard(self.unwrap_inner()) + } + + /// Create a new soft (vanilla) DeriveJunction from a given, encodable, value. + /// + /// If you need a hard junction, use `hard()`. + pub fn soft(index: T) -> Self { + let mut cc: [u8; JUNCTION_ID_LEN] = Default::default(); + index.using_encoded(|data| { + if data.len() > JUNCTION_ID_LEN { + let hash_result = blake2_rfc::blake2b::blake2b(JUNCTION_ID_LEN, &[], data); + let hash = hash_result.as_bytes(); + cc.copy_from_slice(hash); + } else { + cc[0..data.len()].copy_from_slice(data); + } + }); + DeriveJunction::Soft(cc) + } + + /// Create a new hard (hardened) DeriveJunction from a given, encodable, value. + /// + /// If you need a soft junction, use `soft()`. + pub fn hard(index: T) -> Self { + Self::soft(index).harden() + } + + /// Consume self to return the chain code. + pub fn unwrap_inner(self) -> [u8; JUNCTION_ID_LEN] { + match self { + DeriveJunction::Hard(c) | DeriveJunction::Soft(c) => c, + } + } + + /// Get a reference to the inner junction id. + pub fn inner(&self) -> &[u8; JUNCTION_ID_LEN] { + match self { + DeriveJunction::Hard(ref c) | DeriveJunction::Soft(ref c) => c, + } + } + + /// Return `true` if the junction is soft. + pub fn is_soft(&self) -> bool { + match *self { + DeriveJunction::Soft(_) => true, + _ => false, + } + } + + /// Return `true` if the junction is hard. 
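// Example (not part of this patch): how SURI path components map to
// junctions via the `From` impl below; numeric strings parse to soft
// junctions over the number, anything else becomes a soft junction over the
// string, and a leading `/` hardens the result.
//
//     assert_eq!(DeriveJunction::from("1"), DeriveJunction::soft(1u64));
//     assert_eq!(DeriveJunction::from("DOT"), DeriveJunction::soft("DOT"));
//     assert_eq!(DeriveJunction::from("/DOT"), DeriveJunction::hard("DOT"));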
+ pub fn is_hard(&self) -> bool { + match *self { + DeriveJunction::Hard(_) => true, + _ => false, + } + } } #[cfg(feature = "std")] impl> From for DeriveJunction { - fn from(j: T) -> DeriveJunction { - let j = j.as_ref(); - let (code, hard) = if j.starts_with("/") { - (&j[1..], true) - } else { - (j, false) - }; - - let res = if let Ok(n) = str::parse::(code) { - // number - DeriveJunction::soft(n) - } else { - // something else - DeriveJunction::soft(code) - }; - - if hard { - res.harden() - } else { - res - } - } + fn from(j: T) -> DeriveJunction { + let j = j.as_ref(); + let (code, hard) = if j.starts_with("/") { + (&j[1..], true) + } else { + (j, false) + }; + + let res = if let Ok(n) = str::parse::(code) { + // number + DeriveJunction::soft(n) + } else { + // something else + DeriveJunction::soft(code) + }; + + if hard { + res.harden() + } else { + res + } + } } /// An error type for SS58 decoding. #[cfg(feature = "std")] #[derive(Clone, Copy, Eq, PartialEq, Debug)] pub enum PublicError { - /// Bad alphabet. - BadBase58, - /// Bad length. - BadLength, - /// Unknown version. - UnknownVersion, - /// Invalid checksum. - InvalidChecksum, - /// Invalid format. - InvalidFormat, - /// Invalid derivation path. - InvalidPath, + /// Bad alphabet. + BadBase58, + /// Bad length. + BadLength, + /// Unknown version. + UnknownVersion, + /// Invalid checksum. + InvalidChecksum, + /// Invalid format. + InvalidFormat, + /// Invalid derivation path. + InvalidPath, } /// Key that can be encoded to/from SS58. #[cfg(feature = "std")] pub trait Ss58Codec: Sized { - /// Some if the string is a properly encoded SS58Check address. - fn from_ss58check(s: &str) -> Result; - /// Some if the string is a properly encoded SS58Check address, optionally with - /// a derivation path following. - fn from_string(s: &str) -> Result { Self::from_ss58check(s) } - /// Return the ss58-check string for this key. - fn to_ss58check(&self) -> String; + /// Some if the string is a properly encoded SS58Check address. + fn from_ss58check(s: &str) -> Result; + /// Some if the string is a properly encoded SS58Check address, optionally with + /// a derivation path following. + fn from_string(s: &str) -> Result { + Self::from_ss58check(s) + } + /// Return the ss58-check string for this key. + fn to_ss58check(&self) -> String; } #[cfg(feature = "std")] /// Derivable key trait. pub trait Derive: Sized { - /// Derive a child key from a series of given junctions. - /// - /// Will be `None` for public keys if there are any hard junctions in there. - fn derive>(&self, _path: Iter) -> Option { None } + /// Derive a child key from a series of given junctions. + /// + /// Will be `None` for public keys if there are any hard junctions in there. + fn derive>(&self, _path: Iter) -> Option { + None + } } #[cfg(feature = "std")] @@ -223,55 +234,56 @@ const PREFIX: &[u8] = b"SS58PRE"; #[cfg(feature = "std")] fn ss58hash(data: &[u8]) -> blake2_rfc::blake2b::Blake2bResult { - let mut context = blake2_rfc::blake2b::Blake2b::new(64); - context.update(PREFIX); - context.update(data); - context.finalize() + let mut context = blake2_rfc::blake2b::Blake2b::new(64); + context.update(PREFIX); + context.update(data); + context.finalize() } #[cfg(feature = "std")] impl + AsRef<[u8]> + Default + Derive> Ss58Codec for T { - fn from_ss58check(s: &str) -> Result { - let mut res = T::default(); - let len = res.as_mut().len(); - let d = s.from_base58().map_err(|_| PublicError::BadBase58)?; // failure here would be invalid encoding. 
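// Example (not part of this patch): the SS58check layout validated below:
// d[0] is the version byte (42 here), d[1..len+1] the raw public key bytes,
// and d[len+1..len+3] the first two bytes of
// blake2b-512(b"SS58PRE" ++ d[0..len+1]) acting as a checksum.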
- if d.len() != len + 3 { - // Invalid length. - return Err(PublicError::BadLength); - } - if d[0] != 42 { - // Invalid version. - return Err(PublicError::UnknownVersion); - } - - if d[len+1..len+3] != ss58hash(&d[0..len+1]).as_bytes()[0..2] { - // Invalid checksum. - return Err(PublicError::InvalidChecksum); - } - res.as_mut().copy_from_slice(&d[1..len+1]); - Ok(res) - } - - fn to_ss58check(&self) -> String { - let mut v = vec![42u8]; - v.extend(self.as_ref()); - let r = ss58hash(&v); - v.extend(&r.as_bytes()[0..2]); - v.to_base58() - } - - fn from_string(s: &str) -> Result { - let re = Regex::new(r"^(?P[\w\d]+)?(?P(//?[^/]+)*)$") - .expect("constructed from known-good static value; qed"); - let cap = re.captures(s).ok_or(PublicError::InvalidFormat)?; - let re_junction = Regex::new(r"/(/?[^/]+)") - .expect("constructed from known-good static value; qed"); - let path = re_junction.captures_iter(&cap["path"]) - .map(|f| DeriveJunction::from(&f[1])); - Self::from_ss58check(cap.name("ss58").map(|r| r.as_str()).unwrap_or(DEV_ADDRESS))? - .derive(path) - .ok_or(PublicError::InvalidPath) - } + fn from_ss58check(s: &str) -> Result { + let mut res = T::default(); + let len = res.as_mut().len(); + let d = s.from_base58().map_err(|_| PublicError::BadBase58)?; // failure here would be invalid encoding. + if d.len() != len + 3 { + // Invalid length. + return Err(PublicError::BadLength); + } + if d[0] != 42 { + // Invalid version. + return Err(PublicError::UnknownVersion); + } + + if d[len + 1..len + 3] != ss58hash(&d[0..len + 1]).as_bytes()[0..2] { + // Invalid checksum. + return Err(PublicError::InvalidChecksum); + } + res.as_mut().copy_from_slice(&d[1..len + 1]); + Ok(res) + } + + fn to_ss58check(&self) -> String { + let mut v = vec![42u8]; + v.extend(self.as_ref()); + let r = ss58hash(&v); + v.extend(&r.as_bytes()[0..2]); + v.to_base58() + } + + fn from_string(s: &str) -> Result { + let re = Regex::new(r"^(?P[\w\d]+)?(?P(//?[^/]+)*)$") + .expect("constructed from known-good static value; qed"); + let cap = re.captures(s).ok_or(PublicError::InvalidFormat)?; + let re_junction = + Regex::new(r"/(/?[^/]+)").expect("constructed from known-good static value; qed"); + let path = re_junction + .captures_iter(&cap["path"]) + .map(|f| DeriveJunction::from(&f[1])); + Self::from_ss58check(cap.name("ss58").map(|r| r.as_str()).unwrap_or(DEV_ADDRESS))? + .derive(path) + .ok_or(PublicError::InvalidPath) + } } /// Trait suitable for typical cryptographic PKI key pair type. @@ -279,231 +291,321 @@ impl + AsRef<[u8]> + Default + Derive> Ss58Codec for T { /// For now it just specifies how to create a key from a phrase and derivation path. #[cfg(feature = "std")] pub trait Pair: Sized { - /// TThe type which is used to encode a public key. - type Public; - - /// The type used to (minimally) encode the data required to securely create - /// a new key pair. - type Seed; - - /// The type used to represent a signature. Can be created from a key pair and a message - /// and verified with the message and a public key. - type Signature; - - /// Error returned from the `derive` function. - type DeriveError; - - /// Generate new secure (random) key pair. - /// - /// This is only for ephemeral keys really, since you won't have access to the secret key - /// for storage. If you want a persistent key pair, use `generate_with_phrase` instead. - fn generate() -> Self; - - /// Generate new secure (random) key pair and provide the recovery phrase. - /// - /// You can recover the same key later with `from_phrase`. 
- /// - /// This is generally slower than `generate()`, so prefer that unless you need to persist - /// the key from the current session. - fn generate_with_phrase(password: Option<&str>) -> (Self, String); - - /// Returns the KeyPair from the English BIP39 seed `phrase`, or `None` if it's invalid. - fn from_phrase(phrase: &str, password: Option<&str>) -> Result; - - /// Derive a child key from a series of given junctions. - fn derive>(&self, path: Iter) -> Result; - - /// Generate new key pair from the provided `seed`. - /// - /// @WARNING: THIS WILL ONLY BE SECURE IF THE `seed` IS SECURE. If it can be guessed - /// by an attacker then they can also derive your key. - fn from_seed(seed: Self::Seed) -> Self; - - /// Make a new key pair from secret seed material. The slice must be the correct size or - /// it will return `None`. - /// - /// @WARNING: THIS WILL ONLY BE SECURE IF THE `seed` IS SECURE. If it can be guessed - /// by an attacker then they can also derive your key. - fn from_seed_slice(seed: &[u8]) -> Result; - - /// Construct a key from a phrase, password and path. - fn from_standard_components< - I: Iterator - >(phrase: &str, password: Option<&str>, path: I) -> Result; - - /// Sign a message. - fn sign(&self, message: &[u8]) -> Self::Signature; - - /// Verify a signature on a message. Returns true if the signature is good. - fn verify, M: AsRef<[u8]>>(sig: &Self::Signature, message: M, pubkey: P) -> bool; - - /// Verify a signature on a message. Returns true if the signature is good. - fn verify_weak, M: AsRef<[u8]>>(sig: &[u8], message: M, pubkey: P) -> bool; - - /// Get the public key. - fn public(&self) -> Self::Public; - - /// Interprets the string `s` in order to generate a key Pair. - /// - /// This takes a helper function to do the key generation from a phrase, password and - /// junction iterator. - /// - /// - If `s` is a possibly `0x` prefixed 64-digit hex string, then it will be interpreted - /// directly as a `MiniSecretKey` (aka "seed" in `subkey`). - /// - If `s` is a valid BIP-39 key phrase of 12, 15, 18, 21 or 24 words, then the key will - /// be derived from it. In this case: - /// - the phrase may be followed by one or more items delimited by `/` characters. - /// - the path may be followed by `///`, in which case everything after the `///` is treated - /// as a password. - /// - If `s` begins with a `/` character it is prefixed with the Substrate public `DEV_PHRASE` and - /// interpreted as above. - /// - /// In this case they are interpreted as HDKD junctions; purely numeric items are interpreted as - /// integers, non-numeric items as strings. Junctions prefixed with `/` are interpreted as soft - /// junctions, and with `//` as hard junctions. - /// - /// There is no correspondence mapping between SURI strings and the keys they represent. - /// Two different non-identical strings can actually lead to the same secret being derived. - /// Notably, integer junction indices may be legally prefixed with arbitrary number of zeros. - /// Similarly an empty password (ending the SURI with `///`) is perfectly valid and will generally - /// be equivalent to no password at all. - /// - /// `None` is returned if no matches are found. - fn from_string(s: &str, password_override: Option<&str>) -> Result { - let hex_seed = if s.starts_with("0x") { - &s[2..] 
- } else { - s - }; - - if let Ok(d) = hex::decode(hex_seed) { - if let Ok(r) = Self::from_seed_slice(&d) { - return Ok(r) - } - } - - let re = Regex::new(r"^(?P\w+( \w+)*)?(?P(//?[^/]+)*)(///(?P.*))?$") - .expect("constructed from known-good static value; qed"); - let cap = re.captures(s).ok_or(SecretStringError::InvalidFormat)?; - let re_junction = Regex::new(r"/(/?[^/]+)") - .expect("constructed from known-good static value; qed"); - let path = re_junction.captures_iter(&cap["path"]) - .map(|f| DeriveJunction::from(&f[1])); - Self::from_standard_components( - cap.name("phrase").map(|r| r.as_str()).unwrap_or(DEV_PHRASE), - password_override.or_else(|| cap.name("password").map(|m| m.as_str())), - path, - ) - } + /// TThe type which is used to encode a public key. + type Public; + + /// The type used to (minimally) encode the data required to securely create + /// a new key pair. + type Seed; + + /// The type used to represent a signature. Can be created from a key pair and a message + /// and verified with the message and a public key. + type Signature; + + /// Error returned from the `derive` function. + type DeriveError; + + /// Generate new secure (random) key pair. + /// + /// This is only for ephemeral keys really, since you won't have access to the secret key + /// for storage. If you want a persistent key pair, use `generate_with_phrase` instead. + fn generate() -> Self; + + /// Generate new secure (random) key pair and provide the recovery phrase. + /// + /// You can recover the same key later with `from_phrase`. + /// + /// This is generally slower than `generate()`, so prefer that unless you need to persist + /// the key from the current session. + fn generate_with_phrase(password: Option<&str>) -> (Self, String); + + /// Returns the KeyPair from the English BIP39 seed `phrase`, or `None` if it's invalid. + fn from_phrase(phrase: &str, password: Option<&str>) -> Result; + + /// Derive a child key from a series of given junctions. + fn derive>( + &self, + path: Iter, + ) -> Result; + + /// Generate new key pair from the provided `seed`. + /// + /// @WARNING: THIS WILL ONLY BE SECURE IF THE `seed` IS SECURE. If it can be guessed + /// by an attacker then they can also derive your key. + fn from_seed(seed: Self::Seed) -> Self; + + /// Make a new key pair from secret seed material. The slice must be the correct size or + /// it will return `None`. + /// + /// @WARNING: THIS WILL ONLY BE SECURE IF THE `seed` IS SECURE. If it can be guessed + /// by an attacker then they can also derive your key. + fn from_seed_slice(seed: &[u8]) -> Result; + + /// Construct a key from a phrase, password and path. + fn from_standard_components>( + phrase: &str, + password: Option<&str>, + path: I, + ) -> Result; + + /// Sign a message. + fn sign(&self, message: &[u8]) -> Self::Signature; + + /// Verify a signature on a message. Returns true if the signature is good. + fn verify, M: AsRef<[u8]>>( + sig: &Self::Signature, + message: M, + pubkey: P, + ) -> bool; + + /// Verify a signature on a message. Returns true if the signature is good. + fn verify_weak, M: AsRef<[u8]>>(sig: &[u8], message: M, pubkey: P) -> bool; + + /// Get the public key. + fn public(&self) -> Self::Public; + + /// Interprets the string `s` in order to generate a key Pair. + /// + /// This takes a helper function to do the key generation from a phrase, password and + /// junction iterator. 
+ /// + /// - If `s` is a possibly `0x` prefixed 64-digit hex string, then it will be interpreted + /// directly as a `MiniSecretKey` (aka "seed" in `subkey`). + /// - If `s` is a valid BIP-39 key phrase of 12, 15, 18, 21 or 24 words, then the key will + /// be derived from it. In this case: + /// - the phrase may be followed by one or more items delimited by `/` characters. + /// - the path may be followed by `///`, in which case everything after the `///` is treated + /// as a password. + /// - If `s` begins with a `/` character it is prefixed with the Substrate public `DEV_PHRASE` and + /// interpreted as above. + /// + /// In this case they are interpreted as HDKD junctions; purely numeric items are interpreted as + /// integers, non-numeric items as strings. Junctions prefixed with `/` are interpreted as soft + /// junctions, and with `//` as hard junctions. + /// + /// There is no correspondence mapping between SURI strings and the keys they represent. + /// Two different non-identical strings can actually lead to the same secret being derived. + /// Notably, integer junction indices may be legally prefixed with arbitrary number of zeros. + /// Similarly an empty password (ending the SURI with `///`) is perfectly valid and will generally + /// be equivalent to no password at all. + /// + /// `None` is returned if no matches are found. + fn from_string(s: &str, password_override: Option<&str>) -> Result { + let hex_seed = if s.starts_with("0x") { &s[2..] } else { s }; + + if let Ok(d) = hex::decode(hex_seed) { + if let Ok(r) = Self::from_seed_slice(&d) { + return Ok(r); + } + } + + let re = + Regex::new(r"^(?P\w+( \w+)*)?(?P(//?[^/]+)*)(///(?P.*))?$") + .expect("constructed from known-good static value; qed"); + let cap = re.captures(s).ok_or(SecretStringError::InvalidFormat)?; + let re_junction = + Regex::new(r"/(/?[^/]+)").expect("constructed from known-good static value; qed"); + let path = re_junction + .captures_iter(&cap["path"]) + .map(|f| DeriveJunction::from(&f[1])); + Self::from_standard_components( + cap.name("phrase").map(|r| r.as_str()).unwrap_or(DEV_PHRASE), + password_override.or_else(|| cap.name("password").map(|m| m.as_str())), + path, + ) + } } #[cfg(test)] mod tests { - use crate::DeriveJunction; - use hex_literal::{hex, hex_impl}; - use super::*; - - #[derive(Eq, PartialEq, Debug)] - enum TestPair { - Generated, - GeneratedWithPhrase, - GeneratedFromPhrase{phrase: String, password: Option}, - Standard{phrase: String, password: Option, path: Vec}, - Seed(Vec), - } - - impl Pair for TestPair { - type Public = (); - type Seed = (); - type Signature = (); - type DeriveError = (); - - fn generate() -> Self { TestPair::Generated } - fn generate_with_phrase(_password: Option<&str>) -> (Self, String) { (TestPair::GeneratedWithPhrase, "".into()) } - fn from_phrase(phrase: &str, password: Option<&str>) -> Result { - Ok(TestPair::GeneratedFromPhrase{ phrase: phrase.to_owned(), password: password.map(Into::into) }) - } - fn derive>(&self, _path: Iter) -> Result { - Err(()) - } - fn from_seed(_seed: ::Seed) -> Self { TestPair::Seed(vec![]) } - fn sign(&self, _message: &[u8]) -> Self::Signature { () } - fn verify, M: AsRef<[u8]>>(_sig: &Self::Signature, _message: M, _pubkey: P) -> bool { true } - fn verify_weak, M: AsRef<[u8]>>(_sig: &[u8], _message: M, _pubkey: P) -> bool { true } - fn public(&self) -> Self::Public { () } - fn from_standard_components>(phrase: &str, password: Option<&str>, path: I) -> Result { - Ok(TestPair::Standard { phrase: phrase.to_owned(), password: 
password.map(ToOwned::to_owned), path: path.collect() }) - } - fn from_seed_slice(seed: &[u8]) -> Result { - Ok(TestPair::Seed(seed.to_owned())) - } - } - - #[test] - fn interpret_std_seed_should_work() { - assert_eq!( - TestPair::from_string("0x0123456789abcdef", None), - Ok(TestPair::Seed(hex!["0123456789abcdef"][..].to_owned())) - ); - assert_eq!( - TestPair::from_string("0123456789abcdef", None), - Ok(TestPair::Seed(hex!["0123456789abcdef"][..].to_owned())) - ); - } - - #[test] - fn password_override_should_work() { - assert_eq!( - TestPair::from_string("hello world///password", None), - TestPair::from_string("hello world", Some("password")), - ); - assert_eq!( - TestPair::from_string("hello world///password", None), - TestPair::from_string("hello world///other password", Some("password")), - ); - } - - #[test] - fn interpret_std_secret_string_should_work() { - assert_eq!( - TestPair::from_string("hello world", None), - Ok(TestPair::Standard{phrase: "hello world".to_owned(), password: None, path: vec![]}) - ); - assert_eq!( - TestPair::from_string("hello world/1", None), - Ok(TestPair::Standard{phrase: "hello world".to_owned(), password: None, path: vec![DeriveJunction::soft(1)]}) - ); - assert_eq!( - TestPair::from_string("hello world/DOT", None), - Ok(TestPair::Standard{phrase: "hello world".to_owned(), password: None, path: vec![DeriveJunction::soft("DOT")]}) - ); - assert_eq!( - TestPair::from_string("hello world//1", None), - Ok(TestPair::Standard{phrase: "hello world".to_owned(), password: None, path: vec![DeriveJunction::hard(1)]}) - ); - assert_eq!( - TestPair::from_string("hello world//DOT", None), - Ok(TestPair::Standard{phrase: "hello world".to_owned(), password: None, path: vec![DeriveJunction::hard("DOT")]}) - ); - assert_eq!( - TestPair::from_string("hello world//1/DOT", None), - Ok(TestPair::Standard{phrase: "hello world".to_owned(), password: None, path: vec![DeriveJunction::hard(1), DeriveJunction::soft("DOT")]}) - ); - assert_eq!( - TestPair::from_string("hello world//DOT/1", None), - Ok(TestPair::Standard{phrase: "hello world".to_owned(), password: None, path: vec![DeriveJunction::hard("DOT"), DeriveJunction::soft(1)]}) - ); - assert_eq!( - TestPair::from_string("hello world///password", None), - Ok(TestPair::Standard{phrase: "hello world".to_owned(), password: Some("password".to_owned()), path: vec![]}) - ); - assert_eq!( - TestPair::from_string("hello world//1/DOT///password", None), - Ok(TestPair::Standard{phrase: "hello world".to_owned(), password: Some("password".to_owned()), path: vec![DeriveJunction::hard(1), DeriveJunction::soft("DOT")]}) - ); - assert_eq!( - TestPair::from_string("hello world/1//DOT///password", None), - Ok(TestPair::Standard{phrase: "hello world".to_owned(), password: Some("password".to_owned()), path: vec![DeriveJunction::soft(1), DeriveJunction::hard("DOT")]}) - ); - } + use super::*; + use crate::DeriveJunction; + use hex_literal::{hex, hex_impl}; + + #[derive(Eq, PartialEq, Debug)] + enum TestPair { + Generated, + GeneratedWithPhrase, + GeneratedFromPhrase { + phrase: String, + password: Option, + }, + Standard { + phrase: String, + password: Option, + path: Vec, + }, + Seed(Vec), + } + + impl Pair for TestPair { + type Public = (); + type Seed = (); + type Signature = (); + type DeriveError = (); + + fn generate() -> Self { + TestPair::Generated + } + fn generate_with_phrase(_password: Option<&str>) -> (Self, String) { + (TestPair::GeneratedWithPhrase, "".into()) + } + fn from_phrase(phrase: &str, password: Option<&str>) -> Result { + 
Ok(TestPair::GeneratedFromPhrase { + phrase: phrase.to_owned(), + password: password.map(Into::into), + }) + } + fn derive>( + &self, + _path: Iter, + ) -> Result { + Err(()) + } + fn from_seed(_seed: ::Seed) -> Self { + TestPair::Seed(vec![]) + } + fn sign(&self, _message: &[u8]) -> Self::Signature { + () + } + fn verify, M: AsRef<[u8]>>( + _sig: &Self::Signature, + _message: M, + _pubkey: P, + ) -> bool { + true + } + fn verify_weak, M: AsRef<[u8]>>( + _sig: &[u8], + _message: M, + _pubkey: P, + ) -> bool { + true + } + fn public(&self) -> Self::Public { + () + } + fn from_standard_components>( + phrase: &str, + password: Option<&str>, + path: I, + ) -> Result { + Ok(TestPair::Standard { + phrase: phrase.to_owned(), + password: password.map(ToOwned::to_owned), + path: path.collect(), + }) + } + fn from_seed_slice(seed: &[u8]) -> Result { + Ok(TestPair::Seed(seed.to_owned())) + } + } + + #[test] + fn interpret_std_seed_should_work() { + assert_eq!( + TestPair::from_string("0x0123456789abcdef", None), + Ok(TestPair::Seed(hex!["0123456789abcdef"][..].to_owned())) + ); + assert_eq!( + TestPair::from_string("0123456789abcdef", None), + Ok(TestPair::Seed(hex!["0123456789abcdef"][..].to_owned())) + ); + } + + #[test] + fn password_override_should_work() { + assert_eq!( + TestPair::from_string("hello world///password", None), + TestPair::from_string("hello world", Some("password")), + ); + assert_eq!( + TestPair::from_string("hello world///password", None), + TestPair::from_string("hello world///other password", Some("password")), + ); + } + + #[test] + fn interpret_std_secret_string_should_work() { + assert_eq!( + TestPair::from_string("hello world", None), + Ok(TestPair::Standard { + phrase: "hello world".to_owned(), + password: None, + path: vec![] + }) + ); + assert_eq!( + TestPair::from_string("hello world/1", None), + Ok(TestPair::Standard { + phrase: "hello world".to_owned(), + password: None, + path: vec![DeriveJunction::soft(1)] + }) + ); + assert_eq!( + TestPair::from_string("hello world/DOT", None), + Ok(TestPair::Standard { + phrase: "hello world".to_owned(), + password: None, + path: vec![DeriveJunction::soft("DOT")] + }) + ); + assert_eq!( + TestPair::from_string("hello world//1", None), + Ok(TestPair::Standard { + phrase: "hello world".to_owned(), + password: None, + path: vec![DeriveJunction::hard(1)] + }) + ); + assert_eq!( + TestPair::from_string("hello world//DOT", None), + Ok(TestPair::Standard { + phrase: "hello world".to_owned(), + password: None, + path: vec![DeriveJunction::hard("DOT")] + }) + ); + assert_eq!( + TestPair::from_string("hello world//1/DOT", None), + Ok(TestPair::Standard { + phrase: "hello world".to_owned(), + password: None, + path: vec![DeriveJunction::hard(1), DeriveJunction::soft("DOT")] + }) + ); + assert_eq!( + TestPair::from_string("hello world//DOT/1", None), + Ok(TestPair::Standard { + phrase: "hello world".to_owned(), + password: None, + path: vec![DeriveJunction::hard("DOT"), DeriveJunction::soft(1)] + }) + ); + assert_eq!( + TestPair::from_string("hello world///password", None), + Ok(TestPair::Standard { + phrase: "hello world".to_owned(), + password: Some("password".to_owned()), + path: vec![] + }) + ); + assert_eq!( + TestPair::from_string("hello world//1/DOT///password", None), + Ok(TestPair::Standard { + phrase: "hello world".to_owned(), + password: Some("password".to_owned()), + path: vec![DeriveJunction::hard(1), DeriveJunction::soft("DOT")] + }) + ); + assert_eq!( + TestPair::from_string("hello world/1//DOT///password", None), + 
Ok(TestPair::Standard { + phrase: "hello world".to_owned(), + password: Some("password".to_owned()), + path: vec![DeriveJunction::soft(1), DeriveJunction::hard("DOT")] + }) + ); + } } diff --git a/core/primitives/src/ed25519.rs b/core/primitives/src/ed25519.rs index 937cc19a89..b0063330af 100644 --- a/core/primitives/src/ed25519.rs +++ b/core/primitives/src/ed25519.rs @@ -18,27 +18,30 @@ //! Simple Ed25519 API. // end::description[] - use crate::{hash::H256, hash::H512}; -use parity_codec::{Encode, Decode}; +use parity_codec::{Decode, Encode}; +use crate::crypto::UncheckedFrom; #[cfg(feature = "std")] -use untrusted; +use crate::crypto::{Derive, DeriveJunction, Pair as TraitPair, SecretStringError}; #[cfg(feature = "std")] -use blake2_rfc; +use base58::{FromBase58, ToBase58}; #[cfg(feature = "std")] -use ring::{signature, signature::KeyPair, rand::{SecureRandom, SystemRandom}}; +use bip39::{Language, Mnemonic, MnemonicType}; #[cfg(feature = "std")] -use base58::{ToBase58, FromBase58}; +use blake2_rfc; #[cfg(feature = "std")] -use substrate_bip39::seed_from_entropy; +use ring::{ + rand::{SecureRandom, SystemRandom}, + signature, + signature::KeyPair, +}; #[cfg(feature = "std")] -use bip39::{Mnemonic, Language, MnemonicType}; +use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; #[cfg(feature = "std")] -use crate::crypto::{Pair as TraitPair, DeriveJunction, SecretStringError, Derive}; +use substrate_bip39::seed_from_entropy; #[cfg(feature = "std")] -use serde::{de, Serializer, Serialize, Deserializer, Deserialize}; -use crate::crypto::UncheckedFrom; +use untrusted; /// A secret seed. It's not called a "secret key" because ring doesn't expose the secret keys /// of the key pair (yeah, dumb); as such we're forced to remember the seed manually if we @@ -56,101 +59,112 @@ pub struct Pair(signature::Ed25519KeyPair, Seed); #[cfg(feature = "std")] impl Clone for Pair { - fn clone(&self) -> Self { - Pair::from_seed(self.1.clone()) - } + fn clone(&self) -> Self { + Pair::from_seed(self.1.clone()) + } } impl AsRef<[u8; 32]> for Public { - fn as_ref(&self) -> &[u8; 32] { - &self.0 - } + fn as_ref(&self) -> &[u8; 32] { + &self.0 + } } impl AsRef<[u8]> for Public { - fn as_ref(&self) -> &[u8] { - &self.0[..] - } + fn as_ref(&self) -> &[u8] { + &self.0[..] + } } impl AsMut<[u8]> for Public { - fn as_mut(&mut self) -> &mut [u8] { - &mut self.0[..] - } + fn as_mut(&mut self) -> &mut [u8] { + &mut self.0[..] 
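The suffix grammar exercised by the tests above is: everything after the first `///` is the password, a `//` segment is a hard junction, and a single `/` segment is a soft junction. A minimal standalone sketch of that split (the `Junction` enum and `parse_suri` helper are illustrative names, not the crate's API):

```rust
// Illustrative only: `Junction` and `parse_suri` are not the crate's API.
#[derive(Debug, PartialEq)]
enum Junction {
    Soft(String),
    Hard(String),
}

fn parse_suri(s: &str) -> (String, Vec<Junction>, Option<String>) {
    // Everything after the first `///` is the password.
    let (rest, password) = match s.find("///") {
        Some(i) => (&s[..i], Some(s[i + 3..].to_owned())),
        None => (s, None),
    };
    // The phrase is everything before the first `/`.
    let mut parts = rest.split('/');
    let phrase = parts.next().unwrap_or("").to_owned();
    let (mut path, mut hard) = (Vec::new(), false);
    for p in parts {
        if p.is_empty() {
            // An empty segment means we just crossed `//`: the next one is hard.
            hard = true;
        } else if hard {
            path.push(Junction::Hard(p.to_owned()));
            hard = false;
        } else {
            path.push(Junction::Soft(p.to_owned()));
        }
    }
    (phrase, path, password)
}

fn main() {
    let (phrase, path, password) = parse_suri("hello world//1/DOT///password");
    assert_eq!(phrase, "hello world");
    assert_eq!(
        path,
        vec![Junction::Hard("1".into()), Junction::Soft("DOT".into())]
    );
    assert_eq!(password, Some("password".to_owned()));
}
```

The real parser additionally accepts raw hex seeds such as `0x0123456789abcdef`, which is what `interpret_std_seed_should_work` covers.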
+ } } impl From for [u8; 32] { - fn from(x: Public) -> Self { - x.0 - } + fn from(x: Public) -> Self { + x.0 + } } #[cfg(feature = "std")] impl From for Public { - fn from(x: Pair) -> Self { - x.public() - } + fn from(x: Pair) -> Self { + x.public() + } } impl AsRef for Public { - fn as_ref(&self) -> &Public { - &self - } + fn as_ref(&self) -> &Public { + &self + } } impl From for H256 { - fn from(x: Public) -> Self { - x.0.into() - } + fn from(x: Public) -> Self { + x.0.into() + } } impl UncheckedFrom<[u8; 32]> for Public { - fn unchecked_from(x: [u8; 32]) -> Self { - Public::from_raw(x) - } + fn unchecked_from(x: [u8; 32]) -> Self { + Public::from_raw(x) + } } impl UncheckedFrom for Public { - fn unchecked_from(x: H256) -> Self { - Public::from_h256(x) - } + fn unchecked_from(x: H256) -> Self { + Public::from_h256(x) + } } #[cfg(feature = "std")] impl ::std::fmt::Display for Public { - fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { - write!(f, "{}", self.to_ss58check()) - } + fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { + write!(f, "{}", self.to_ss58check()) + } } #[cfg(feature = "std")] impl ::std::fmt::Debug for Public { - fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { - let s = self.to_ss58check(); - write!(f, "{} ({}...)", crate::hexdisplay::HexDisplay::from(&self.0), &s[0..8]) - } + fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { + let s = self.to_ss58check(); + write!( + f, + "{} ({}...)", + crate::hexdisplay::HexDisplay::from(&self.0), + &s[0..8] + ) + } } #[cfg(feature = "std")] impl Serialize for Public { - fn serialize(&self, serializer: S) -> Result where S: Serializer { - serializer.serialize_str(&self.to_ss58check()) - } + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_str(&self.to_ss58check()) + } } #[cfg(feature = "std")] impl<'de> Deserialize<'de> for Public { - fn deserialize(deserializer: D) -> Result where D: Deserializer<'de> { - Public::from_ss58check(&String::deserialize(deserializer)?) - .map_err(|e| de::Error::custom(format!("{:?}", e))) - } + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + Public::from_ss58check(&String::deserialize(deserializer)?) + .map_err(|e| de::Error::custom(format!("{:?}", e))) + } } #[cfg(feature = "std")] impl ::std::hash::Hash for Public { - fn hash(&self, state: &mut H) { - self.0.hash(state); - } + fn hash(&self, state: &mut H) { + self.0.hash(state); + } } /// A signature (a 512-bit value). @@ -158,167 +172,167 @@ impl ::std::hash::Hash for Public { pub struct Signature(pub [u8; 64]); impl Clone for Signature { - fn clone(&self) -> Self { - let mut r = [0u8; 64]; - r.copy_from_slice(&self.0[..]); - Signature(r) - } + fn clone(&self) -> Self { + let mut r = [0u8; 64]; + r.copy_from_slice(&self.0[..]); + Signature(r) + } } impl Default for Signature { - fn default() -> Self { - Signature([0u8; 64]) - } + fn default() -> Self { + Signature([0u8; 64]) + } } impl PartialEq for Signature { - fn eq(&self, b: &Self) -> bool { - &self.0[..] == &b.0[..] - } + fn eq(&self, b: &Self) -> bool { + &self.0[..] == &b.0[..] 
+ } } impl Eq for Signature {} impl From for H512 { - fn from(v: Signature) -> H512 { - H512::from(v.0) - } + fn from(v: Signature) -> H512 { + H512::from(v.0) + } } impl From for [u8; 64] { - fn from(v: Signature) -> [u8; 64] { - v.0 - } + fn from(v: Signature) -> [u8; 64] { + v.0 + } } impl AsRef<[u8; 64]> for Signature { - fn as_ref(&self) -> &[u8; 64] { - &self.0 - } + fn as_ref(&self) -> &[u8; 64] { + &self.0 + } } impl AsRef<[u8]> for Signature { - fn as_ref(&self) -> &[u8] { - &self.0[..] - } + fn as_ref(&self) -> &[u8] { + &self.0[..] + } } impl AsMut<[u8]> for Signature { - fn as_mut(&mut self) -> &mut [u8] { - &mut self.0[..] - } + fn as_mut(&mut self) -> &mut [u8] { + &mut self.0[..] + } } #[cfg(feature = "std")] impl ::std::fmt::Debug for Signature { - fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { - write!(f, "{}", crate::hexdisplay::HexDisplay::from(&self.0)) - } + fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { + write!(f, "{}", crate::hexdisplay::HexDisplay::from(&self.0)) + } } #[cfg(feature = "std")] impl ::std::hash::Hash for Signature { - fn hash(&self, state: &mut H) { - ::std::hash::Hash::hash(&self.0[..], state); - } + fn hash(&self, state: &mut H) { + ::std::hash::Hash::hash(&self.0[..], state); + } } impl Signature { - /// A new instance from the given 64-byte `data`. - /// - /// NOTE: No checking goes on to ensure this is a real signature. Only use it if - /// you are certain that the array actually is a signature. GIGO! - pub fn from_raw(data: [u8; 64]) -> Signature { - Signature(data) - } - - /// A new instance from the given slice that should be 64 bytes long. - /// - /// NOTE: No checking goes on to ensure this is a real signature. Only use it if - /// you are certain that the array actually is a signature. GIGO! - pub fn from_slice(data: &[u8]) -> Self { - let mut r = [0u8; 64]; - r.copy_from_slice(data); - Signature(r) - } - - /// A new instance from an H512. - /// - /// NOTE: No checking goes on to ensure this is a real signature. Only use it if - /// you are certain that the array actually is a signature. GIGO! - pub fn from_h512(v: H512) -> Signature { - Signature(v.into()) - } + /// A new instance from the given 64-byte `data`. + /// + /// NOTE: No checking goes on to ensure this is a real signature. Only use it if + /// you are certain that the array actually is a signature. GIGO! + pub fn from_raw(data: [u8; 64]) -> Signature { + Signature(data) + } + + /// A new instance from the given slice that should be 64 bytes long. + /// + /// NOTE: No checking goes on to ensure this is a real signature. Only use it if + /// you are certain that the array actually is a signature. GIGO! + pub fn from_slice(data: &[u8]) -> Self { + let mut r = [0u8; 64]; + r.copy_from_slice(data); + Signature(r) + } + + /// A new instance from an H512. + /// + /// NOTE: No checking goes on to ensure this is a real signature. Only use it if + /// you are certain that the array actually is a signature. GIGO! + pub fn from_h512(v: H512) -> Signature { + Signature(v.into()) + } } /// A localized signature also contains sender information. #[cfg(feature = "std")] #[derive(PartialEq, Eq, Clone, Debug, Encode, Decode)] pub struct LocalizedSignature { - /// The signer of the signature. - pub signer: Public, - /// The signature itself. - pub signature: Signature, + /// The signer of the signature. + pub signer: Public, + /// The signature itself. + pub signature: Signature, } /// An error type for SS58 decoding. 
#[cfg(feature = "std")] #[derive(Clone, Copy, Eq, PartialEq, Debug)] pub enum PublicError { - /// Bad alphabet. - BadBase58, - /// Bad length. - BadLength, - /// Unknown version. - UnknownVersion, - /// Invalid checksum. - InvalidChecksum, + /// Bad alphabet. + BadBase58, + /// Bad length. + BadLength, + /// Unknown version. + UnknownVersion, + /// Invalid checksum. + InvalidChecksum, } impl Public { - /// A new instance from the given 32-byte `data`. - /// - /// NOTE: No checking goes on to ensure this is a real public key. Only use it if - /// you are certain that the array actually is a pubkey. GIGO! - pub fn from_raw(data: [u8; 32]) -> Self { - Public(data) - } - - /// A new instance from the given slice that should be 32 bytes long. - /// - /// NOTE: No checking goes on to ensure this is a real public key. Only use it if - /// you are certain that the array actually is a pubkey. GIGO! - pub fn from_slice(data: &[u8]) -> Self { - let mut r = [0u8; 32]; - r.copy_from_slice(data); - Public(r) - } - - /// A new instance from an H256. - /// - /// NOTE: No checking goes on to ensure this is a real public key. Only use it if - /// you are certain that the array actually is a pubkey. GIGO! - pub fn from_h256(x: H256) -> Self { - Public(x.into()) - } - - /// Return a `Vec` filled with raw data. - #[cfg(feature = "std")] - pub fn to_raw_vec(self) -> Vec { - let r: &[u8; 32] = self.as_ref(); - r.to_vec() - } - - /// Return a slice filled with raw data. - pub fn as_slice(&self) -> &[u8] { - let r: &[u8; 32] = self.as_ref(); - &r[..] - } - - /// Return a slice filled with raw data. - pub fn as_array_ref(&self) -> &[u8; 32] { - self.as_ref() - } + /// A new instance from the given 32-byte `data`. + /// + /// NOTE: No checking goes on to ensure this is a real public key. Only use it if + /// you are certain that the array actually is a pubkey. GIGO! + pub fn from_raw(data: [u8; 32]) -> Self { + Public(data) + } + + /// A new instance from the given slice that should be 32 bytes long. + /// + /// NOTE: No checking goes on to ensure this is a real public key. Only use it if + /// you are certain that the array actually is a pubkey. GIGO! + pub fn from_slice(data: &[u8]) -> Self { + let mut r = [0u8; 32]; + r.copy_from_slice(data); + Public(r) + } + + /// A new instance from an H256. + /// + /// NOTE: No checking goes on to ensure this is a real public key. Only use it if + /// you are certain that the array actually is a pubkey. GIGO! + pub fn from_h256(x: H256) -> Self { + Public(x.into()) + } + + /// Return a `Vec` filled with raw data. + #[cfg(feature = "std")] + pub fn to_raw_vec(self) -> Vec { + let r: &[u8; 32] = self.as_ref(); + r.to_vec() + } + + /// Return a slice filled with raw data. + pub fn as_slice(&self) -> &[u8] { + let r: &[u8; 32] = self.as_ref(); + &r[..] + } + + /// Return a slice filled with raw data. + pub fn as_array_ref(&self) -> &[u8; 32] { + self.as_ref() + } } #[cfg(feature = "std")] @@ -326,286 +340,329 @@ impl Derive for Public {} #[cfg(feature = "std")] impl Public { - /// Some if the string is a properly encoded SS58Check address. - pub fn from_ss58check(s: &str) -> Result { - let d = s.from_base58().map_err(|_| PublicError::BadBase58)?; // failure here would be invalid encoding. - if d.len() != 35 { - // Invalid length. - return Err(PublicError::BadLength); - } - if d[0] != 42 { - // Invalid version. - return Err(PublicError::UnknownVersion); - } - if d[33..35] != blake2_rfc::blake2b::blake2b(64, &[], &d[0..33]).as_bytes()[0..2] { - // Invalid checksum. 
- return Err(PublicError::InvalidChecksum); - } - Ok(Self::from_slice(&d[1..33])) - } - - /// Return the ss58-check string for this key. - pub fn to_ss58check(&self) -> String { - let mut v = vec![42u8]; - v.extend(self.as_slice()); - let r = blake2_rfc::blake2b::blake2b(64, &[], &v); - v.extend(&r.as_bytes()[0..2]); - v.to_base58() - } + /// Some if the string is a properly encoded SS58Check address. + pub fn from_ss58check(s: &str) -> Result { + let d = s.from_base58().map_err(|_| PublicError::BadBase58)?; // failure here would be invalid encoding. + if d.len() != 35 { + // Invalid length. + return Err(PublicError::BadLength); + } + if d[0] != 42 { + // Invalid version. + return Err(PublicError::UnknownVersion); + } + if d[33..35] != blake2_rfc::blake2b::blake2b(64, &[], &d[0..33]).as_bytes()[0..2] { + // Invalid checksum. + return Err(PublicError::InvalidChecksum); + } + Ok(Self::from_slice(&d[1..33])) + } + + /// Return the ss58-check string for this key. + pub fn to_ss58check(&self) -> String { + let mut v = vec![42u8]; + v.extend(self.as_slice()); + let r = blake2_rfc::blake2b::blake2b(64, &[], &v); + v.extend(&r.as_bytes()[0..2]); + v.to_base58() + } } #[cfg(feature = "std")] impl AsRef for Pair { - fn as_ref(&self) -> &Pair { - &self - } + fn as_ref(&self) -> &Pair { + &self + } } /// Derive a single hard junction. #[cfg(feature = "std")] fn derive_hard_junction(secret_seed: &Seed, cc: &[u8; 32]) -> Seed { - ("Ed25519HDKD", secret_seed, cc).using_encoded(|data| { - let mut res = [0u8; 32]; - res.copy_from_slice(blake2_rfc::blake2b::blake2b(32, &[], data).as_bytes()); - res - }) + ("Ed25519HDKD", secret_seed, cc).using_encoded(|data| { + let mut res = [0u8; 32]; + res.copy_from_slice(blake2_rfc::blake2b::blake2b(32, &[], data).as_bytes()); + res + }) } /// An error when deriving a key. #[cfg(feature = "std")] pub enum DeriveError { - /// A soft key was found in the path (and is unsupported). - SoftKeyInPath, + /// A soft key was found in the path (and is unsupported). + SoftKeyInPath, } #[cfg(feature = "std")] impl TraitPair for Pair { - type Public = Public; - type Seed = Seed; - type Signature = Signature; - type DeriveError = DeriveError; - - /// Generate new secure (random) key pair. - /// - /// This is only for ephemeral keys really, since you won't have access to the secret key - /// for storage. If you want a persistent key pair, use `generate_with_phrase` instead. - fn generate() -> Pair { - let mut seed: Seed = Default::default(); - SystemRandom::new().fill(seed.as_mut()).expect("system random source should always work! qed"); - Self::from_seed(seed) - } - - /// Generate new secure (random) key pair and provide the recovery phrase. - /// - /// You can recover the same key later with `from_phrase`. - fn generate_with_phrase(password: Option<&str>) -> (Pair, String) { - let mnemonic = Mnemonic::new(MnemonicType::Words12, Language::English); - let phrase = mnemonic.phrase(); - ( - Self::from_phrase(phrase, password).expect("All phrases generated by Mnemonic are valid; qed"), - phrase.to_owned(), - ) - } - - /// Generate key pair from given recovery phrase and password. - fn from_phrase(phrase: &str, password: Option<&str>) -> Result { - let big_seed = seed_from_entropy( - Mnemonic::from_phrase(phrase, Language::English) - .map_err(|_| SecretStringError::InvalidPhrase)?.entropy(), - password.unwrap_or(""), - ).map_err(|_| SecretStringError::InvalidSeed)?; - Self::from_seed_slice(&big_seed[0..32]) - } - - /// Make a new key pair from secret seed material. 
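The SS58 scheme reformatted here is compact enough to sketch standalone: one version byte (42), the 32-byte key, then the first two bytes of a blake2b-512 checksum over version plus key, all base58-encoded. This mirrors the `base58` and `blake2_rfc` calls above; error handling is collapsed to `Option` for brevity:

```rust
use base58::{FromBase58, ToBase58};

fn ss58_encode(key: &[u8; 32]) -> String {
    let mut v = vec![42u8]; // version byte
    v.extend_from_slice(key);
    let checksum = blake2_rfc::blake2b::blake2b(64, &[], &v);
    v.extend_from_slice(&checksum.as_bytes()[0..2]);
    v.to_base58()
}

fn ss58_decode(s: &str) -> Option<[u8; 32]> {
    let d = s.from_base58().ok()?;
    // 1 version byte + 32 key bytes + 2 checksum bytes.
    if d.len() != 35 || d[0] != 42 {
        return None;
    }
    let checksum = blake2_rfc::blake2b::blake2b(64, &[], &d[0..33]);
    if d[33..35] != checksum.as_bytes()[0..2] {
        return None;
    }
    let mut key = [0u8; 32];
    key.copy_from_slice(&d[1..33]);
    Some(key)
}

fn main() {
    let key = *b"12345678901234567890123456789012";
    let s = ss58_encode(&key);
    assert_eq!(ss58_decode(&s), Some(key));
    println!("ss58: {}", s);
}
```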
- /// - /// You should never need to use this; generate(), generate_with_phrasee - fn from_seed(seed: Seed) -> Pair { - let key = signature::Ed25519KeyPair::from_seed_unchecked(untrusted::Input::from(&seed[..])) - .expect("seed has valid length; qed"); - Pair(key, seed) - } - - /// Make a new key pair from secret seed material. The slice must be 32 bytes long or it - /// will return `None`. - /// - /// You should never need to use this; generate(), generate_with_phrase - fn from_seed_slice(seed_slice: &[u8]) -> Result { - if seed_slice.len() != 32 { - Err(SecretStringError::InvalidSeedLength) - } else { - let mut seed = [0u8; 32]; - seed.copy_from_slice(&seed_slice); - Ok(Self::from_seed(seed)) - } - } - - /// Derive a child key from a series of given junctions. - fn derive>(&self, path: Iter) -> Result { - let mut acc = self.1.clone(); - for j in path { - match j { - DeriveJunction::Soft(_cc) => return Err(DeriveError::SoftKeyInPath), - DeriveJunction::Hard(cc) => acc = derive_hard_junction(&acc, &cc), - } - } - Ok(Self::from_seed(acc)) - } - - /// Generate a key from the phrase, password and derivation path. - fn from_standard_components>(phrase: &str, password: Option<&str>, path: I) -> Result { - Self::from_phrase(phrase, password)?.derive(path).map_err(|_| SecretStringError::InvalidPath) - } - - /// Get the public key. - fn public(&self) -> Public { - let mut r = [0u8; 32]; - let pk = self.0.public_key().as_ref(); - r.copy_from_slice(pk); - Public(r) - } - - /// Sign a message. - fn sign(&self, message: &[u8]) -> Signature { - let mut r = [0u8; 64]; - r.copy_from_slice(self.0.sign(message).as_ref()); - Signature::from_raw(r) - } - - /// Verify a signature on a message. Returns true if the signature is good. - fn verify, M: AsRef<[u8]>>(sig: &Self::Signature, message: M, pubkey: P) -> bool { - let public_key = untrusted::Input::from(&pubkey.as_ref().0[..]); - let msg = untrusted::Input::from(message.as_ref()); - let sig = untrusted::Input::from(&sig.0[..]); - - match signature::verify(&signature::ED25519, public_key, msg, sig) { - Ok(_) => true, - _ => false, - } - } - - /// Verify a signature on a message. Returns true if the signature is good. - /// - /// This doesn't use the type system to ensure that `sig` and `pubkey` are the correct - /// size. Use it only if you're coming from byte buffers and need the speed. - fn verify_weak, M: AsRef<[u8]>>(sig: &[u8], message: M, pubkey: P) -> bool { - let public_key = untrusted::Input::from(pubkey.as_ref()); - let msg = untrusted::Input::from(message.as_ref()); - let sig = untrusted::Input::from(sig); - - match signature::verify(&signature::ED25519, public_key, msg, sig) { - Ok(_) => true, - _ => false, - } - } + type Public = Public; + type Seed = Seed; + type Signature = Signature; + type DeriveError = DeriveError; + + /// Generate new secure (random) key pair. + /// + /// This is only for ephemeral keys really, since you won't have access to the secret key + /// for storage. If you want a persistent key pair, use `generate_with_phrase` instead. + fn generate() -> Pair { + let mut seed: Seed = Default::default(); + SystemRandom::new() + .fill(seed.as_mut()) + .expect("system random source should always work! qed"); + Self::from_seed(seed) + } + + /// Generate new secure (random) key pair and provide the recovery phrase. + /// + /// You can recover the same key later with `from_phrase`. 
+ fn generate_with_phrase(password: Option<&str>) -> (Pair, String) { + let mnemonic = Mnemonic::new(MnemonicType::Words12, Language::English); + let phrase = mnemonic.phrase(); + ( + Self::from_phrase(phrase, password) + .expect("All phrases generated by Mnemonic are valid; qed"), + phrase.to_owned(), + ) + } + + /// Generate key pair from given recovery phrase and password. + fn from_phrase(phrase: &str, password: Option<&str>) -> Result { + let big_seed = seed_from_entropy( + Mnemonic::from_phrase(phrase, Language::English) + .map_err(|_| SecretStringError::InvalidPhrase)? + .entropy(), + password.unwrap_or(""), + ) + .map_err(|_| SecretStringError::InvalidSeed)?; + Self::from_seed_slice(&big_seed[0..32]) + } + + /// Make a new key pair from secret seed material. + /// + /// You should never need to use this; generate(), generate_with_phrasee + fn from_seed(seed: Seed) -> Pair { + let key = signature::Ed25519KeyPair::from_seed_unchecked(untrusted::Input::from(&seed[..])) + .expect("seed has valid length; qed"); + Pair(key, seed) + } + + /// Make a new key pair from secret seed material. The slice must be 32 bytes long or it + /// will return `None`. + /// + /// You should never need to use this; generate(), generate_with_phrase + fn from_seed_slice(seed_slice: &[u8]) -> Result { + if seed_slice.len() != 32 { + Err(SecretStringError::InvalidSeedLength) + } else { + let mut seed = [0u8; 32]; + seed.copy_from_slice(&seed_slice); + Ok(Self::from_seed(seed)) + } + } + + /// Derive a child key from a series of given junctions. + fn derive>( + &self, + path: Iter, + ) -> Result { + let mut acc = self.1.clone(); + for j in path { + match j { + DeriveJunction::Soft(_cc) => return Err(DeriveError::SoftKeyInPath), + DeriveJunction::Hard(cc) => acc = derive_hard_junction(&acc, &cc), + } + } + Ok(Self::from_seed(acc)) + } + + /// Generate a key from the phrase, password and derivation path. + fn from_standard_components>( + phrase: &str, + password: Option<&str>, + path: I, + ) -> Result { + Self::from_phrase(phrase, password)? + .derive(path) + .map_err(|_| SecretStringError::InvalidPath) + } + + /// Get the public key. + fn public(&self) -> Public { + let mut r = [0u8; 32]; + let pk = self.0.public_key().as_ref(); + r.copy_from_slice(pk); + Public(r) + } + + /// Sign a message. + fn sign(&self, message: &[u8]) -> Signature { + let mut r = [0u8; 64]; + r.copy_from_slice(self.0.sign(message).as_ref()); + Signature::from_raw(r) + } + + /// Verify a signature on a message. Returns true if the signature is good. + fn verify, M: AsRef<[u8]>>( + sig: &Self::Signature, + message: M, + pubkey: P, + ) -> bool { + let public_key = untrusted::Input::from(&pubkey.as_ref().0[..]); + let msg = untrusted::Input::from(message.as_ref()); + let sig = untrusted::Input::from(&sig.0[..]); + + match signature::verify(&signature::ED25519, public_key, msg, sig) { + Ok(_) => true, + _ => false, + } + } + + /// Verify a signature on a message. Returns true if the signature is good. + /// + /// This doesn't use the type system to ensure that `sig` and `pubkey` are the correct + /// size. Use it only if you're coming from byte buffers and need the speed. 
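Hard derivation above chains seeds by hashing the tuple `("Ed25519HDKD", seed, chain_code)` with blake2b-256; soft junctions are rejected for ed25519. A minimal sketch of one hard step, using the same `parity_codec` tuple encoding as the diff:

```rust
use parity_codec::Encode;

type Seed = [u8; 32];

fn hard_junction(seed: &Seed, cc: &[u8; 32]) -> Seed {
    // Encode the domain tag, current seed and chain code, then hash to 32 bytes.
    ("Ed25519HDKD", seed, cc).using_encoded(|data| {
        let mut out = [0u8; 32];
        out.copy_from_slice(blake2_rfc::blake2b::blake2b(32, &[], data).as_bytes());
        out
    })
}

fn main() {
    let root: Seed = *b"12345678901234567890123456789012";
    let cc = [1u8; 32]; // chain code for a single hard step
    let child = hard_junction(&root, &cc);
    // The same inputs always derive the same child seed.
    assert_eq!(child, hard_junction(&root, &cc));
    println!("child seed prefix: {:02x?}", &child[..4]);
}
```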
+ fn verify_weak, M: AsRef<[u8]>>(sig: &[u8], message: M, pubkey: P) -> bool { + let public_key = untrusted::Input::from(pubkey.as_ref()); + let msg = untrusted::Input::from(message.as_ref()); + let sig = untrusted::Input::from(sig); + + match signature::verify(&signature::ED25519, public_key, msg, sig) { + Ok(_) => true, + _ => false, + } + } } #[cfg(feature = "std")] impl Pair { - /// Get the seed for this key. - pub fn seed(&self) -> &Seed { - &self.1 - } - - /// Exactly as `from_string` except that if no matches are found then, the the first 32 - /// characters are taken (padded with spaces as necessary) and used as the MiniSecretKey. - pub fn from_legacy_string(s: &str, password_override: Option<&str>) -> Pair { - Self::from_string(s, password_override).unwrap_or_else(|_| { - let mut padded_seed: Seed = [' ' as u8; 32]; - let len = s.len().min(32); - padded_seed[..len].copy_from_slice(&s.as_bytes()[..len]); - Self::from_seed(padded_seed) - }) - } + /// Get the seed for this key. + pub fn seed(&self) -> &Seed { + &self.1 + } + + /// Exactly as `from_string` except that if no matches are found then, the the first 32 + /// characters are taken (padded with spaces as necessary) and used as the MiniSecretKey. + pub fn from_legacy_string(s: &str, password_override: Option<&str>) -> Pair { + Self::from_string(s, password_override).unwrap_or_else(|_| { + let mut padded_seed: Seed = [' ' as u8; 32]; + let len = s.len().min(32); + padded_seed[..len].copy_from_slice(&s.as_bytes()[..len]); + Self::from_seed(padded_seed) + }) + } } #[cfg(test)] mod test { - use super::*; - use hex_literal::{hex, hex_impl}; - use crate::crypto::DEV_PHRASE; - - #[test] - fn default_phrase_should_be_used() { - assert_eq!( - Pair::from_string("//Alice///password", None).unwrap().public(), - Pair::from_string(&format!("{}//Alice", DEV_PHRASE), Some("password")).unwrap().public(), - ); - } - - #[test] - fn test_vector_should_work() { - let pair: Pair = Pair::from_seed(hex!("9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60")); - let public = pair.public(); - assert_eq!(public, Public::from_raw(hex!("d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a"))); - let message = b""; - let signature = Signature::from_raw(hex!("e5564300c360ac729086e2cc806e828a84877f1eb8e5d974d873e065224901555fb8821590a33bacc61e39701cf9b46bd25bf5f0595bbe24655141438e7a100b")); - assert!(&pair.sign(&message[..]) == &signature); - assert!(Pair::verify(&signature, &message[..], &public)); - } - - #[test] - fn test_vector_by_string_should_work() { - let pair: Pair = Pair::from_string("0x9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60", None).unwrap(); - let public = pair.public(); - assert_eq!(public, Public::from_raw(hex!("d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a"))); - let message = b""; - let signature = Signature::from_raw(hex!("e5564300c360ac729086e2cc806e828a84877f1eb8e5d974d873e065224901555fb8821590a33bacc61e39701cf9b46bd25bf5f0595bbe24655141438e7a100b")); - assert!(&pair.sign(&message[..]) == &signature); - assert!(Pair::verify(&signature, &message[..], &public)); - } - - #[test] - fn generated_pair_should_work() { - let pair = Pair::generate(); - let public = pair.public(); - let message = b"Something important"; - let signature = pair.sign(&message[..]); - assert!(Pair::verify(&signature, &message[..], &public)); - } - - #[test] - fn seeded_pair_should_work() { - let pair = Pair::from_seed(*b"12345678901234567890123456789012"); - let public = pair.public(); - 
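As the doc comments note, `verify_weak` skips the typed wrappers and takes raw byte slices. A usage sketch showing the two verification paths agree (the `substrate_primitives` crate-root paths are assumed):

```rust
use substrate_primitives::crypto::Pair as _; // bring trait methods into scope
use substrate_primitives::ed25519;

fn main() {
    let pair = ed25519::Pair::from_seed(*b"12345678901234567890123456789012");
    let msg = b"payload";
    let sig = pair.sign(msg);
    // Typed path: wrapper types enforce the right sizes.
    assert!(ed25519::Pair::verify(&sig, &msg[..], &pair.public()));
    // Weak path: raw slices, no size checking by the type system.
    assert!(ed25519::Pair::verify_weak(
        sig.as_ref(),
        &msg[..],
        pair.public().as_slice()
    ));
}
```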
assert_eq!(public, Public::from_raw(hex!("2f8c6129d816cf51c374bc7f08c3e63ed156cf78aefb4a6550d97b87997977ee"))); - let message = hex!("2f8c6129d816cf51c374bc7f08c3e63ed156cf78aefb4a6550d97b87997977ee00000000000000000200d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a4500000000000000"); - let signature = pair.sign(&message[..]); - println!("Correct signature: {:?}", signature); - assert!(Pair::verify(&signature, &message[..], &public)); - } - - #[test] - fn generate_with_phrase_recovery_possible() { - let (pair1, phrase) = Pair::generate_with_phrase(None); - let pair2 = Pair::from_phrase(&phrase, None).unwrap(); - - assert_eq!(pair1.public(), pair2.public()); - } - - #[test] - fn generate_with_password_phrase_recovery_possible() { - let (pair1, phrase) = Pair::generate_with_phrase(Some("password")); - let pair2 = Pair::from_phrase(&phrase, Some("password")).unwrap(); - - assert_eq!(pair1.public(), pair2.public()); - } - - #[test] - fn password_does_something() { - let (pair1, phrase) = Pair::generate_with_phrase(Some("password")); - let pair2 = Pair::from_phrase(&phrase, None).unwrap(); - - assert_ne!(pair1.public(), pair2.public()); - } - - #[test] - fn ss58check_roundtrip_works() { - let pair = Pair::from_seed(*b"12345678901234567890123456789012"); - let public = pair.public(); - let s = public.to_ss58check(); - println!("Correct: {}", s); - let cmp = Public::from_ss58check(&s).unwrap(); - assert_eq!(cmp, public); - } + use super::*; + use crate::crypto::DEV_PHRASE; + use hex_literal::{hex, hex_impl}; + + #[test] + fn default_phrase_should_be_used() { + assert_eq!( + Pair::from_string("//Alice///password", None) + .unwrap() + .public(), + Pair::from_string(&format!("{}//Alice", DEV_PHRASE), Some("password")) + .unwrap() + .public(), + ); + } + + #[test] + fn test_vector_should_work() { + let pair: Pair = Pair::from_seed(hex!( + "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60" + )); + let public = pair.public(); + assert_eq!( + public, + Public::from_raw(hex!( + "d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a" + )) + ); + let message = b""; + let signature = Signature::from_raw(hex!("e5564300c360ac729086e2cc806e828a84877f1eb8e5d974d873e065224901555fb8821590a33bacc61e39701cf9b46bd25bf5f0595bbe24655141438e7a100b")); + assert!(&pair.sign(&message[..]) == &signature); + assert!(Pair::verify(&signature, &message[..], &public)); + } + + #[test] + fn test_vector_by_string_should_work() { + let pair: Pair = Pair::from_string( + "0x9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60", + None, + ) + .unwrap(); + let public = pair.public(); + assert_eq!( + public, + Public::from_raw(hex!( + "d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a" + )) + ); + let message = b""; + let signature = Signature::from_raw(hex!("e5564300c360ac729086e2cc806e828a84877f1eb8e5d974d873e065224901555fb8821590a33bacc61e39701cf9b46bd25bf5f0595bbe24655141438e7a100b")); + assert!(&pair.sign(&message[..]) == &signature); + assert!(Pair::verify(&signature, &message[..], &public)); + } + + #[test] + fn generated_pair_should_work() { + let pair = Pair::generate(); + let public = pair.public(); + let message = b"Something important"; + let signature = pair.sign(&message[..]); + assert!(Pair::verify(&signature, &message[..], &public)); + } + + #[test] + fn seeded_pair_should_work() { + let pair = Pair::from_seed(*b"12345678901234567890123456789012"); + let public = pair.public(); + assert_eq!( + public, + Public::from_raw(hex!( + 
"2f8c6129d816cf51c374bc7f08c3e63ed156cf78aefb4a6550d97b87997977ee" + )) + ); + let message = hex!("2f8c6129d816cf51c374bc7f08c3e63ed156cf78aefb4a6550d97b87997977ee00000000000000000200d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a4500000000000000"); + let signature = pair.sign(&message[..]); + println!("Correct signature: {:?}", signature); + assert!(Pair::verify(&signature, &message[..], &public)); + } + + #[test] + fn generate_with_phrase_recovery_possible() { + let (pair1, phrase) = Pair::generate_with_phrase(None); + let pair2 = Pair::from_phrase(&phrase, None).unwrap(); + + assert_eq!(pair1.public(), pair2.public()); + } + + #[test] + fn generate_with_password_phrase_recovery_possible() { + let (pair1, phrase) = Pair::generate_with_phrase(Some("password")); + let pair2 = Pair::from_phrase(&phrase, Some("password")).unwrap(); + + assert_eq!(pair1.public(), pair2.public()); + } + + #[test] + fn password_does_something() { + let (pair1, phrase) = Pair::generate_with_phrase(Some("password")); + let pair2 = Pair::from_phrase(&phrase, None).unwrap(); + + assert_ne!(pair1.public(), pair2.public()); + } + + #[test] + fn ss58check_roundtrip_works() { + let pair = Pair::from_seed(*b"12345678901234567890123456789012"); + let public = pair.public(); + let s = public.to_ss58check(); + println!("Correct: {}", s); + let cmp = Public::from_ss58check(&s).unwrap(); + assert_eq!(cmp, public); + } } diff --git a/core/primitives/src/hash.rs b/core/primitives/src/hash.rs index f3e3583be5..7f3b5cea6f 100644 --- a/core/primitives/src/hash.rs +++ b/core/primitives/src/hash.rs @@ -22,67 +22,121 @@ pub use primitive_types::{H160, H256, H512}; /// implemented by the same hash type. /// Panics if used to convert between different hash types. pub fn convert_hash, H2: AsRef<[u8]>>(src: &H2) -> H1 { - let mut dest = H1::default(); - assert_eq!(dest.as_mut().len(), src.as_ref().len()); - dest.as_mut().copy_from_slice(src.as_ref()); - dest + let mut dest = H1::default(); + assert_eq!(dest.as_mut().len(), src.as_ref().len()); + dest.as_mut().copy_from_slice(src.as_ref()); + dest } #[cfg(test)] mod tests { - use super::*; - use substrate_serializer as ser; + use super::*; + use substrate_serializer as ser; - #[test] - fn test_h160() { - let tests = vec![ - (Default::default(), "0x0000000000000000000000000000000000000000"), - (H160::from_low_u64_be(2), "0x0000000000000000000000000000000000000002"), - (H160::from_low_u64_be(15), "0x000000000000000000000000000000000000000f"), - (H160::from_low_u64_be(16), "0x0000000000000000000000000000000000000010"), - (H160::from_low_u64_be(1_000), "0x00000000000000000000000000000000000003e8"), - (H160::from_low_u64_be(100_000), "0x00000000000000000000000000000000000186a0"), - (H160::from_low_u64_be(u64::max_value()), "0x000000000000000000000000ffffffffffffffff"), - ]; + #[test] + fn test_h160() { + let tests = vec![ + ( + Default::default(), + "0x0000000000000000000000000000000000000000", + ), + ( + H160::from_low_u64_be(2), + "0x0000000000000000000000000000000000000002", + ), + ( + H160::from_low_u64_be(15), + "0x000000000000000000000000000000000000000f", + ), + ( + H160::from_low_u64_be(16), + "0x0000000000000000000000000000000000000010", + ), + ( + H160::from_low_u64_be(1_000), + "0x00000000000000000000000000000000000003e8", + ), + ( + H160::from_low_u64_be(100_000), + "0x00000000000000000000000000000000000186a0", + ), + ( + H160::from_low_u64_be(u64::max_value()), + "0x000000000000000000000000ffffffffffffffff", + ), + ]; - for (number, expected) in tests { - 
assert_eq!(format!("{:?}", expected), ser::to_string_pretty(&number)); - assert_eq!(number, ser::from_str(&format!("{:?}", expected)).unwrap()); - } - } + for (number, expected) in tests { + assert_eq!(format!("{:?}", expected), ser::to_string_pretty(&number)); + assert_eq!(number, ser::from_str(&format!("{:?}", expected)).unwrap()); + } + } - #[test] - fn test_h256() { - let tests = vec![ - (Default::default(), "0x0000000000000000000000000000000000000000000000000000000000000000"), - (H256::from_low_u64_be(2), "0x0000000000000000000000000000000000000000000000000000000000000002"), - (H256::from_low_u64_be(15), "0x000000000000000000000000000000000000000000000000000000000000000f"), - (H256::from_low_u64_be(16), "0x0000000000000000000000000000000000000000000000000000000000000010"), - (H256::from_low_u64_be(1_000), "0x00000000000000000000000000000000000000000000000000000000000003e8"), - (H256::from_low_u64_be(100_000), "0x00000000000000000000000000000000000000000000000000000000000186a0"), - (H256::from_low_u64_be(u64::max_value()), "0x000000000000000000000000000000000000000000000000ffffffffffffffff"), - ]; + #[test] + fn test_h256() { + let tests = vec![ + ( + Default::default(), + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ( + H256::from_low_u64_be(2), + "0x0000000000000000000000000000000000000000000000000000000000000002", + ), + ( + H256::from_low_u64_be(15), + "0x000000000000000000000000000000000000000000000000000000000000000f", + ), + ( + H256::from_low_u64_be(16), + "0x0000000000000000000000000000000000000000000000000000000000000010", + ), + ( + H256::from_low_u64_be(1_000), + "0x00000000000000000000000000000000000000000000000000000000000003e8", + ), + ( + H256::from_low_u64_be(100_000), + "0x00000000000000000000000000000000000000000000000000000000000186a0", + ), + ( + H256::from_low_u64_be(u64::max_value()), + "0x000000000000000000000000000000000000000000000000ffffffffffffffff", + ), + ]; - for (number, expected) in tests { - assert_eq!(format!("{:?}", expected), ser::to_string_pretty(&number)); - assert_eq!(number, ser::from_str(&format!("{:?}", expected)).unwrap()); - } - } + for (number, expected) in tests { + assert_eq!(format!("{:?}", expected), ser::to_string_pretty(&number)); + assert_eq!(number, ser::from_str(&format!("{:?}", expected)).unwrap()); + } + } - #[test] - fn test_invalid() { - assert!(ser::from_str::("\"0x000000000000000000000000000000000000000000000000000000000000000\"").unwrap_err().is_data()); - assert!(ser::from_str::("\"0x000000000000000000000000000000000000000000000000000000000000000g\"").unwrap_err().is_data()); - assert!(ser::from_str::("\"0x00000000000000000000000000000000000000000000000000000000000000000\"").unwrap_err().is_data()); - assert!(ser::from_str::("\"\"").unwrap_err().is_data()); - assert!(ser::from_str::("\"0\"").unwrap_err().is_data()); - assert!(ser::from_str::("\"10\"").unwrap_err().is_data()); - } + #[test] + fn test_invalid() { + assert!(ser::from_str::( + "\"0x000000000000000000000000000000000000000000000000000000000000000\"" + ) + .unwrap_err() + .is_data()); + assert!(ser::from_str::( + "\"0x000000000000000000000000000000000000000000000000000000000000000g\"" + ) + .unwrap_err() + .is_data()); + assert!(ser::from_str::( + "\"0x00000000000000000000000000000000000000000000000000000000000000000\"" + ) + .unwrap_err() + .is_data()); + assert!(ser::from_str::("\"\"").unwrap_err().is_data()); + assert!(ser::from_str::("\"0\"").unwrap_err().is_data()); + assert!(ser::from_str::("\"10\"").unwrap_err().is_data()); + 
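A reconstruction of `convert_hash` from the top of this file, with the generic bounds inferred from its body (`H1: Default + AsMut<[u8]>`, `H2: AsRef<[u8]>`), plus a round-trip check:

```rust
use primitive_types::H256;

// Bounds inferred from the body: H1 must be constructible and writable,
// H2 readable, and both must have the same byte length at runtime.
fn convert_hash<H1: Default + AsMut<[u8]>, H2: AsRef<[u8]>>(src: &H2) -> H1 {
    let mut dest = H1::default();
    assert_eq!(dest.as_mut().len(), src.as_ref().len());
    dest.as_mut().copy_from_slice(src.as_ref());
    dest
}

fn main() {
    let a = H256::from_low_u64_be(100_000);
    let b: H256 = convert_hash(&a);
    assert_eq!(a, b);
}
```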
} - #[test] - fn test_heapsizeof() { - use heapsize::HeapSizeOf; - let h = H256::zero(); - assert_eq!(h.heap_size_of_children(), 0); - } + #[test] + fn test_heapsizeof() { + use heapsize::HeapSizeOf; + let h = H256::zero(); + assert_eq!(h.heap_size_of_children(), 0); + } } diff --git a/core/primitives/src/hasher.rs b/core/primitives/src/hasher.rs index 4562180a1a..6dd030277c 100644 --- a/core/primitives/src/hasher.rs +++ b/core/primitives/src/hasher.rs @@ -16,38 +16,38 @@ //! Substrate Blake2b Hasher implementation -use hash_db::Hasher; -use hash256_std_hasher::Hash256StdHasher; use crate::hash::H256; +use hash256_std_hasher::Hash256StdHasher; +use hash_db::Hasher; pub mod blake2 { - use super::{Hasher, Hash256StdHasher, H256}; - #[cfg(feature = "std")] - use crate::hashing::blake2_256; - - #[cfg(not(feature = "std"))] - extern "C" { - fn ext_blake2_256(data: *const u8, len: u32, out: *mut u8); - } - #[cfg(not(feature = "std"))] - fn blake2_256(data: &[u8]) -> [u8; 32] { - let mut result: [u8; 32] = Default::default(); - unsafe { - ext_blake2_256(data.as_ptr(), data.len() as u32, result.as_mut_ptr()); - } - result - } - - /// Concrete implementation of Hasher using Blake2b 256-bit hashes - #[derive(Debug)] - pub struct Blake2Hasher; - - impl Hasher for Blake2Hasher { - type Out = H256; - type StdHasher = Hash256StdHasher; - const LENGTH: usize = 32; - fn hash(x: &[u8]) -> Self::Out { - blake2_256(x).into() - } - } + use super::{Hash256StdHasher, Hasher, H256}; + #[cfg(feature = "std")] + use crate::hashing::blake2_256; + + #[cfg(not(feature = "std"))] + extern "C" { + fn ext_blake2_256(data: *const u8, len: u32, out: *mut u8); + } + #[cfg(not(feature = "std"))] + fn blake2_256(data: &[u8]) -> [u8; 32] { + let mut result: [u8; 32] = Default::default(); + unsafe { + ext_blake2_256(data.as_ptr(), data.len() as u32, result.as_mut_ptr()); + } + result + } + + /// Concrete implementation of Hasher using Blake2b 256-bit hashes + #[derive(Debug)] + pub struct Blake2Hasher; + + impl Hasher for Blake2Hasher { + type Out = H256; + type StdHasher = Hash256StdHasher; + const LENGTH: usize = 32; + fn hash(x: &[u8]) -> Self::Out { + blake2_256(x).into() + } + } } diff --git a/core/primitives/src/hashing.rs b/core/primitives/src/hashing.rs index 814048fea8..782bccb528 100644 --- a/core/primitives/src/hashing.rs +++ b/core/primitives/src/hashing.rs @@ -21,86 +21,86 @@ use twox_hash; /// Do a Blake2 512-bit hash and place result in `dest`. pub fn blake2_512_into(data: &[u8], dest: &mut [u8; 64]) { - dest.copy_from_slice(blake2_rfc::blake2b::blake2b(64, &[], data).as_bytes()); + dest.copy_from_slice(blake2_rfc::blake2b::blake2b(64, &[], data).as_bytes()); } /// Do a Blake2 512-bit hash and return result. pub fn blake2_512(data: &[u8]) -> [u8; 64] { - let mut r = [0; 64]; - blake2_512_into(data, &mut r); - r + let mut r = [0; 64]; + blake2_512_into(data, &mut r); + r } /// Do a Blake2 256-bit hash and place result in `dest`. pub fn blake2_256_into(data: &[u8], dest: &mut [u8; 32]) { - dest.copy_from_slice(blake2_rfc::blake2b::blake2b(32, &[], data).as_bytes()); + dest.copy_from_slice(blake2_rfc::blake2b::blake2b(32, &[], data).as_bytes()); } /// Do a Blake2 256-bit hash and return result. pub fn blake2_256(data: &[u8]) -> [u8; 32] { - let mut r = [0; 32]; - blake2_256_into(data, &mut r); - r + let mut r = [0; 32]; + blake2_256_into(data, &mut r); + r } /// Do a Blake2 128-bit hash and place result in `dest`. 
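The `Blake2Hasher` defined above plugs into the generic `hash_db::Hasher` interface. A quick check of the 32-byte contract, assuming the `substrate_primitives` crate-root re-export named in `lib.rs`:

```rust
use hash_db::Hasher;
use substrate_primitives::Blake2Hasher; // crate-root re-export, as in lib.rs

// Generic over any `Hasher`, instantiated with the Blake2b-256 one above.
fn digest<H: Hasher>(data: &[u8]) -> H::Out {
    H::hash(data)
}

fn main() {
    let out = digest::<Blake2Hasher>(b"hello");
    assert_eq!(out.as_ref().len(), Blake2Hasher::LENGTH); // 32 bytes
}
```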
pub fn blake2_128_into(data: &[u8], dest: &mut [u8; 16]) { - dest.copy_from_slice(blake2_rfc::blake2b::blake2b(16, &[], data).as_bytes()); + dest.copy_from_slice(blake2_rfc::blake2b::blake2b(16, &[], data).as_bytes()); } /// Do a Blake2 128-bit hash and return result. pub fn blake2_128(data: &[u8]) -> [u8; 16] { - let mut r = [0; 16]; - blake2_128_into(data, &mut r); - r + let mut r = [0; 16]; + blake2_128_into(data, &mut r); + r } /// Do a XX 128-bit hash and place result in `dest`. pub fn twox_128_into(data: &[u8], dest: &mut [u8; 16]) { - use ::core::hash::Hasher; - let mut h0 = twox_hash::XxHash::with_seed(0); - let mut h1 = twox_hash::XxHash::with_seed(1); - h0.write(data); - h1.write(data); - let r0 = h0.finish(); - let r1 = h1.finish(); - use byteorder::{ByteOrder, LittleEndian}; - LittleEndian::write_u64(&mut dest[0..8], r0); - LittleEndian::write_u64(&mut dest[8..16], r1); + use ::core::hash::Hasher; + let mut h0 = twox_hash::XxHash::with_seed(0); + let mut h1 = twox_hash::XxHash::with_seed(1); + h0.write(data); + h1.write(data); + let r0 = h0.finish(); + let r1 = h1.finish(); + use byteorder::{ByteOrder, LittleEndian}; + LittleEndian::write_u64(&mut dest[0..8], r0); + LittleEndian::write_u64(&mut dest[8..16], r1); } /// Do a XX 128-bit hash and return result. pub fn twox_128(data: &[u8]) -> [u8; 16] { - let mut r: [u8; 16] = [0; 16]; - twox_128_into(data, &mut r); - r + let mut r: [u8; 16] = [0; 16]; + twox_128_into(data, &mut r); + r } /// Do a XX 256-bit hash and place result in `dest`. pub fn twox_256_into(data: &[u8], dest: &mut [u8; 32]) { - use ::core::hash::Hasher; - use byteorder::{ByteOrder, LittleEndian}; - let mut h0 = twox_hash::XxHash::with_seed(0); - let mut h1 = twox_hash::XxHash::with_seed(1); - let mut h2 = twox_hash::XxHash::with_seed(2); - let mut h3 = twox_hash::XxHash::with_seed(3); - h0.write(data); - h1.write(data); - h2.write(data); - h3.write(data); - let r0 = h0.finish(); - let r1 = h1.finish(); - let r2 = h2.finish(); - let r3 = h3.finish(); - LittleEndian::write_u64(&mut dest[0..8], r0); - LittleEndian::write_u64(&mut dest[8..16], r1); - LittleEndian::write_u64(&mut dest[16..24], r2); - LittleEndian::write_u64(&mut dest[24..32], r3); + use ::core::hash::Hasher; + use byteorder::{ByteOrder, LittleEndian}; + let mut h0 = twox_hash::XxHash::with_seed(0); + let mut h1 = twox_hash::XxHash::with_seed(1); + let mut h2 = twox_hash::XxHash::with_seed(2); + let mut h3 = twox_hash::XxHash::with_seed(3); + h0.write(data); + h1.write(data); + h2.write(data); + h3.write(data); + let r0 = h0.finish(); + let r1 = h1.finish(); + let r2 = h2.finish(); + let r3 = h3.finish(); + LittleEndian::write_u64(&mut dest[0..8], r0); + LittleEndian::write_u64(&mut dest[8..16], r1); + LittleEndian::write_u64(&mut dest[16..24], r2); + LittleEndian::write_u64(&mut dest[24..32], r3); } /// Do a XX 256-bit hash and return result. pub fn twox_256(data: &[u8]) -> [u8; 32] { - let mut r: [u8; 32] = [0; 32]; - twox_256_into(data, &mut r); - r + let mut r: [u8; 32] = [0; 32]; + twox_256_into(data, &mut r); + r } diff --git a/core/primitives/src/hexdisplay.rs b/core/primitives/src/hexdisplay.rs index d748208d0e..b419c93514 100644 --- a/core/primitives/src/hexdisplay.rs +++ b/core/primitives/src/hexdisplay.rs @@ -20,45 +20,53 @@ pub struct HexDisplay<'a>(&'a [u8]); impl<'a> HexDisplay<'a> { - /// Create new instance that will display `d` as a hex string when displayed. 
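`twox_128` above is just two 64-bit XX hashes of the same input, seeded 0 and 1, laid out little-endian side by side; `twox_256` extends the same pattern to seeds 0 through 3. A condensed sketch of the 128-bit variant using the same `twox_hash` and `byteorder` calls:

```rust
use byteorder::{ByteOrder, LittleEndian};
use core::hash::Hasher;

fn twox_128(data: &[u8]) -> [u8; 16] {
    let mut out = [0u8; 16];
    for seed in 0..2u64 {
        // One 64-bit XX hash per seed, written side by side.
        let mut h = twox_hash::XxHash::with_seed(seed);
        h.write(data);
        let i = seed as usize * 8;
        LittleEndian::write_u64(&mut out[i..i + 8], h.finish());
    }
    out
}

fn main() {
    // The classic storage-prefix use: a 16-byte hash of a module name.
    println!("{:02x?}", twox_128(b"Balances"));
}
```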
- pub fn from(d: &'a AsBytesRef) -> Self { HexDisplay(d.as_bytes_ref()) } + /// Create new instance that will display `d` as a hex string when displayed. + pub fn from(d: &'a AsBytesRef) -> Self { + HexDisplay(d.as_bytes_ref()) + } } impl<'a> ::core::fmt::Display for HexDisplay<'a> { - fn fmt(&self, fmtr: &mut ::core::fmt::Formatter) -> Result<(), ::core::fmt::Error> { - if self.0.len() < 1027 { - for byte in self.0 { - fmtr.write_fmt(format_args!("{:02x}", byte))?; - } - } else { - for byte in &self.0[0..512] { - fmtr.write_fmt(format_args!("{:02x}", byte))?; - } - fmtr.write_str("...")?; - for byte in &self.0[self.0.len() - 512..] { - fmtr.write_fmt(format_args!("{:02x}", byte))?; - } - } - Ok(()) - } + fn fmt(&self, fmtr: &mut ::core::fmt::Formatter) -> Result<(), ::core::fmt::Error> { + if self.0.len() < 1027 { + for byte in self.0 { + fmtr.write_fmt(format_args!("{:02x}", byte))?; + } + } else { + for byte in &self.0[0..512] { + fmtr.write_fmt(format_args!("{:02x}", byte))?; + } + fmtr.write_str("...")?; + for byte in &self.0[self.0.len() - 512..] { + fmtr.write_fmt(format_args!("{:02x}", byte))?; + } + } + Ok(()) + } } /// Simple trait to transform various types to `&[u8]` pub trait AsBytesRef { - /// Transform `self` into `&[u8]`. - fn as_bytes_ref(&self) -> &[u8]; + /// Transform `self` into `&[u8]`. + fn as_bytes_ref(&self) -> &[u8]; } impl<'a> AsBytesRef for &'a [u8] { - fn as_bytes_ref(&self) -> &[u8] { self } + fn as_bytes_ref(&self) -> &[u8] { + self + } } impl AsBytesRef for [u8] { - fn as_bytes_ref(&self) -> &[u8] { &self } + fn as_bytes_ref(&self) -> &[u8] { + &self + } } impl AsBytesRef for Vec { - fn as_bytes_ref(&self) -> &[u8] { &self } + fn as_bytes_ref(&self) -> &[u8] { + &self + } } macro_rules! impl_non_endians { @@ -69,25 +77,27 @@ macro_rules! impl_non_endians { )* } } -impl_non_endians!([u8; 1], [u8; 2], [u8; 3], [u8; 4], [u8; 5], [u8; 6], [u8; 7], [u8; 8], - [u8; 10], [u8; 12], [u8; 14], [u8; 16], [u8; 20], [u8; 24], [u8; 28], [u8; 32], [u8; 40], - [u8; 48], [u8; 56], [u8; 64], [u8; 80], [u8; 96], [u8; 112], [u8; 128]); +impl_non_endians!( + [u8; 1], [u8; 2], [u8; 3], [u8; 4], [u8; 5], [u8; 6], [u8; 7], [u8; 8], [u8; 10], [u8; 12], + [u8; 14], [u8; 16], [u8; 20], [u8; 24], [u8; 28], [u8; 32], [u8; 40], [u8; 48], [u8; 56], + [u8; 64], [u8; 80], [u8; 96], [u8; 112], [u8; 128] +); /// Format into ASCII + # + hex, suitable for storage key preimages. pub fn ascii_format(asciish: &[u8]) -> String { - let mut r = String::new(); - let mut latch = false; - for c in asciish { - match (latch, *c) { - (false, 32...127) => r.push(*c as char), - _ => { - if !latch { - r.push('#'); - latch = true; - } - r.push_str(&format!("{:02x}", *c)); - } - } - } - r + let mut r = String::new(); + let mut latch = false; + for c in asciish { + match (latch, *c) { + (false, 32...127) => r.push(*c as char), + _ => { + if !latch { + r.push('#'); + latch = true; + } + r.push_str(&format!("{:02x}", *c)); + } + } + } + r } diff --git a/core/primitives/src/lib.rs b/core/primitives/src/lib.rs index f078b5446f..f0c4b55d16 100644 --- a/core/primitives/src/lib.rs +++ b/core/primitives/src/lib.rs @@ -17,7 +17,6 @@ //! Shareable Substrate types. #![warn(missing_docs)] - #![cfg_attr(not(feature = "std"), no_std)] #![cfg_attr(not(feature = "std"), feature(alloc))] @@ -32,13 +31,13 @@ macro_rules! 
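`HexDisplay` prints short buffers in full and elides anything of 1027 bytes or more down to the first and last 512 bytes around a `...`. A behavioral sketch (module path under `substrate_primitives` assumed):

```rust
use substrate_primitives::hexdisplay::HexDisplay; // module path as in this file

fn main() {
    let short: [u8; 4] = [0xde, 0xad, 0xbe, 0xef];
    assert_eq!(format!("{}", HexDisplay::from(&short)), "deadbeef");

    // At 1027 bytes or more, only the first and last 512 bytes are printed.
    let long = vec![0u8; 2048];
    let s = format!("{}", HexDisplay::from(&long));
    assert_eq!(s.len(), 512 * 2 + 3 + 512 * 2); // two hex runs plus "..."
    assert!(s.contains("..."));
}
```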
map { ) } -use rstd::prelude::*; +use parity_codec::{Decode, Encode}; use rstd::ops::Deref; -use parity_codec::{Encode, Decode}; +use rstd::prelude::*; #[cfg(feature = "std")] -use std::borrow::Cow; +use serde_derive::{Deserialize, Serialize}; #[cfg(feature = "std")] -use serde_derive::{Serialize, Deserialize}; +use std::borrow::Cow; #[cfg(feature = "std")] pub use impl_serde::serialize as bytes; @@ -47,25 +46,25 @@ pub use impl_serde::serialize as bytes; pub mod hashing; #[cfg(feature = "std")] pub use hashing::{blake2_256, twox_128, twox_256}; +pub mod crypto; #[cfg(feature = "std")] pub mod hexdisplay; -pub mod crypto; pub mod u32_trait; +mod changes_trie; pub mod ed25519; -pub mod sr25519; pub mod hash; mod hasher; pub mod sandbox; +pub mod sr25519; pub mod storage; pub mod uint; -mod changes_trie; #[cfg(test)] mod tests; -pub use self::hash::{H160, H256, H512, convert_hash}; +pub use self::hash::{convert_hash, H160, H256, H512}; pub use self::uint::U256; pub use changes_trie::ChangesTrieConfiguration; #[cfg(feature = "std")] @@ -79,48 +78,57 @@ pub use self::hasher::blake2::Blake2Hasher; /// Context for executing a call into the runtime. #[repr(u8)] pub enum ExecutionContext { - /// Context for general importing (including own blocks). - Importing, - /// Context used when syncing the blockchain. - Syncing, - /// Context used for block construction. - BlockConstruction, - /// Offchain worker context. - OffchainWorker(Box), - /// Context used for other calls. - Other, + /// Context for general importing (including own blocks). + Importing, + /// Context used when syncing the blockchain. + Syncing, + /// Context used for block construction. + BlockConstruction, + /// Offchain worker context. + OffchainWorker(Box), + /// Context used for other calls. + Other, } /// An extended externalities for offchain workers. pub trait OffchainExt { - /// Submits an extrinsics. - /// - /// The extrinsic will either go to the pool (signed) - /// or to the next produced block (inherent). - fn submit_extrinsic(&mut self, extrinsic: Vec); + /// Submits an extrinsics. + /// + /// The extrinsic will either go to the pool (signed) + /// or to the next produced block (inherent). + fn submit_extrinsic(&mut self, extrinsic: Vec); } impl OffchainExt for Box { - fn submit_extrinsic(&mut self, ex: Vec) { - (&mut **self).submit_extrinsic(ex) - } + fn submit_extrinsic(&mut self, ex: Vec) { + (&mut **self).submit_extrinsic(ex) + } } /// Hex-serialized shim for `Vec`. #[derive(PartialEq, Eq, Clone)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize, Debug, Hash, PartialOrd, Ord))] -pub struct Bytes(#[cfg_attr(feature = "std", serde(with="bytes"))] pub Vec); +#[cfg_attr( + feature = "std", + derive(Serialize, Deserialize, Debug, Hash, PartialOrd, Ord) +)] +pub struct Bytes(#[cfg_attr(feature = "std", serde(with = "bytes"))] pub Vec); impl From> for Bytes { - fn from(s: Vec) -> Self { Bytes(s) } + fn from(s: Vec) -> Self { + Bytes(s) + } } impl From for Bytes { - fn from(s: OpaqueMetadata) -> Self { Bytes(s.0) } + fn from(s: OpaqueMetadata) -> Self { + Bytes(s.0) + } } impl Deref for Bytes { - type Target = [u8]; - fn deref(&self) -> &[u8] { &self.0[..] } + type Target = [u8]; + fn deref(&self) -> &[u8] { + &self.0[..] + } } /// Stores the encoded `RuntimeMetadata` for the native side as opaque type. @@ -128,66 +136,67 @@ impl Deref for Bytes { pub struct OpaqueMetadata(Vec); impl OpaqueMetadata { - /// Creates a new instance with the given metadata blob. 
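The `Bytes` newtype above exists so byte blobs serialize as hex strings through `impl_serde` rather than as JSON integer arrays. A round-trip sketch, assuming a `serde_json` dev-dependency, the crate-root export, and the `0x` prefix that `impl_serde::serialize` produces:

```rust
use substrate_primitives::Bytes; // crate-root export assumed

fn main() {
    let b = Bytes(vec![0xde, 0xad]);
    let json = serde_json::to_string(&b).unwrap();
    assert!(json.starts_with("\"0x")); // a hex string, not a JSON array
    let back: Bytes = serde_json::from_str(&json).unwrap();
    assert_eq!(back, b);
}
```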
- pub fn new(metadata: Vec) -> Self { - OpaqueMetadata(metadata) - } + /// Creates a new instance with the given metadata blob. + pub fn new(metadata: Vec) -> Self { + OpaqueMetadata(metadata) + } } impl rstd::ops::Deref for OpaqueMetadata { - type Target = Vec; + type Target = Vec; - fn deref(&self) -> &Self::Target { - &self.0 - } + fn deref(&self) -> &Self::Target { + &self.0 + } } /// Something that is either a native or an encoded value. #[cfg(feature = "std")] pub enum NativeOrEncoded { - /// The native representation. - Native(R), - /// The encoded representation. - Encoded(Vec) + /// The native representation. + Native(R), + /// The encoded representation. + Encoded(Vec), } #[cfg(feature = "std")] impl ::std::fmt::Debug for NativeOrEncoded { - fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { - self.as_encoded().as_ref().fmt(f) - } + fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { + self.as_encoded().as_ref().fmt(f) + } } #[cfg(feature = "std")] impl NativeOrEncoded { - /// Return the value as the encoded format. - pub fn as_encoded<'a>(&'a self) -> Cow<'a, [u8]> { - match self { - NativeOrEncoded::Encoded(e) => Cow::Borrowed(e.as_slice()), - NativeOrEncoded::Native(n) => Cow::Owned(n.encode()), - } - } - - /// Return the value as the encoded format. - pub fn into_encoded(self) -> Vec { - match self { - NativeOrEncoded::Encoded(e) => e, - NativeOrEncoded::Native(n) => n.encode(), - } - } + /// Return the value as the encoded format. + pub fn as_encoded<'a>(&'a self) -> Cow<'a, [u8]> { + match self { + NativeOrEncoded::Encoded(e) => Cow::Borrowed(e.as_slice()), + NativeOrEncoded::Native(n) => Cow::Owned(n.encode()), + } + } + + /// Return the value as the encoded format. + pub fn into_encoded(self) -> Vec { + match self { + NativeOrEncoded::Encoded(e) => e, + NativeOrEncoded::Native(n) => n.encode(), + } + } } #[cfg(feature = "std")] impl PartialEq for NativeOrEncoded { - fn eq(&self, other: &Self) -> bool { - match (self, other) { - (NativeOrEncoded::Native(l), NativeOrEncoded::Native(r)) => l == r, - (NativeOrEncoded::Native(n), NativeOrEncoded::Encoded(e)) | - (NativeOrEncoded::Encoded(e), NativeOrEncoded::Native(n)) => - Some(n) == parity_codec::Decode::decode(&mut &e[..]).as_ref(), - (NativeOrEncoded::Encoded(l), NativeOrEncoded::Encoded(r)) => l == r, - } - } + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (NativeOrEncoded::Native(l), NativeOrEncoded::Native(r)) => l == r, + (NativeOrEncoded::Native(n), NativeOrEncoded::Encoded(e)) + | (NativeOrEncoded::Encoded(e), NativeOrEncoded::Native(n)) => { + Some(n) == parity_codec::Decode::decode(&mut &e[..]).as_ref() + } + (NativeOrEncoded::Encoded(l), NativeOrEncoded::Encoded(r)) => l == r, + } + } } /// A value that is never in a native representation. @@ -198,15 +207,15 @@ pub enum NeverNativeValue {} #[cfg(feature = "std")] impl parity_codec::Encode for NeverNativeValue { - fn encode(&self) -> Vec { - // The enum is not constructable, so this function should never be callable! - unreachable!() - } + fn encode(&self) -> Vec { + // The enum is not constructable, so this function should never be callable! 
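`NativeOrEncoded` compares a native value against encoded bytes by decoding the bytes first, so the two representations of one value are equal. A sketch (crate-root export and `R: Encode + Decode + PartialEq` bounds assumed):

```rust
use parity_codec::Encode;
use substrate_primitives::NativeOrEncoded; // crate-root export assumed

fn main() {
    let native: NativeOrEncoded<u32> = NativeOrEncoded::Native(7);
    let encoded: NativeOrEncoded<u32> = NativeOrEncoded::Encoded(7u32.encode());
    // Equality decodes the byte side before comparing.
    assert!(native == encoded);
    // `as_encoded` lazily encodes the native side on demand.
    assert_eq!(native.as_encoded().as_ref(), &7u32.encode()[..]);
}
```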
+ unreachable!() + } } #[cfg(feature = "std")] impl parity_codec::Decode for NeverNativeValue { - fn decode(_: &mut I) -> Option { - None - } + fn decode(_: &mut I) -> Option { + None + } } diff --git a/core/primitives/src/sandbox.rs b/core/primitives/src/sandbox.rs index 773a6b4893..1b6e0c7933 100644 --- a/core/primitives/src/sandbox.rs +++ b/core/primitives/src/sandbox.rs @@ -16,7 +16,7 @@ //! Definition of a sandbox environment. -use parity_codec::{Encode, Decode}; +use parity_codec::{Decode, Encode}; use rstd::vec::Vec; /// Error error that can be returned from host function. @@ -28,58 +28,58 @@ pub struct HostError; #[derive(Clone, Copy, PartialEq, Encode, Decode)] #[cfg_attr(feature = "std", derive(Debug))] pub enum TypedValue { - /// Value of 32-bit signed or unsigned integer. - #[codec(index = "1")] - I32(i32), + /// Value of 32-bit signed or unsigned integer. + #[codec(index = "1")] + I32(i32), - /// Value of 64-bit signed or unsigned integer. - #[codec(index = "2")] - I64(i64), + /// Value of 64-bit signed or unsigned integer. + #[codec(index = "2")] + I64(i64), - /// Value of 32-bit IEEE 754-2008 floating point number represented as a bit pattern. - #[codec(index = "3")] - F32(i32), + /// Value of 32-bit IEEE 754-2008 floating point number represented as a bit pattern. + #[codec(index = "3")] + F32(i32), - /// Value of 64-bit IEEE 754-2008 floating point number represented as a bit pattern. - #[codec(index = "4")] - F64(i64), + /// Value of 64-bit IEEE 754-2008 floating point number represented as a bit pattern. + #[codec(index = "4")] + F64(i64), } impl TypedValue { - /// Returns `Some` if this value of type `I32`. - pub fn as_i32(&self) -> Option { - match *self { - TypedValue::I32(v) => Some(v), - _ => None, - } - } + /// Returns `Some` if this value of type `I32`. 
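The `F32(i32)`/`F64(i64)` variants of `TypedValue` look odd at first glance; a short sketch (not part of the patch) of why the bit patterns travel as integers:

```rust
// Editorial sketch: SCALE has no float codec, so IEEE 754 values are shipped
// as their raw bit patterns and reinterpreted on the wasmi side, exactly as
// the `From` conversions below do with `to_bits`/`from_bits`.
fn floats_travel_as_bits() {
    let tv = TypedValue::F32(1.5f32.to_bits() as i32);
    match tv {
        TypedValue::F32(bits) => assert_eq!(f32::from_bits(bits as u32), 1.5),
        _ => unreachable!(),
    }
}
```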
+ pub fn as_i32(&self) -> Option { + match *self { + TypedValue::I32(v) => Some(v), + _ => None, + } + } } #[cfg(feature = "std")] impl From<::wasmi::RuntimeValue> for TypedValue { - fn from(val: ::wasmi::RuntimeValue) -> TypedValue { - use ::wasmi::RuntimeValue; - match val { - RuntimeValue::I32(v) => TypedValue::I32(v), - RuntimeValue::I64(v) => TypedValue::I64(v), - RuntimeValue::F32(v) => TypedValue::F32(v.to_bits() as i32), - RuntimeValue::F64(v) => TypedValue::F64(v.to_bits() as i64), - } - } + fn from(val: ::wasmi::RuntimeValue) -> TypedValue { + use ::wasmi::RuntimeValue; + match val { + RuntimeValue::I32(v) => TypedValue::I32(v), + RuntimeValue::I64(v) => TypedValue::I64(v), + RuntimeValue::F32(v) => TypedValue::F32(v.to_bits() as i32), + RuntimeValue::F64(v) => TypedValue::F64(v.to_bits() as i64), + } + } } #[cfg(feature = "std")] impl From for ::wasmi::RuntimeValue { - fn from(val: TypedValue) -> ::wasmi::RuntimeValue { - use ::wasmi::RuntimeValue; - use ::wasmi::nan_preserving_float::{F32, F64}; - match val { - TypedValue::I32(v) => RuntimeValue::I32(v), - TypedValue::I64(v) => RuntimeValue::I64(v), - TypedValue::F32(v_bits) => RuntimeValue::F32(F32::from_bits(v_bits as u32)), - TypedValue::F64(v_bits) => RuntimeValue::F64(F64::from_bits(v_bits as u64)), - } - } + fn from(val: TypedValue) -> ::wasmi::RuntimeValue { + use ::wasmi::nan_preserving_float::{F32, F64}; + use ::wasmi::RuntimeValue; + match val { + TypedValue::I32(v) => RuntimeValue::I32(v), + TypedValue::I64(v) => RuntimeValue::I64(v), + TypedValue::F32(v_bits) => RuntimeValue::F32(F32::from_bits(v_bits as u32)), + TypedValue::F64(v_bits) => RuntimeValue::F64(F64::from_bits(v_bits as u64)), + } + } } /// Typed value that can be returned from a function. @@ -88,48 +88,48 @@ impl From for ::wasmi::RuntimeValue { #[derive(Clone, Copy, PartialEq, Encode, Decode)] #[cfg_attr(feature = "std", derive(Debug))] pub enum ReturnValue { - /// For returning nothing. - Unit, - /// For returning some concrete value. - Value(TypedValue), + /// For returning nothing. + Unit, + /// For returning some concrete value. + Value(TypedValue), } impl From for ReturnValue { - fn from(v: TypedValue) -> ReturnValue { - ReturnValue::Value(v) - } + fn from(v: TypedValue) -> ReturnValue { + ReturnValue::Value(v) + } } impl ReturnValue { - /// Maximum number of bytes `ReturnValue` might occupy when serialized with - /// `Codec`. - /// - /// Breakdown: - /// 1 byte for encoding unit/value variant - /// 1 byte for encoding value type - /// 8 bytes for encoding the biggest value types available in wasm: f64, i64. - pub const ENCODED_MAX_SIZE: usize = 10; + /// Maximum number of bytes `ReturnValue` might occupy when serialized with + /// `Codec`. + /// + /// Breakdown: + /// 1 byte for encoding unit/value variant + /// 1 byte for encoding value type + /// 8 bytes for encoding the biggest value types available in wasm: f64, i64. + pub const ENCODED_MAX_SIZE: usize = 10; } #[test] fn return_value_encoded_max_size() { - let encoded = ReturnValue::Value(TypedValue::I64(-1)).encode(); - assert_eq!(encoded.len(), ReturnValue::ENCODED_MAX_SIZE); + let encoded = ReturnValue::Value(TypedValue::I64(-1)).encode(); + assert_eq!(encoded.len(), ReturnValue::ENCODED_MAX_SIZE); } /// Describes an entity to define or import into the environment. #[derive(Clone, PartialEq, Eq, Encode, Decode)] #[cfg_attr(feature = "std", derive(Debug))] pub enum ExternEntity { - /// Function that is specified by an index in a default table of - /// a module that creates the sandbox. 
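The `ENCODED_MAX_SIZE` breakdown above is easy to verify by hand; a sketch (not part of the patch) making the arithmetic explicit:

```rust
// Editorial check of the documented breakdown: 1 byte for the Unit/Value
// variant tag + 1 byte for the TypedValue variant tag + 8 bytes for the
// widest payload (i64/f64 bits) = 10 bytes.
use parity_codec::Encode;

fn max_size_adds_up() {
    let widest = ReturnValue::Value(TypedValue::I64(-1)).encode();
    assert_eq!(widest.len(), 1 + 1 + 8);
    assert_eq!(widest.len(), ReturnValue::ENCODED_MAX_SIZE);
    // The Unit variant encodes to a single tag byte, well under the maximum.
    assert!(ReturnValue::Unit.encode().len() < ReturnValue::ENCODED_MAX_SIZE);
}
```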
- #[codec(index = "1")] - Function(u32), - - /// Linear memory that is specified by some identifier returned by sandbox - /// module upon creation new sandboxed memory. - #[codec(index = "2")] - Memory(u32), + /// Function that is specified by an index in a default table of + /// a module that creates the sandbox. + #[codec(index = "1")] + Function(u32), + + /// Linear memory that is specified by some identifier returned by sandbox + /// module upon creation new sandboxed memory. + #[codec(index = "2")] + Memory(u32), } /// An entry in a environment definition table. @@ -139,20 +139,20 @@ pub enum ExternEntity { #[derive(Clone, PartialEq, Eq, Encode, Decode)] #[cfg_attr(feature = "std", derive(Debug))] pub struct Entry { - /// Module name of which corresponding entity being defined. - pub module_name: Vec, - /// Field name in which corresponding entity being defined. - pub field_name: Vec, - /// External entity being defined. - pub entity: ExternEntity, + /// Module name of which corresponding entity being defined. + pub module_name: Vec, + /// Field name in which corresponding entity being defined. + pub field_name: Vec, + /// External entity being defined. + pub entity: ExternEntity, } /// Definition of runtime that could be used by sandboxed code. #[derive(Clone, PartialEq, Eq, Encode, Decode)] #[cfg_attr(feature = "std", derive(Debug))] pub struct EnvironmentDefinition { - /// Vector of all entries in the environment definition. - pub entries: Vec, + /// Vector of all entries in the environment definition. + pub entries: Vec, } /// Constant for specifying no limit when creating a sandboxed @@ -182,39 +182,33 @@ pub const ERR_EXECUTION: u32 = -3i32 as u32; #[cfg(test)] mod tests { - use super::*; - use std::fmt; - use parity_codec::Codec; - - fn roundtrip(s: S) { - let encoded = s.encode(); - assert_eq!(S::decode(&mut &encoded[..]).unwrap(), s); - } - - #[test] - fn env_def_roundtrip() { - roundtrip(EnvironmentDefinition { - entries: vec![], - }); - - roundtrip(EnvironmentDefinition { - entries: vec![ - Entry { - module_name: b"kernel"[..].into(), - field_name: b"memory"[..].into(), - entity: ExternEntity::Memory(1337), - }, - ], - }); - - roundtrip(EnvironmentDefinition { - entries: vec![ - Entry { - module_name: b"env"[..].into(), - field_name: b"abort"[..].into(), - entity: ExternEntity::Function(228), - }, - ], - }); - } + use super::*; + use parity_codec::Codec; + use std::fmt; + + fn roundtrip(s: S) { + let encoded = s.encode(); + assert_eq!(S::decode(&mut &encoded[..]).unwrap(), s); + } + + #[test] + fn env_def_roundtrip() { + roundtrip(EnvironmentDefinition { entries: vec![] }); + + roundtrip(EnvironmentDefinition { + entries: vec![Entry { + module_name: b"kernel"[..].into(), + field_name: b"memory"[..].into(), + entity: ExternEntity::Memory(1337), + }], + }); + + roundtrip(EnvironmentDefinition { + entries: vec![Entry { + module_name: b"env"[..].into(), + field_name: b"abort"[..].into(), + entity: ExternEntity::Function(228), + }], + }); + } } diff --git a/core/primitives/src/sr25519.rs b/core/primitives/src/sr25519.rs index aa17447a24..1903bd9cd1 100644 --- a/core/primitives/src/sr25519.rs +++ b/core/primitives/src/sr25519.rs @@ -21,25 +21,31 @@ //! for this to work. 
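Before moving on to sr25519: the `env_def_roundtrip` test above shows the intended shape of a sandbox environment; here is a sketch (not part of the patch) assembling one with both entity kinds. The module and field names are hypothetical placeholders:

```rust
// Editorial sketch: one imported memory and one imported function, matching
// the `Entry`/`ExternEntity` types reformatted above.
fn build_env() -> EnvironmentDefinition {
    EnvironmentDefinition {
        entries: vec![
            Entry {
                module_name: b"env"[..].into(),
                field_name: b"memory"[..].into(),
                entity: ExternEntity::Memory(0),
            },
            Entry {
                module_name: b"env"[..].into(),
                field_name: b"host_call"[..].into(), // hypothetical import name
                entity: ExternEntity::Function(0),
            },
        ],
    }
}
```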
// end::description[] +#[cfg(feature = "std")] +use crate::crypto::{ + Derive, DeriveJunction, Infallible, Pair as TraitPair, SecretStringError, Ss58Codec, +}; +use crate::{ + crypto::UncheckedFrom, + hash::{H256, H512}, +}; +#[cfg(feature = "std")] +use bip39::{Language, Mnemonic, MnemonicType}; +use parity_codec::{Decode, Encode}; #[cfg(feature = "std")] use rand::rngs::OsRng; #[cfg(feature = "std")] -use schnorrkel::{signing_context, Keypair, SecretKey, MiniSecretKey, PublicKey, - derive::{Derivation, ChainCode, CHAIN_CODE_LENGTH} +use schnorrkel::{ + derive::{ChainCode, Derivation, CHAIN_CODE_LENGTH}, + signing_context, Keypair, MiniSecretKey, PublicKey, SecretKey, }; #[cfg(feature = "std")] use substrate_bip39::mini_secret_from_entropy; -#[cfg(feature = "std")] -use bip39::{Mnemonic, Language, MnemonicType}; -#[cfg(feature = "std")] -use crate::crypto::{Pair as TraitPair, DeriveJunction, Infallible, SecretStringError, Derive, Ss58Codec}; -use crate::{hash::{H256, H512}, crypto::UncheckedFrom}; -use parity_codec::{Encode, Decode}; -#[cfg(feature = "std")] -use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; #[cfg(feature = "std")] use schnorrkel::keys::MINI_SECRET_KEY_LENGTH; +#[cfg(feature = "std")] +use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; // signing context #[cfg(feature = "std")] @@ -54,88 +60,99 @@ pub struct Public(pub [u8; 32]); pub struct Pair(Keypair); impl AsRef for Public { - fn as_ref(&self) -> &Public { - &self - } + fn as_ref(&self) -> &Public { + &self + } } impl AsRef<[u8; 32]> for Public { - fn as_ref(&self) -> &[u8; 32] { - &self.0 - } + fn as_ref(&self) -> &[u8; 32] { + &self.0 + } } impl AsRef<[u8]> for Public { - fn as_ref(&self) -> &[u8] { - &self.0[..] - } + fn as_ref(&self) -> &[u8] { + &self.0[..] + } } impl AsMut<[u8]> for Public { - fn as_mut(&mut self) -> &mut [u8] { - &mut self.0[..] - } + fn as_mut(&mut self) -> &mut [u8] { + &mut self.0[..] 
+ } } impl From for [u8; 32] { - fn from(x: Public) -> [u8; 32] { - x.0 - } + fn from(x: Public) -> [u8; 32] { + x.0 + } } impl From for H256 { - fn from(x: Public) -> H256 { - x.0.into() - } + fn from(x: Public) -> H256 { + x.0.into() + } } impl UncheckedFrom<[u8; 32]> for Public { - fn unchecked_from(x: [u8; 32]) -> Self { - Public::from_raw(x) - } + fn unchecked_from(x: [u8; 32]) -> Self { + Public::from_raw(x) + } } impl UncheckedFrom for Public { - fn unchecked_from(x: H256) -> Self { - Public::from_h256(x) - } + fn unchecked_from(x: H256) -> Self { + Public::from_h256(x) + } } #[cfg(feature = "std")] impl ::std::fmt::Display for Public { - fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { - write!(f, "{}", self.to_ss58check()) - } + fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { + write!(f, "{}", self.to_ss58check()) + } } #[cfg(feature = "std")] impl ::std::fmt::Debug for Public { - fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { - let s = self.to_ss58check(); - write!(f, "{} ({}...)", crate::hexdisplay::HexDisplay::from(&self.0), &s[0..8]) - } + fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { + let s = self.to_ss58check(); + write!( + f, + "{} ({}...)", + crate::hexdisplay::HexDisplay::from(&self.0), + &s[0..8] + ) + } } #[cfg(feature = "std")] impl Serialize for Public { - fn serialize(&self, serializer: S) -> Result where S: Serializer { - serializer.serialize_str(&self.to_ss58check()) - } + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_str(&self.to_ss58check()) + } } #[cfg(feature = "std")] impl<'de> Deserialize<'de> for Public { - fn deserialize(deserializer: D) -> Result where D: Deserializer<'de> { - Public::from_ss58check(&String::deserialize(deserializer)?) - .map_err(|e| de::Error::custom(format!("{:?}", e))) - } + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + Public::from_ss58check(&String::deserialize(deserializer)?) + .map_err(|e| de::Error::custom(format!("{:?}", e))) + } } #[cfg(feature = "std")] impl ::std::hash::Hash for Public { - fn hash(&self, state: &mut H) { - self.0.hash(state); - } + fn hash(&self, state: &mut H) { + self.0.hash(state); + } } /// An Schnorrkel/Ristretto x25519 ("sr25519") signature. @@ -145,76 +162,76 @@ impl ::std::hash::Hash for Public { pub struct Signature(pub [u8; 64]); impl Clone for Signature { - fn clone(&self) -> Self { - let mut r = [0u8; 64]; - r.copy_from_slice(&self.0[..]); - Signature(r) - } + fn clone(&self) -> Self { + let mut r = [0u8; 64]; + r.copy_from_slice(&self.0[..]); + Signature(r) + } } impl Default for Signature { - fn default() -> Self { - Signature([0u8; 64]) - } + fn default() -> Self { + Signature([0u8; 64]) + } } impl PartialEq for Signature { - fn eq(&self, b: &Self) -> bool { - &self.0[..] == &b.0[..] - } + fn eq(&self, b: &Self) -> bool { + &self.0[..] == &b.0[..] + } } impl Eq for Signature {} impl From for [u8; 64] { - fn from(v: Signature) -> [u8; 64] { - v.0 - } + fn from(v: Signature) -> [u8; 64] { + v.0 + } } impl From for H512 { - fn from(v: Signature) -> H512 { - H512::from(v.0) - } + fn from(v: Signature) -> H512 { + H512::from(v.0) + } } impl AsRef<[u8; 64]> for Signature { - fn as_ref(&self) -> &[u8; 64] { - &self.0 - } + fn as_ref(&self) -> &[u8; 64] { + &self.0 + } } impl AsRef<[u8]> for Signature { - fn as_ref(&self) -> &[u8] { - &self.0[..] - } + fn as_ref(&self) -> &[u8] { + &self.0[..] 
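The serde impls just reformatted route `Public` through SS58 rather than raw bytes; a sketch (not part of the patch, and assuming `serde_json` is available) of what that means in practice:

```rust
// Editorial sketch: `Serialize` emits the SS58 address string and
// `Deserialize` parses it back, so JSON carries an address, not a byte array.
fn public_round_trips_as_ss58() {
    let public = Public::from_raw([0u8; 32]); // hypothetical all-zero key
    let json = serde_json::to_string(&public).unwrap(); // a quoted SS58 string
    let back: Public = serde_json::from_str(&json).unwrap();
    assert_eq!(back, public);
}
```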
+ } } impl AsMut<[u8]> for Signature { - fn as_mut(&mut self) -> &mut [u8] { - &mut self.0[..] - } + fn as_mut(&mut self) -> &mut [u8] { + &mut self.0[..] + } } #[cfg(feature = "std")] impl From for Signature { - fn from(s: schnorrkel::Signature) -> Signature { - Signature(s.to_bytes()) - } + fn from(s: schnorrkel::Signature) -> Signature { + Signature(s.to_bytes()) + } } #[cfg(feature = "std")] impl ::std::fmt::Debug for Signature { - fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { - write!(f, "{}", crate::hexdisplay::HexDisplay::from(&self.0)) - } + fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { + write!(f, "{}", crate::hexdisplay::HexDisplay::from(&self.0)) + } } #[cfg(feature = "std")] impl ::std::hash::Hash for Signature { - fn hash(&self, state: &mut H) { - ::std::hash::Hash::hash(&self.0[..], state); - } + fn hash(&self, state: &mut H) { + ::std::hash::Hash::hash(&self.0[..], state); + } } /// A localized signature also contains sender information. @@ -222,149 +239,152 @@ impl ::std::hash::Hash for Signature { #[cfg(feature = "std")] #[derive(PartialEq, Eq, Clone, Debug)] pub struct LocalizedSignature { - /// The signer of the signature. - pub signer: Public, - /// The signature itself. - pub signature: Signature, + /// The signer of the signature. + pub signer: Public, + /// The signature itself. + pub signature: Signature, } impl Signature { - /// A new instance from the given 64-byte `data`. - /// - /// NOTE: No checking goes on to ensure this is a real signature. Only use it if - /// you are certain that the array actually is a signature. GIGO! - pub fn from_raw(data: [u8; 64]) -> Signature { - Signature(data) - } - - /// A new instance from the given slice that should be 64 bytes long. - /// - /// NOTE: No checking goes on to ensure this is a real signature. Only use it if - /// you are certain that the array actually is a signature. GIGO! - pub fn from_slice(data: &[u8]) -> Self { - let mut r = [0u8; 64]; - r.copy_from_slice(data); - Signature(r) - } - - /// A new instance from an H512. - /// - /// NOTE: No checking goes on to ensure this is a real signature. Only use it if - /// you are certain that the array actually is a signature. GIGO! - pub fn from_h512(v: H512) -> Signature { - Signature(v.into()) - } + /// A new instance from the given 64-byte `data`. + /// + /// NOTE: No checking goes on to ensure this is a real signature. Only use it if + /// you are certain that the array actually is a signature. GIGO! + pub fn from_raw(data: [u8; 64]) -> Signature { + Signature(data) + } + + /// A new instance from the given slice that should be 64 bytes long. + /// + /// NOTE: No checking goes on to ensure this is a real signature. Only use it if + /// you are certain that the array actually is a signature. GIGO! + pub fn from_slice(data: &[u8]) -> Self { + let mut r = [0u8; 64]; + r.copy_from_slice(data); + Signature(r) + } + + /// A new instance from an H512. + /// + /// NOTE: No checking goes on to ensure this is a real signature. Only use it if + /// you are certain that the array actually is a signature. GIGO! + pub fn from_h512(v: H512) -> Signature { + Signature(v.into()) + } } #[cfg(feature = "std")] impl Derive for Public { - /// Derive a child key from a series of given junctions. - /// - /// `None` if there are any hard junctions in there. 
- fn derive>(&self, path: Iter) -> Option { - let mut acc = PublicKey::from_bytes(self.as_ref()).ok()?; - for j in path { - match j { - DeriveJunction::Soft(cc) => acc = acc.derived_key_simple(ChainCode(cc), &[]).0, - DeriveJunction::Hard(_cc) => return None, - } - } - Some(Self(acc.to_bytes())) - } + /// Derive a child key from a series of given junctions. + /// + /// `None` if there are any hard junctions in there. + fn derive>(&self, path: Iter) -> Option { + let mut acc = PublicKey::from_bytes(self.as_ref()).ok()?; + for j in path { + match j { + DeriveJunction::Soft(cc) => acc = acc.derived_key_simple(ChainCode(cc), &[]).0, + DeriveJunction::Hard(_cc) => return None, + } + } + Some(Self(acc.to_bytes())) + } } impl Public { - /// A new instance from the given 32-byte `data`. - /// - /// NOTE: No checking goes on to ensure this is a real public key. Only use it if - /// you are certain that the array actually is a pubkey. GIGO! - pub fn from_raw(data: [u8; 32]) -> Self { - Public(data) - } - - /// A new instance from the given slice that should be 32 bytes long. - /// - /// NOTE: No checking goes on to ensure this is a real public key. Only use it if - /// you are certain that the array actually is a pubkey. GIGO! - pub fn from_slice(data: &[u8]) -> Self { - let mut r = [0u8; 32]; - r.copy_from_slice(data); - Public(r) - } - - /// A new instance from an H256. - /// - /// NOTE: No checking goes on to ensure this is a real public key. Only use it if - /// you are certain that the array actually is a pubkey. GIGO! - pub fn from_h256(x: H256) -> Self { - Public(x.into()) - } - - /// Return a `Vec` filled with raw data. - #[cfg(feature = "std")] - pub fn to_raw_vec(self) -> Vec { - let r: &[u8; 32] = self.as_ref(); - r.to_vec() - } - - /// Return a slice filled with raw data. - pub fn as_slice(&self) -> &[u8] { - let r: &[u8; 32] = self.as_ref(); - &r[..] - } - - /// Return a slice filled with raw data. - pub fn as_array_ref(&self) -> &[u8; 32] { - self.as_ref() - } + /// A new instance from the given 32-byte `data`. + /// + /// NOTE: No checking goes on to ensure this is a real public key. Only use it if + /// you are certain that the array actually is a pubkey. GIGO! + pub fn from_raw(data: [u8; 32]) -> Self { + Public(data) + } + + /// A new instance from the given slice that should be 32 bytes long. + /// + /// NOTE: No checking goes on to ensure this is a real public key. Only use it if + /// you are certain that the array actually is a pubkey. GIGO! + pub fn from_slice(data: &[u8]) -> Self { + let mut r = [0u8; 32]; + r.copy_from_slice(data); + Public(r) + } + + /// A new instance from an H256. + /// + /// NOTE: No checking goes on to ensure this is a real public key. Only use it if + /// you are certain that the array actually is a pubkey. GIGO! + pub fn from_h256(x: H256) -> Self { + Public(x.into()) + } + + /// Return a `Vec` filled with raw data. + #[cfg(feature = "std")] + pub fn to_raw_vec(self) -> Vec { + let r: &[u8; 32] = self.as_ref(); + r.to_vec() + } + + /// Return a slice filled with raw data. + pub fn as_slice(&self) -> &[u8] { + let r: &[u8; 32] = self.as_ref(); + &r[..] + } + + /// Return a slice filled with raw data. 
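The `Derive for Public` impl above encodes the soft/hard asymmetry of sr25519 derivation; a sketch (not part of the patch) mirroring the `derive_hard_public_should_fail` test further down:

```rust
// Editorial sketch: soft junctions can be derived from the public key alone;
// hard junctions need the mini secret, so `derive` returns `None` for them.
fn public_derivation_is_soft_only() {
    let pair = Pair::generate();
    let public = pair.public();
    assert!(public
        .derive(Some(DeriveJunction::soft(1)).into_iter())
        .is_some());
    assert!(public
        .derive(Some(DeriveJunction::hard(1)).into_iter())
        .is_none());
}
```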
+ pub fn as_array_ref(&self) -> &[u8; 32] { + self.as_ref() + } } #[cfg(feature = "std")] impl AsRef for Pair { - fn as_ref(&self) -> &Pair { - &self - } + fn as_ref(&self) -> &Pair { + &self + } } #[cfg(feature = "std")] impl From for Pair { - fn from(sec: MiniSecretKey) -> Pair { - Pair(sec.expand_to_keypair()) - } + fn from(sec: MiniSecretKey) -> Pair { + Pair(sec.expand_to_keypair()) + } } #[cfg(feature = "std")] impl From for Pair { - fn from(sec: SecretKey) -> Pair { - Pair(Keypair::from(sec)) - } + fn from(sec: SecretKey) -> Pair { + Pair(Keypair::from(sec)) + } } #[cfg(feature = "std")] impl From for Pair { - fn from(p: schnorrkel::Keypair) -> Pair { - Pair(p) - } + fn from(p: schnorrkel::Keypair) -> Pair { + Pair(p) + } } #[cfg(feature = "std")] impl From for schnorrkel::Keypair { - fn from(p: Pair) -> schnorrkel::Keypair { - p.0 - } + fn from(p: Pair) -> schnorrkel::Keypair { + p.0 + } } #[cfg(feature = "std")] impl AsRef for Pair { - fn as_ref(&self) -> &schnorrkel::Keypair { - &self.0 - } + fn as_ref(&self) -> &schnorrkel::Keypair { + &self.0 + } } /// Derive a single hard junction. #[cfg(feature = "std")] fn derive_hard_junction(secret: &SecretKey, cc: &[u8; CHAIN_CODE_LENGTH]) -> SecretKey { - secret.hard_derive_mini_secret_key(Some(ChainCode(cc.clone())), b"").0.expand() + secret + .hard_derive_mini_secret_key(Some(ChainCode(cc.clone())), b"") + .0 + .expand() } #[cfg(feature = "std")] @@ -372,274 +392,310 @@ type Seed = [u8; MINI_SECRET_KEY_LENGTH]; #[cfg(feature = "std")] impl TraitPair for Pair { - type Public = Public; - type Seed = Seed; - type Signature = Signature; - type DeriveError = Infallible; - - /// Generate new secure (random) key pair. - fn generate() -> Pair { - let mut csprng: OsRng = OsRng::new().expect("os random generator works; qed"); - let key_pair: Keypair = Keypair::generate(&mut csprng); - Pair(key_pair) - } - - /// Make a new key pair from raw secret seed material. - /// - /// This is generated using schnorrkel's Mini-Secret-Keys. - /// - /// A MiniSecretKey is literally what Ed25519 calls a SecretKey, which is just 32 random bytes. - fn from_seed(seed: Seed) -> Pair { - let mini_key: MiniSecretKey = MiniSecretKey::from_bytes(&seed[..]) - .expect("32 bytes can always build a key; qed"); - let kp = mini_key.expand_to_keypair(); - Pair(kp) - } - - /// Get the public key. - fn public(&self) -> Public { - let mut pk = [0u8; 32]; - pk.copy_from_slice(&self.0.public.to_bytes()); - Public(pk) - } - - /// Make a new key pair from secret seed material. The slice must be 32 bytes long or it - /// will return `None`. - /// - /// You should never need to use this; generate(), generate_with_phrase(), from_phrase() - fn from_seed_slice(seed: &[u8]) -> Result { - if seed.len() != MINI_SECRET_KEY_LENGTH { - Err(SecretStringError::InvalidSeedLength) - } else { - Ok(Pair( - MiniSecretKey::from_bytes(seed) - .map_err(|_| SecretStringError::InvalidSeed)? - .expand_to_keypair() - )) - } - } - - /// Generate a key from the phrase, password and derivation path. - fn from_standard_components>(phrase: &str, password: Option<&str>, path: I) -> Result { - Self::from_phrase(phrase, password)? 
- .derive(path) - .map_err(|_| SecretStringError::InvalidPath) - } - - fn generate_with_phrase(password: Option<&str>) -> (Pair, String) { - let mnemonic = Mnemonic::new(MnemonicType::Words12, Language::English); - let phrase = mnemonic.phrase(); - ( - Self::from_phrase(phrase, password).expect("All phrases generated by Mnemonic are valid; qed"), - phrase.to_owned(), - ) - } - - fn from_phrase(phrase: &str, password: Option<&str>) -> Result { - Mnemonic::from_phrase(phrase, Language::English) - .map_err(|_| SecretStringError::InvalidPhrase) - .map(|m| Self::from_entropy(m.entropy(), password)) - } - - fn derive>(&self, path: Iter) -> Result { - let init = self.0.secret.clone(); - let result = path.fold(init, |acc, j| match j { - DeriveJunction::Soft(cc) => acc.derived_key_simple(ChainCode(cc), &[]).0, - DeriveJunction::Hard(cc) => derive_hard_junction(&acc, &cc), - }); - Ok(Self(result.into())) - } - - fn sign(&self, message: &[u8]) -> Signature { - let context = signing_context(SIGNING_CTX); - self.0.sign(context.bytes(message)).into() - } - - /// Verify a signature on a message. Returns true if the signature is good. - fn verify, M: AsRef<[u8]>>(sig: &Self::Signature, message: M, pubkey: P) -> bool { - let signature: schnorrkel::Signature = match schnorrkel::Signature::from_bytes(&sig.as_ref()) { - Ok(some_signature) => some_signature, - Err(_) => return false - }; - match PublicKey::from_bytes(pubkey.as_ref().as_slice()) { - Ok(pk) => pk.verify( - signing_context(SIGNING_CTX).bytes(message.as_ref()), &signature - ), - Err(_) => false, - } - } - - /// Verify a signature on a message. Returns true if the signature is good. - fn verify_weak, M: AsRef<[u8]>>(sig: &[u8], message: M, pubkey: P) -> bool { - let signature: schnorrkel::Signature = match schnorrkel::Signature::from_bytes(sig) { - Ok(some_signature) => some_signature, - Err(_) => return false - }; - match PublicKey::from_bytes(pubkey.as_ref()) { - Ok(pk) => pk.verify( - signing_context(SIGNING_CTX).bytes(message.as_ref()), &signature - ), - Err(_) => false, - } - } + type Public = Public; + type Seed = Seed; + type Signature = Signature; + type DeriveError = Infallible; + + /// Generate new secure (random) key pair. + fn generate() -> Pair { + let mut csprng: OsRng = OsRng::new().expect("os random generator works; qed"); + let key_pair: Keypair = Keypair::generate(&mut csprng); + Pair(key_pair) + } + + /// Make a new key pair from raw secret seed material. + /// + /// This is generated using schnorrkel's Mini-Secret-Keys. + /// + /// A MiniSecretKey is literally what Ed25519 calls a SecretKey, which is just 32 random bytes. + fn from_seed(seed: Seed) -> Pair { + let mini_key: MiniSecretKey = + MiniSecretKey::from_bytes(&seed[..]).expect("32 bytes can always build a key; qed"); + let kp = mini_key.expand_to_keypair(); + Pair(kp) + } + + /// Get the public key. + fn public(&self) -> Public { + let mut pk = [0u8; 32]; + pk.copy_from_slice(&self.0.public.to_bytes()); + Public(pk) + } + + /// Make a new key pair from secret seed material. The slice must be 32 bytes long or it + /// will return `None`. + /// + /// You should never need to use this; generate(), generate_with_phrase(), from_phrase() + fn from_seed_slice(seed: &[u8]) -> Result { + if seed.len() != MINI_SECRET_KEY_LENGTH { + Err(SecretStringError::InvalidSeedLength) + } else { + Ok(Pair( + MiniSecretKey::from_bytes(seed) + .map_err(|_| SecretStringError::InvalidSeed)? + .expand_to_keypair(), + )) + } + } + + /// Generate a key from the phrase, password and derivation path. 
+ fn from_standard_components>( + phrase: &str, + password: Option<&str>, + path: I, + ) -> Result { + Self::from_phrase(phrase, password)? + .derive(path) + .map_err(|_| SecretStringError::InvalidPath) + } + + fn generate_with_phrase(password: Option<&str>) -> (Pair, String) { + let mnemonic = Mnemonic::new(MnemonicType::Words12, Language::English); + let phrase = mnemonic.phrase(); + ( + Self::from_phrase(phrase, password) + .expect("All phrases generated by Mnemonic are valid; qed"), + phrase.to_owned(), + ) + } + + fn from_phrase(phrase: &str, password: Option<&str>) -> Result { + Mnemonic::from_phrase(phrase, Language::English) + .map_err(|_| SecretStringError::InvalidPhrase) + .map(|m| Self::from_entropy(m.entropy(), password)) + } + + fn derive>( + &self, + path: Iter, + ) -> Result { + let init = self.0.secret.clone(); + let result = path.fold(init, |acc, j| match j { + DeriveJunction::Soft(cc) => acc.derived_key_simple(ChainCode(cc), &[]).0, + DeriveJunction::Hard(cc) => derive_hard_junction(&acc, &cc), + }); + Ok(Self(result.into())) + } + + fn sign(&self, message: &[u8]) -> Signature { + let context = signing_context(SIGNING_CTX); + self.0.sign(context.bytes(message)).into() + } + + /// Verify a signature on a message. Returns true if the signature is good. + fn verify, M: AsRef<[u8]>>( + sig: &Self::Signature, + message: M, + pubkey: P, + ) -> bool { + let signature: schnorrkel::Signature = + match schnorrkel::Signature::from_bytes(&sig.as_ref()) { + Ok(some_signature) => some_signature, + Err(_) => return false, + }; + match PublicKey::from_bytes(pubkey.as_ref().as_slice()) { + Ok(pk) => pk.verify( + signing_context(SIGNING_CTX).bytes(message.as_ref()), + &signature, + ), + Err(_) => false, + } + } + + /// Verify a signature on a message. Returns true if the signature is good. + fn verify_weak, M: AsRef<[u8]>>(sig: &[u8], message: M, pubkey: P) -> bool { + let signature: schnorrkel::Signature = match schnorrkel::Signature::from_bytes(sig) { + Ok(some_signature) => some_signature, + Err(_) => return false, + }; + match PublicKey::from_bytes(pubkey.as_ref()) { + Ok(pk) => pk.verify( + signing_context(SIGNING_CTX).bytes(message.as_ref()), + &signature, + ), + Err(_) => false, + } + } } #[cfg(feature = "std")] impl Pair { - /// Make a new key pair from binary data derived from a valid seed phrase. - /// - /// This uses a key derivation function to convert the entropy into a seed, then returns - /// the pair generated from it. - pub fn from_entropy(entropy: &[u8], password: Option<&str>) -> Pair { - let mini_key: MiniSecretKey = mini_secret_from_entropy(entropy, password.unwrap_or("")) - .expect("32 bytes can always build a key; qed"); - let kp = mini_key.expand_to_keypair(); - Pair(kp) - } + /// Make a new key pair from binary data derived from a valid seed phrase. + /// + /// This uses a key derivation function to convert the entropy into a seed, then returns + /// the pair generated from it. 
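The mnemonic plumbing reformatted above composes into a simple round trip; a sketch (not part of the patch) using only the methods shown:

```rust
// Editorial sketch: the generated phrase plus the same password always
// recovers the same keypair, and signatures verify against the public key.
fn phrase_round_trip() {
    let (pair, phrase) = Pair::generate_with_phrase(Some("password"));
    let recovered = Pair::from_phrase(&phrase, Some("password")).unwrap();
    assert_eq!(pair.public(), recovered.public());

    let sig = pair.sign(b"msg");
    assert!(Pair::verify(&sig, &b"msg"[..], &pair.public()));
}
```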
+ pub fn from_entropy(entropy: &[u8], password: Option<&str>) -> Pair { + let mini_key: MiniSecretKey = mini_secret_from_entropy(entropy, password.unwrap_or("")) + .expect("32 bytes can always build a key; qed"); + let kp = mini_key.expand_to_keypair(); + Pair(kp) + } } #[cfg(test)] mod test { - use super::*; - use crate::crypto::{Ss58Codec, DEV_PHRASE, DEV_ADDRESS}; - use hex_literal::{hex, hex_impl}; - - #[test] - fn default_phrase_should_be_used() { - assert_eq!( - Pair::from_string("//Alice///password", None).unwrap().public(), - Pair::from_string(&format!("{}//Alice", DEV_PHRASE), Some("password")).unwrap().public(), - ); - assert_eq!( - Pair::from_string(&format!("{}/Alice", DEV_PHRASE), None).as_ref().map(Pair::public), - Pair::from_string("/Alice", None).as_ref().map(Pair::public) - ); - } - - #[test] - fn default_address_should_be_used() { - assert_eq!( - Public::from_string(&format!("{}/Alice", DEV_ADDRESS)), - Public::from_string("/Alice") - ); - } - - #[test] - fn default_phrase_should_correspond_to_default_address() { - assert_eq!( - Pair::from_string(&format!("{}/Alice", DEV_PHRASE), None).unwrap().public(), - Public::from_string(&format!("{}/Alice", DEV_ADDRESS)).unwrap(), - ); - assert_eq!( - Pair::from_string("/Alice", None).unwrap().public(), - Public::from_string("/Alice").unwrap() - ); - } - - #[test] - fn derive_soft_should_work() { - let pair: Pair = Pair::from_seed(hex!( - "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60" - )); - let derive_1 = pair.derive(Some(DeriveJunction::soft(1)).into_iter()).unwrap(); - let derive_1b = pair.derive(Some(DeriveJunction::soft(1)).into_iter()).unwrap(); - let derive_2 = pair.derive(Some(DeriveJunction::soft(2)).into_iter()).unwrap(); - assert_eq!(derive_1.public(), derive_1b.public()); - assert_ne!(derive_1.public(), derive_2.public()); - } - - #[test] - fn derive_hard_should_work() { - let pair: Pair = Pair::from_seed(hex!( - "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60" - )); - let derive_1 = pair.derive(Some(DeriveJunction::hard(1)).into_iter()).unwrap(); - let derive_1b = pair.derive(Some(DeriveJunction::hard(1)).into_iter()).unwrap(); - let derive_2 = pair.derive(Some(DeriveJunction::hard(2)).into_iter()).unwrap(); - assert_eq!(derive_1.public(), derive_1b.public()); - assert_ne!(derive_1.public(), derive_2.public()); - } - - #[test] - fn derive_soft_public_should_work() { - let pair: Pair = Pair::from_seed(hex!( - "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60" - )); - let path = Some(DeriveJunction::soft(1)); - let pair_1 = pair.derive(path.clone().into_iter()).unwrap(); - let public_1 = pair.public().derive(path.into_iter()).unwrap(); - assert_eq!(pair_1.public(), public_1); - } - - #[test] - fn derive_hard_public_should_fail() { - let pair: Pair = Pair::from_seed(hex!( - "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60" - )); - let path = Some(DeriveJunction::hard(1)); - assert!(pair.public().derive(path.into_iter()).is_none()); - } - - #[test] - fn sr_test_vector_should_work() { - let pair: Pair = Pair::from_seed(hex!( - "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60" - )); - let public = pair.public(); - assert_eq!( - public, - Public::from_raw(hex!( - "44a996beb1eef7bdcab976ab6d2ca26104834164ecf28fb375600576fcc6eb0f" - )) - ); - let message = b""; - let signature = pair.sign(message); - assert!(Pair::verify(&signature, &message[..], &public)); - } - - #[test] - fn generated_pair_should_work() { - let pair = Pair::generate(); - 
let public = pair.public(); - let message = b"Something important"; - let signature = pair.sign(&message[..]); - assert!(Pair::verify(&signature, &message[..], &public)); - } - - #[test] - fn seeded_pair_should_work() { - - let pair = Pair::from_seed(*b"12345678901234567890123456789012"); - let public = pair.public(); - assert_eq!( - public, - Public::from_raw(hex!( - "741c08a06f41c596608f6774259bd9043304adfa5d3eea62760bd9be97634d63" - )) - ); - let message = hex!("2f8c6129d816cf51c374bc7f08c3e63ed156cf78aefb4a6550d97b87997977ee00000000000000000200d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a4500000000000000"); - let signature = pair.sign(&message[..]); - assert!(Pair::verify(&signature, &message[..], &public)); - } - - #[test] - fn ss58check_roundtrip_works() { - let pair = Pair::generate(); - let public = pair.public(); - let s = public.to_ss58check(); - println!("Correct: {}", s); - let cmp = Public::from_ss58check(&s).unwrap(); - assert_eq!(cmp, public); - } - - #[test] - fn verify_from_wasm_works() { - // The values in this test case are compared to the output of `node-test.js` in schnorrkel-js. - // - // This is to make sure that the wasm library is compatible. - let pk = Pair::from_seed(hex!("0000000000000000000000000000000000000000000000000000000000000000")); - let public = pk.public(); - let js_signature = Signature::from_raw(hex!("28a854d54903e056f89581c691c1f7d2ff39f8f896c9e9c22475e60902cc2b3547199e0e91fa32902028f2ca2355e8cdd16cfe19ba5e8b658c94aa80f3b81a00")); - assert!(Pair::verify(&js_signature, b"SUBSTRATE", public)); - } + use super::*; + use crate::crypto::{Ss58Codec, DEV_ADDRESS, DEV_PHRASE}; + use hex_literal::{hex, hex_impl}; + + #[test] + fn default_phrase_should_be_used() { + assert_eq!( + Pair::from_string("//Alice///password", None) + .unwrap() + .public(), + Pair::from_string(&format!("{}//Alice", DEV_PHRASE), Some("password")) + .unwrap() + .public(), + ); + assert_eq!( + Pair::from_string(&format!("{}/Alice", DEV_PHRASE), None) + .as_ref() + .map(Pair::public), + Pair::from_string("/Alice", None).as_ref().map(Pair::public) + ); + } + + #[test] + fn default_address_should_be_used() { + assert_eq!( + Public::from_string(&format!("{}/Alice", DEV_ADDRESS)), + Public::from_string("/Alice") + ); + } + + #[test] + fn default_phrase_should_correspond_to_default_address() { + assert_eq!( + Pair::from_string(&format!("{}/Alice", DEV_PHRASE), None) + .unwrap() + .public(), + Public::from_string(&format!("{}/Alice", DEV_ADDRESS)).unwrap(), + ); + assert_eq!( + Pair::from_string("/Alice", None).unwrap().public(), + Public::from_string("/Alice").unwrap() + ); + } + + #[test] + fn derive_soft_should_work() { + let pair: Pair = Pair::from_seed(hex!( + "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60" + )); + let derive_1 = pair + .derive(Some(DeriveJunction::soft(1)).into_iter()) + .unwrap(); + let derive_1b = pair + .derive(Some(DeriveJunction::soft(1)).into_iter()) + .unwrap(); + let derive_2 = pair + .derive(Some(DeriveJunction::soft(2)).into_iter()) + .unwrap(); + assert_eq!(derive_1.public(), derive_1b.public()); + assert_ne!(derive_1.public(), derive_2.public()); + } + + #[test] + fn derive_hard_should_work() { + let pair: Pair = Pair::from_seed(hex!( + "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60" + )); + let derive_1 = pair + .derive(Some(DeriveJunction::hard(1)).into_iter()) + .unwrap(); + let derive_1b = pair + .derive(Some(DeriveJunction::hard(1)).into_iter()) + .unwrap(); + let derive_2 = pair + 
.derive(Some(DeriveJunction::hard(2)).into_iter()) + .unwrap(); + assert_eq!(derive_1.public(), derive_1b.public()); + assert_ne!(derive_1.public(), derive_2.public()); + } + + #[test] + fn derive_soft_public_should_work() { + let pair: Pair = Pair::from_seed(hex!( + "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60" + )); + let path = Some(DeriveJunction::soft(1)); + let pair_1 = pair.derive(path.clone().into_iter()).unwrap(); + let public_1 = pair.public().derive(path.into_iter()).unwrap(); + assert_eq!(pair_1.public(), public_1); + } + + #[test] + fn derive_hard_public_should_fail() { + let pair: Pair = Pair::from_seed(hex!( + "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60" + )); + let path = Some(DeriveJunction::hard(1)); + assert!(pair.public().derive(path.into_iter()).is_none()); + } + + #[test] + fn sr_test_vector_should_work() { + let pair: Pair = Pair::from_seed(hex!( + "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60" + )); + let public = pair.public(); + assert_eq!( + public, + Public::from_raw(hex!( + "44a996beb1eef7bdcab976ab6d2ca26104834164ecf28fb375600576fcc6eb0f" + )) + ); + let message = b""; + let signature = pair.sign(message); + assert!(Pair::verify(&signature, &message[..], &public)); + } + + #[test] + fn generated_pair_should_work() { + let pair = Pair::generate(); + let public = pair.public(); + let message = b"Something important"; + let signature = pair.sign(&message[..]); + assert!(Pair::verify(&signature, &message[..], &public)); + } + + #[test] + fn seeded_pair_should_work() { + let pair = Pair::from_seed(*b"12345678901234567890123456789012"); + let public = pair.public(); + assert_eq!( + public, + Public::from_raw(hex!( + "741c08a06f41c596608f6774259bd9043304adfa5d3eea62760bd9be97634d63" + )) + ); + let message = hex!("2f8c6129d816cf51c374bc7f08c3e63ed156cf78aefb4a6550d97b87997977ee00000000000000000200d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a4500000000000000"); + let signature = pair.sign(&message[..]); + assert!(Pair::verify(&signature, &message[..], &public)); + } + + #[test] + fn ss58check_roundtrip_works() { + let pair = Pair::generate(); + let public = pair.public(); + let s = public.to_ss58check(); + println!("Correct: {}", s); + let cmp = Public::from_ss58check(&s).unwrap(); + assert_eq!(cmp, public); + } + + #[test] + fn verify_from_wasm_works() { + // The values in this test case are compared to the output of `node-test.js` in schnorrkel-js. + // + // This is to make sure that the wasm library is compatible. + let pk = Pair::from_seed(hex!( + "0000000000000000000000000000000000000000000000000000000000000000" + )); + let public = pk.public(); + let js_signature = Signature::from_raw(hex!("28a854d54903e056f89581c691c1f7d2ff39f8f896c9e9c22475e60902cc2b3547199e0e91fa32902028f2ca2355e8cdd16cfe19ba5e8b658c94aa80f3b81a00")); + assert!(Pair::verify(&js_signature, b"SUBSTRATE", public)); + } } diff --git a/core/primitives/src/storage.rs b/core/primitives/src/storage.rs index 79652a8d4c..ab8279b482 100644 --- a/core/primitives/src/storage.rs +++ b/core/primitives/src/storage.rs @@ -16,75 +16,78 @@ //! Contract execution data. -#[cfg(feature = "std")] -use serde_derive::{Serialize, Deserialize}; #[cfg(feature = "std")] use crate::bytes; use rstd::vec::Vec; +#[cfg(feature = "std")] +use serde_derive::{Deserialize, Serialize}; /// Contract storage key. 
#[derive(PartialEq, Eq)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize, Debug, Hash, PartialOrd, Ord, Clone))] -pub struct StorageKey(#[cfg_attr(feature = "std", serde(with="bytes"))] pub Vec); +#[cfg_attr( + feature = "std", + derive(Serialize, Deserialize, Debug, Hash, PartialOrd, Ord, Clone) +)] +pub struct StorageKey(#[cfg_attr(feature = "std", serde(with = "bytes"))] pub Vec); /// Contract storage entry data. #[derive(PartialEq, Eq)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize, Debug, Hash, PartialOrd, Ord, Clone))] -pub struct StorageData(#[cfg_attr(feature = "std", serde(with="bytes"))] pub Vec); +#[cfg_attr( + feature = "std", + derive(Serialize, Deserialize, Debug, Hash, PartialOrd, Ord, Clone) +)] +pub struct StorageData(#[cfg_attr(feature = "std", serde(with = "bytes"))] pub Vec); /// Storage change set #[cfg_attr(feature = "std", derive(Serialize, Deserialize, Debug, PartialEq, Eq))] #[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] pub struct StorageChangeSet { - /// Block hash - pub block: Hash, - /// A list of changes - pub changes: Vec<( - StorageKey, - Option, - )>, + /// Block hash + pub block: Hash, + /// A list of changes + pub changes: Vec<(StorageKey, Option)>, } /// List of all well known keys and prefixes in storage. pub mod well_known_keys { - /// Wasm code of the runtime. - /// - /// Stored as a raw byte vector. Required by substrate. - pub const CODE: &'static [u8] = b":code"; + /// Wasm code of the runtime. + /// + /// Stored as a raw byte vector. Required by substrate. + pub const CODE: &'static [u8] = b":code"; - /// Number of wasm linear memory pages required for execution of the runtime. - /// - /// The type of this value is encoded `u64`. - pub const HEAP_PAGES: &'static [u8] = b":heappages"; + /// Number of wasm linear memory pages required for execution of the runtime. + /// + /// The type of this value is encoded `u64`. + pub const HEAP_PAGES: &'static [u8] = b":heappages"; - /// Number of authorities. - /// - /// The type of this value is encoded `u32`. Required by substrate. - pub const AUTHORITY_COUNT: &'static [u8] = b":auth:len"; + /// Number of authorities. + /// + /// The type of this value is encoded `u32`. Required by substrate. + pub const AUTHORITY_COUNT: &'static [u8] = b":auth:len"; - /// Prefix under which authorities are storied. - /// - /// The full key for N-th authority is generated as: - /// - /// `(n as u32).to_keyed_vec(AUTHORITY_PREFIX)`. - pub const AUTHORITY_PREFIX: &'static [u8] = b":auth:"; + /// Prefix under which authorities are storied. + /// + /// The full key for N-th authority is generated as: + /// + /// `(n as u32).to_keyed_vec(AUTHORITY_PREFIX)`. + pub const AUTHORITY_PREFIX: &'static [u8] = b":auth:"; - /// Current extrinsic index (u32) is stored under this key. - pub const EXTRINSIC_INDEX: &'static [u8] = b":extrinsic_index"; + /// Current extrinsic index (u32) is stored under this key. + pub const EXTRINSIC_INDEX: &'static [u8] = b":extrinsic_index"; - /// Sum of all lengths of executed extrinsics (u32). - pub const ALL_EXTRINSICS_LEN: &'static [u8] = b":all_extrinsics_len"; + /// Sum of all lengths of executed extrinsics (u32). + pub const ALL_EXTRINSICS_LEN: &'static [u8] = b":all_extrinsics_len"; - /// Changes trie configuration is stored under this key. - pub const CHANGES_TRIE_CONFIG: &'static [u8] = b":changes_trie"; + /// Changes trie configuration is stored under this key. 
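The `well_known_keys` module collects the raw byte keys substrate relies on; a sketch (not part of the patch) using the constants above and the `is_child_storage_key` predicate formatted just below. The child-trie name is hypothetical:

```rust
// Editorial sketch: child keys live under the b":child_storage:" prefix;
// everything else is a top-level key.
fn child_keys_live_under_the_prefix() {
    use well_known_keys::{is_child_storage_key, CHILD_STORAGE_KEY_PREFIX, CODE};

    let mut key = CHILD_STORAGE_KEY_PREFIX.to_vec();
    key.extend_from_slice(b"my_child_trie"); // hypothetical child trie name
    assert!(is_child_storage_key(&key));
    assert!(!is_child_storage_key(CODE)); // b":code" is top-level
}
```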
+ pub const CHANGES_TRIE_CONFIG: &'static [u8] = b":changes_trie"; - /// Prefix of child storage keys. - pub const CHILD_STORAGE_KEY_PREFIX: &'static [u8] = b":child_storage:"; + /// Prefix of child storage keys. + pub const CHILD_STORAGE_KEY_PREFIX: &'static [u8] = b":child_storage:"; - /// Whether a key is a child storage key. - pub fn is_child_storage_key(key: &[u8]) -> bool { - key.starts_with(CHILD_STORAGE_KEY_PREFIX) - } + /// Whether a key is a child storage key. + pub fn is_child_storage_key(key: &[u8]) -> bool { + key.starts_with(CHILD_STORAGE_KEY_PREFIX) + } } diff --git a/core/primitives/src/u32_trait.rs b/core/primitives/src/u32_trait.rs index 3fcdceac4c..7733754c96 100644 --- a/core/primitives/src/u32_trait.rs +++ b/core/primitives/src/u32_trait.rs @@ -18,72 +18,171 @@ /// A u32 value, wrapped in a trait because we don't yet have const generics. pub trait Value { - /// The actual value represented by the impl'ing type. - const VALUE: u32; + /// The actual value represented by the impl'ing type. + const VALUE: u32; } /// Type representing the value 0 for the `Value` trait. -pub struct _0; impl Value for _0 { const VALUE: u32 = 0; } +pub struct _0; +impl Value for _0 { + const VALUE: u32 = 0; +} /// Type representing the value 1 for the `Value` trait. -pub struct _1; impl Value for _1 { const VALUE: u32 = 1; } +pub struct _1; +impl Value for _1 { + const VALUE: u32 = 1; +} /// Type representing the value 2 for the `Value` trait. -pub struct _2; impl Value for _2 { const VALUE: u32 = 2; } +pub struct _2; +impl Value for _2 { + const VALUE: u32 = 2; +} /// Type representing the value 3 for the `Value` trait. -pub struct _3; impl Value for _3 { const VALUE: u32 = 3; } +pub struct _3; +impl Value for _3 { + const VALUE: u32 = 3; +} /// Type representing the value 4 for the `Value` trait. -pub struct _4; impl Value for _4 { const VALUE: u32 = 4; } +pub struct _4; +impl Value for _4 { + const VALUE: u32 = 4; +} /// Type representing the value 5 for the `Value` trait. -pub struct _5; impl Value for _5 { const VALUE: u32 = 5; } +pub struct _5; +impl Value for _5 { + const VALUE: u32 = 5; +} /// Type representing the value 6 for the `Value` trait. -pub struct _6; impl Value for _6 { const VALUE: u32 = 6; } +pub struct _6; +impl Value for _6 { + const VALUE: u32 = 6; +} /// Type representing the value 7 for the `Value` trait. -pub struct _7; impl Value for _7 { const VALUE: u32 = 7; } +pub struct _7; +impl Value for _7 { + const VALUE: u32 = 7; +} /// Type representing the value 8 for the `Value` trait. -pub struct _8; impl Value for _8 { const VALUE: u32 = 8; } +pub struct _8; +impl Value for _8 { + const VALUE: u32 = 8; +} /// Type representing the value 9 for the `Value` trait. -pub struct _9; impl Value for _9 { const VALUE: u32 = 9; } +pub struct _9; +impl Value for _9 { + const VALUE: u32 = 9; +} /// Type representing the value 10 for the `Value` trait. -pub struct _10; impl Value for _10 { const VALUE: u32 = 10; } +pub struct _10; +impl Value for _10 { + const VALUE: u32 = 10; +} /// Type representing the value 11 for the `Value` trait. -pub struct _11; impl Value for _11 { const VALUE: u32 = 11; } +pub struct _11; +impl Value for _11 { + const VALUE: u32 = 11; +} /// Type representing the value 12 for the `Value` trait. -pub struct _12; impl Value for _12 { const VALUE: u32 = 12; } +pub struct _12; +impl Value for _12 { + const VALUE: u32 = 12; +} /// Type representing the value 13 for the `Value` trait. 
-pub struct _13; impl Value for _13 { const VALUE: u32 = 13; } +pub struct _13; +impl Value for _13 { + const VALUE: u32 = 13; +} /// Type representing the value 14 for the `Value` trait. -pub struct _14; impl Value for _14 { const VALUE: u32 = 14; } +pub struct _14; +impl Value for _14 { + const VALUE: u32 = 14; +} /// Type representing the value 15 for the `Value` trait. -pub struct _15; impl Value for _15 { const VALUE: u32 = 15; } +pub struct _15; +impl Value for _15 { + const VALUE: u32 = 15; +} /// Type representing the value 16 for the `Value` trait. -pub struct _16; impl Value for _16 { const VALUE: u32 = 16; } +pub struct _16; +impl Value for _16 { + const VALUE: u32 = 16; +} /// Type representing the value 24 for the `Value` trait. -pub struct _24; impl Value for _24 { const VALUE: u32 = 24; } +pub struct _24; +impl Value for _24 { + const VALUE: u32 = 24; +} /// Type representing the value 32 for the `Value` trait. -pub struct _32; impl Value for _32 { const VALUE: u32 = 32; } +pub struct _32; +impl Value for _32 { + const VALUE: u32 = 32; +} /// Type representing the value 40 for the `Value` trait. -pub struct _40; impl Value for _40 { const VALUE: u32 = 40; } +pub struct _40; +impl Value for _40 { + const VALUE: u32 = 40; +} /// Type representing the value 48 for the `Value` trait. -pub struct _48; impl Value for _48 { const VALUE: u32 = 48; } +pub struct _48; +impl Value for _48 { + const VALUE: u32 = 48; +} /// Type representing the value 56 for the `Value` trait. -pub struct _56; impl Value for _56 { const VALUE: u32 = 56; } +pub struct _56; +impl Value for _56 { + const VALUE: u32 = 56; +} /// Type representing the value 64 for the `Value` trait. -pub struct _64; impl Value for _64 { const VALUE: u32 = 64; } +pub struct _64; +impl Value for _64 { + const VALUE: u32 = 64; +} /// Type representing the value 80 for the `Value` trait. -pub struct _80; impl Value for _80 { const VALUE: u32 = 80; } +pub struct _80; +impl Value for _80 { + const VALUE: u32 = 80; +} /// Type representing the value 96 for the `Value` trait. -pub struct _96; impl Value for _96 { const VALUE: u32 = 96; } +pub struct _96; +impl Value for _96 { + const VALUE: u32 = 96; +} /// Type representing the value 112 for the `Value` trait. -pub struct _112; impl Value for _112 { const VALUE: u32 = 112; } +pub struct _112; +impl Value for _112 { + const VALUE: u32 = 112; +} /// Type representing the value 128 for the `Value` trait. -pub struct _128; impl Value for _128 { const VALUE: u32 = 128; } +pub struct _128; +impl Value for _128 { + const VALUE: u32 = 128; +} /// Type representing the value 160 for the `Value` trait. -pub struct _160; impl Value for _160 { const VALUE: u32 = 160; } +pub struct _160; +impl Value for _160 { + const VALUE: u32 = 160; +} /// Type representing the value 192 for the `Value` trait. -pub struct _192; impl Value for _192 { const VALUE: u32 = 192; } +pub struct _192; +impl Value for _192 { + const VALUE: u32 = 192; +} /// Type representing the value 224 for the `Value` trait. -pub struct _224; impl Value for _224 { const VALUE: u32 = 224; } +pub struct _224; +impl Value for _224 { + const VALUE: u32 = 224; +} /// Type representing the value 256 for the `Value` trait. -pub struct _256; impl Value for _256 { const VALUE: u32 = 256; } +pub struct _256; +impl Value for _256 { + const VALUE: u32 = 256; +} /// Type representing the value 384 for the `Value` trait. 
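As the `u32_trait` module comment says, these `_N` marker types stand in for const generics; a sketch (not part of the patch) of how a generic item consumes them — the pattern continues through `_384` and `_512` below:

```rust
// Editorial sketch: code that needs a compile-time u32 takes a marker type
// and reads the constant through `V::VALUE`.
fn halve<V: Value>() -> u32 {
    V::VALUE / 2
}

fn marker_types_carry_values() {
    assert_eq!(halve::<_256>(), 128);
    assert_eq!(halve::<_16>(), 8);
}
```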
-pub struct _384; impl Value for _384 { const VALUE: u32 = 384; } +pub struct _384; +impl Value for _384 { + const VALUE: u32 = 384; +} /// Type representing the value 512 for the `Value` trait. -pub struct _512; impl Value for _512 { const VALUE: u32 = 512; } +pub struct _512; +impl Value for _512 { + const VALUE: u32 = 512; +} diff --git a/core/primitives/src/uint.rs b/core/primitives/src/uint.rs index dfea51921d..afad55aa1d 100644 --- a/core/primitives/src/uint.rs +++ b/core/primitives/src/uint.rs @@ -20,78 +20,76 @@ pub use primitive_types::U256; #[cfg(test)] mod tests { - use super::*; - use parity_codec::{Encode, Decode}; - use substrate_serializer as ser; + use super::*; + use parity_codec::{Decode, Encode}; + use substrate_serializer as ser; - macro_rules! test { - ($name: ident, $test_name: ident) => { - #[test] - fn $test_name() { - let tests = vec![ - ($name::from(0), "0x0"), - ($name::from(1), "0x1"), - ($name::from(2), "0x2"), - ($name::from(10), "0xa"), - ($name::from(15), "0xf"), - ($name::from(15), "0xf"), - ($name::from(16), "0x10"), - ($name::from(1_000), "0x3e8"), - ($name::from(100_000), "0x186a0"), - ($name::from(u64::max_value()), "0xffffffffffffffff"), - ($name::from(u64::max_value()) + $name::from(1), "0x10000000000000000"), - ]; + macro_rules! test { + ($name: ident, $test_name: ident) => { + #[test] + fn $test_name() { + let tests = vec![ + ($name::from(0), "0x0"), + ($name::from(1), "0x1"), + ($name::from(2), "0x2"), + ($name::from(10), "0xa"), + ($name::from(15), "0xf"), + ($name::from(15), "0xf"), + ($name::from(16), "0x10"), + ($name::from(1_000), "0x3e8"), + ($name::from(100_000), "0x186a0"), + ($name::from(u64::max_value()), "0xffffffffffffffff"), + ( + $name::from(u64::max_value()) + $name::from(1), + "0x10000000000000000", + ), + ]; - for (number, expected) in tests { - assert_eq!(format!("{:?}", expected), ser::to_string_pretty(&number)); - assert_eq!(number, ser::from_str(&format!("{:?}", expected)).unwrap()); - } + for (number, expected) in tests { + assert_eq!(format!("{:?}", expected), ser::to_string_pretty(&number)); + assert_eq!(number, ser::from_str(&format!("{:?}", expected)).unwrap()); + } - // Invalid examples - assert!(ser::from_str::<$name>("\"0x\"").unwrap_err().is_data()); - assert!(ser::from_str::<$name>("\"0xg\"").unwrap_err().is_data()); - assert!(ser::from_str::<$name>("\"\"").unwrap_err().is_data()); - assert!(ser::from_str::<$name>("\"10\"").unwrap_err().is_data()); - assert!(ser::from_str::<$name>("\"0\"").unwrap_err().is_data()); - } - } - } + // Invalid examples + assert!(ser::from_str::<$name>("\"0x\"").unwrap_err().is_data()); + assert!(ser::from_str::<$name>("\"0xg\"").unwrap_err().is_data()); + assert!(ser::from_str::<$name>("\"\"").unwrap_err().is_data()); + assert!(ser::from_str::<$name>("\"10\"").unwrap_err().is_data()); + assert!(ser::from_str::<$name>("\"0\"").unwrap_err().is_data()); + } + }; + } - test!(U256, test_u256); + test!(U256, test_u256); - #[test] - fn test_u256_codec() { - let res1 = vec![120, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0]; - let res2 = vec![0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff]; + #[test] + fn test_u256_codec() { + let res1 = vec![ + 120, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, + ]; + let res2 = vec![ + 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + ]; - assert_eq!( - U256::from(120).encode(), - res1); - assert_eq!( - U256::max_value().encode(), - res2); - assert_eq!( - U256::decode(&mut &res1[..]), - Some(U256::from(120))); - assert_eq!( - U256::decode(&mut &res2[..]), - Some(U256::max_value())); - } + assert_eq!(U256::from(120).encode(), res1); + assert_eq!(U256::max_value().encode(), res2); + assert_eq!(U256::decode(&mut &res1[..]), Some(U256::from(120))); + assert_eq!(U256::decode(&mut &res2[..]), Some(U256::max_value())); + } - #[test] - fn test_large_values() { - assert_eq!( - ser::to_string_pretty(&!U256::zero()), - "\"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"" - ); - assert!( - ser::from_str::("\"0x1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"").unwrap_err().is_data() - ); - } + #[test] + fn test_large_values() { + assert_eq!( + ser::to_string_pretty(&!U256::zero()), + "\"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"" + ); + assert!(ser::from_str::( + "\"0x1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"" + ) + .unwrap_err() + .is_data()); + } } diff --git a/core/rpc-servers/src/lib.rs b/core/rpc-servers/src/lib.rs index 939b2b93aa..48f6187c01 100644 --- a/core/rpc-servers/src/lib.rs +++ b/core/rpc-servers/src/lib.rs @@ -17,12 +17,14 @@ //! Substrate RPC servers. #[warn(missing_docs)] - pub use substrate_rpc as apis; -use std::io; use log::error; -use sr_primitives::{traits::{Block as BlockT, NumberFor}, generic::SignedBlock}; +use sr_primitives::{ + generic::SignedBlock, + traits::{Block as BlockT, NumberFor}, +}; +use std::io; /// Maximal payload accepted by RPC servers const MAX_PAYLOAD: usize = 15 * 1024 * 1024; @@ -34,54 +36,57 @@ pub type WsServer = ws::Server; /// Construct rpc `IoHandler` pub fn rpc_handler( - state: S, - chain: C, - author: A, - system: Y, -) -> RpcHandler where - Block: BlockT + 'static, - ExHash: Send + Sync + 'static + sr_primitives::Serialize + sr_primitives::DeserializeOwned, - S: apis::state::StateApi, - C: apis::chain::ChainApi, Block::Hash, Block::Header, SignedBlock, Metadata=Metadata>, - A: apis::author::AuthorApi, - Y: apis::system::SystemApi>, + state: S, + chain: C, + author: A, + system: Y, +) -> RpcHandler +where + Block: BlockT + 'static, + ExHash: Send + Sync + 'static + sr_primitives::Serialize + sr_primitives::DeserializeOwned, + S: apis::state::StateApi, + C: apis::chain::ChainApi< + NumberFor, + Block::Hash, + Block::Header, + SignedBlock, + Metadata = Metadata, + >, + A: apis::author::AuthorApi, + Y: apis::system::SystemApi>, { - let mut io = pubsub::PubSubHandler::default(); - io.extend_with(state.to_delegate()); - io.extend_with(chain.to_delegate()); - io.extend_with(author.to_delegate()); - io.extend_with(system.to_delegate()); - io + let mut io = pubsub::PubSubHandler::default(); + io.extend_with(state.to_delegate()); + io.extend_with(chain.to_delegate()); + io.extend_with(author.to_delegate()); + io.extend_with(system.to_delegate()); + io } /// Start HTTP server listening on given address. 
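A quick note on `test_u256_codec` above before the RPC servers: the expected vectors follow from the fixed-width little-endian SCALE layout, sketched here (not part of the patch):

```rust
// Editorial sketch: `U256` encodes as exactly 32 little-endian bytes, which
// is why `res1` starts with 120 and is zero-padded to the full width.
use parity_codec::Encode;

fn u256_is_32_le_bytes() {
    let encoded = U256::from(120).encode();
    assert_eq!(encoded.len(), 32);
    assert_eq!(encoded[0], 120);
    assert!(encoded[1..].iter().all(|&b| b == 0));
}
```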
-pub fn start_http( - addr: &std::net::SocketAddr, - io: RpcHandler, -) -> io::Result { - http::ServerBuilder::new(io) - .threads(4) - .health_api(("/health", "system_health")) - .rest_api(http::RestApi::Unsecure) - .cors(http::DomainsValidation::Disabled) - .max_request_body_size(MAX_PAYLOAD) - .start_http(addr) +pub fn start_http(addr: &std::net::SocketAddr, io: RpcHandler) -> io::Result { + http::ServerBuilder::new(io) + .threads(4) + .health_api(("/health", "system_health")) + .rest_api(http::RestApi::Unsecure) + .cors(http::DomainsValidation::Disabled) + .max_request_body_size(MAX_PAYLOAD) + .start_http(addr) } /// Start WS server listening on given address. -pub fn start_ws( - addr: &std::net::SocketAddr, - io: RpcHandler, -) -> io::Result { - ws::ServerBuilder::with_meta_extractor(io, |context: &ws::RequestContext| Metadata::new(context.sender())) - .max_payload(MAX_PAYLOAD) - .start(addr) - .map_err(|err| match err { - ws::Error(ws::ErrorKind::Io(io), _) => io, - ws::Error(ws::ErrorKind::ConnectionClosed, _) => io::ErrorKind::BrokenPipe.into(), - ws::Error(e, _) => { - error!("{}", e); - io::ErrorKind::Other.into() - } - }) +pub fn start_ws(addr: &std::net::SocketAddr, io: RpcHandler) -> io::Result { + ws::ServerBuilder::with_meta_extractor(io, |context: &ws::RequestContext| { + Metadata::new(context.sender()) + }) + .max_payload(MAX_PAYLOAD) + .start(addr) + .map_err(|err| match err { + ws::Error(ws::ErrorKind::Io(io), _) => io, + ws::Error(ws::ErrorKind::ConnectionClosed, _) => io::ErrorKind::BrokenPipe.into(), + ws::Error(e, _) => { + error!("{}", e); + io::ErrorKind::Other.into() + } + }) } diff --git a/core/rpc/src/author/error.rs b/core/rpc/src/author/error.rs index 9c1ec23225..6470aced4e 100644 --- a/core/rpc/src/author/error.rs +++ b/core/rpc/src/author/error.rs @@ -16,35 +16,35 @@ //! Authoring RPC module errors. -use error_chain::*; +use crate::rpc; use client; +use error_chain::*; use transaction_pool::txpool; -use crate::rpc; use crate::errors; error_chain! { - links { - Pool(txpool::error::Error, txpool::error::ErrorKind) #[doc = "Pool error"]; - Client(client::error::Error, client::error::ErrorKind) #[doc = "Client error"]; - } - errors { - /// Not implemented yet - Unimplemented { - description("not yet implemented"), - display("Method Not Implemented"), - } - /// Incorrect extrinsic format. - BadFormat { - description("bad format"), - display("Invalid extrinsic format"), - } - /// Verification error - Verification(e: Box<::std::error::Error + Send>) { - description("extrinsic verification error"), - display("Extrinsic verification error: {}", e.description()), - } - } + links { + Pool(txpool::error::Error, txpool::error::ErrorKind) #[doc = "Pool error"]; + Client(client::error::Error, client::error::ErrorKind) #[doc = "Client error"]; + } + errors { + /// Not implemented yet + Unimplemented { + description("not yet implemented"), + display("Method Not Implemented"), + } + /// Incorrect extrinsic format. + BadFormat { + description("bad format"), + display("Invalid extrinsic format"), + } + /// Verification error + Verification(e: Box<::std::error::Error + Send>) { + description("extrinsic verification error"), + display("Extrinsic verification error: {}", e.description()), + } + } } /// Base code for all authorship errors. 
@@ -70,8 +70,8 @@ const POOL_CYCLE_DETECTED: i64 = POOL_INVALID_TX + 5;
const POOL_IMMEDIATELY_DROPPED: i64 = POOL_INVALID_TX + 6;

impl From<Error> for rpc::Error {
-	fn from(e: Error) -> Self {
-		match e {
+    fn from(e: Error) -> Self {
+        match e {
			Error(ErrorKind::Unimplemented, _) => errors::unimplemented(),
			Error(ErrorKind::BadFormat, _) => rpc::Error {
				code: rpc::ErrorCode::ServerError(BAD_FORMAT),
@@ -120,5 +120,5 @@ impl From<Error> for rpc::Error {
			},
			e => errors::internal(e),
		}
-	}
+    }
}
diff --git a/core/rpc/src/author/mod.rs b/core/rpc/src/author/mod.rs
index acd500ba0b..07cf25f2ad 100644
--- a/core/rpc/src/author/mod.rs
+++ b/core/rpc/src/author/mod.rs
@@ -18,25 +18,18 @@
use std::sync::Arc;

-use log::warn;
+use crate::rpc::futures::{Future, Sink, Stream};
+use crate::subscriptions::Subscriptions;
use client::{self, Client};
-use parity_codec::{Encode, Decode};
-use transaction_pool::{
-	txpool::{
-		ChainApi as PoolChainApi,
-		BlockHash,
-		ExHash,
-		IntoPoolError,
-		Pool,
-		watcher::Status,
-	},
-};
use jsonrpc_derive::rpc;
use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId};
-use primitives::{Bytes, Blake2Hasher, H256};
-use crate::rpc::futures::{Sink, Stream, Future};
+use log::warn;
+use parity_codec::{Decode, Encode};
+use primitives::{Blake2Hasher, Bytes, H256};
use runtime_primitives::{generic, traits};
-use crate::subscriptions::Subscriptions;
+use transaction_pool::txpool::{
+    watcher::Status, BlockHash, ChainApi as PoolChainApi, ExHash, IntoPoolError, Pool,
+};

pub mod error;

@@ -48,106 +41,147 @@ use self::error::Result;
/// Substrate authoring RPC API
#[rpc]
pub trait AuthorApi<Hash, BlockHash> {
-	/// RPC metadata
-	type Metadata;
-
-	/// Submit hex-encoded extrinsic for inclusion in block.
-	#[rpc(name = "author_submitExtrinsic")]
-	fn submit_extrinsic(&self, extrinsic: Bytes) -> Result<Hash>;
-
-	/// Returns all pending extrinsics, potentially grouped by sender.
-	#[rpc(name = "author_pendingExtrinsics")]
-	fn pending_extrinsics(&self) -> Result<Vec<Bytes>>;
-
-	/// Submit an extrinsic to watch.
-	#[pubsub(subscription = "author_extrinsicUpdate", subscribe, name = "author_submitAndWatchExtrinsic")]
-	fn watch_extrinsic(&self, metadata: Self::Metadata, subscriber: Subscriber<Status<Hash, BlockHash>>, bytes: Bytes);
-
-	/// Unsubscribe from extrinsic watching.
-	#[pubsub(subscription = "author_extrinsicUpdate", unsubscribe, name = "author_unwatchExtrinsic")]
-	fn unwatch_extrinsic(&self, metadata: Option<Self::Metadata>, id: SubscriptionId) -> Result<bool>;
+    /// RPC metadata
+    type Metadata;
+
+    /// Submit hex-encoded extrinsic for inclusion in block.
+    #[rpc(name = "author_submitExtrinsic")]
+    fn submit_extrinsic(&self, extrinsic: Bytes) -> Result<Hash>;
+
+    /// Returns all pending extrinsics, potentially grouped by sender.
+    #[rpc(name = "author_pendingExtrinsics")]
+    fn pending_extrinsics(&self) -> Result<Vec<Bytes>>;
+
+    /// Submit an extrinsic to watch.
+    #[pubsub(
+        subscription = "author_extrinsicUpdate",
+        subscribe,
+        name = "author_submitAndWatchExtrinsic"
+    )]
+    fn watch_extrinsic(
+        &self,
+        metadata: Self::Metadata,
+        subscriber: Subscriber<Status<Hash, BlockHash>>,
+        bytes: Bytes,
+    );
+
+    /// Unsubscribe from extrinsic watching.
+ #[pubsub( + subscription = "author_extrinsicUpdate", + unsubscribe, + name = "author_unwatchExtrinsic" + )] + fn unwatch_extrinsic( + &self, + metadata: Option, + id: SubscriptionId, + ) -> Result; } /// Authoring API -pub struct Author where P: PoolChainApi + Sync + Send + 'static { - /// Substrate client - client: Arc::Block, RA>>, - /// Extrinsic pool - pool: Arc>, - /// Subscriptions manager - subscriptions: Subscriptions, +pub struct Author +where + P: PoolChainApi + Sync + Send + 'static, +{ + /// Substrate client + client: Arc::Block, RA>>, + /// Extrinsic pool + pool: Arc>, + /// Subscriptions manager + subscriptions: Subscriptions, } -impl Author where P: PoolChainApi + Sync + Send + 'static { - /// Create new instance of Authoring API. - pub fn new( - client: Arc::Block, RA>>, - pool: Arc>, - subscriptions: Subscriptions, - ) -> Self { - Author { - client, - pool, - subscriptions, - } - } +impl Author +where + P: PoolChainApi + Sync + Send + 'static, +{ + /// Create new instance of Authoring API. + pub fn new( + client: Arc::Block, RA>>, + pool: Arc>, + subscriptions: Subscriptions, + ) -> Self { + Author { + client, + pool, + subscriptions, + } + } } -impl AuthorApi, BlockHash
<P>> for Author<B, E, P, RA> where
-	B: client::backend::Backend<<P as PoolChainApi>::Block, Blake2Hasher> + Send + Sync + 'static,
-	E: client::CallExecutor<<P as PoolChainApi>::Block, Blake2Hasher> + Send + Sync + 'static,
-	P: PoolChainApi + Sync + Send + 'static,
-	P::Block: traits::Block<Hash=H256>,
-	P::Error: 'static,
-	RA: Send + Sync + 'static
+impl<B, E, P, RA> AuthorApi<ExHash<P>, BlockHash<P>> for Author<B, E, P, RA>
+where
+    B: client::backend::Backend<<P as PoolChainApi>::Block, Blake2Hasher> + Send + Sync + 'static,
+    E: client::CallExecutor<<P as PoolChainApi>::Block, Blake2Hasher> + Send + Sync + 'static,
+    P: PoolChainApi + Sync + Send + 'static,
+    P::Block: traits::Block<Hash = H256>,
+    P::Error: 'static,
+    RA: Send + Sync + 'static,
{
-	type Metadata = crate::metadata::Metadata;
-
-	fn submit_extrinsic(&self, ext: Bytes) -> Result<ExHash<P>> {
-		let xt = Decode::decode(&mut &ext[..]).ok_or(error::Error::from(error::ErrorKind::BadFormat))?;
-		let best_block_hash = self.client.info()?.chain.best_hash;
-		self.pool
-			.submit_one(&generic::BlockId::hash(best_block_hash), xt)
-			.map_err(|e| e.into_pool_error()
-				.map(Into::into)
-				.unwrap_or_else(|e| error::ErrorKind::Verification(Box::new(e)).into())
-			)
-	}
-
-	fn pending_extrinsics(&self) -> Result<Vec<Bytes>> {
-		Ok(self.pool.ready().map(|tx| tx.data.encode().into()).collect())
-	}
-
-	fn watch_extrinsic(&self, _metadata: Self::Metadata, subscriber: Subscriber<Status<ExHash<P>, BlockHash<P>>>, xt: Bytes) {
-		let submit = || -> Result<_> {
-			let best_block_hash = self.client.info()?.chain.best_hash;
-			let dxt = <<P as PoolChainApi>::Block as traits::Block>::Extrinsic::decode(&mut &xt[..]).ok_or(error::Error::from(error::ErrorKind::BadFormat))?;
-			self.pool
-				.submit_and_watch(&generic::BlockId::hash(best_block_hash), dxt)
-				.map_err(|e| e.into_pool_error()
-					.map(Into::into)
-					.unwrap_or_else(|e| error::ErrorKind::Verification(Box::new(e)).into())
-				)
-		};
-
-		let watcher = match submit() {
-			Ok(watcher) => watcher,
-			Err(err) => {
-				// reject the subscriber (ignore errors - we don't care if subscriber is no longer there).
-				let _ = subscriber.reject(err.into());
-				return;
-			},
-		};
-
-		self.subscriptions.add(subscriber, move |sink| {
-			sink
-				.sink_map_err(|e| warn!("Error sending notifications: {:?}", e))
-				.send_all(watcher.into_stream().map(Ok))
-				.map(|_| ())
-		})
-	}
-
-	fn unwatch_extrinsic(&self, _metadata: Option<Self::Metadata>, id: SubscriptionId) -> Result<bool> {
-		Ok(self.subscriptions.cancel(id))
-	}
+    type Metadata = crate::metadata::Metadata;
+
+    fn submit_extrinsic(&self, ext: Bytes) -> Result<ExHash<P>> {
+        let xt =
+            Decode::decode(&mut &ext[..]).ok_or(error::Error::from(error::ErrorKind::BadFormat))?;
+        let best_block_hash = self.client.info()?.chain.best_hash;
+        self.pool
+            .submit_one(&generic::BlockId::hash(best_block_hash), xt)
+            .map_err(|e| {
+                e.into_pool_error()
+                    .map(Into::into)
+                    .unwrap_or_else(|e| error::ErrorKind::Verification(Box::new(e)).into())
+            })
+    }
+
+    fn pending_extrinsics(&self) -> Result<Vec<Bytes>> {
+        Ok(self
+            .pool
+            .ready()
+            .map(|tx| tx.data.encode().into())
+            .collect())
+    }
+
+    fn watch_extrinsic(
+        &self,
+        _metadata: Self::Metadata,
+        subscriber: Subscriber<Status<ExHash<P>, BlockHash<P>>>,
+        xt: Bytes,
+    ) {
+        let submit = || -> Result<_> {
+            let best_block_hash = self.client.info()?.chain.best_hash;
+            let dxt =
+                <<P as PoolChainApi>
::Block as traits::Block>::Extrinsic::decode(&mut &xt[..]) + .ok_or(error::Error::from(error::ErrorKind::BadFormat))?; + self.pool + .submit_and_watch(&generic::BlockId::hash(best_block_hash), dxt) + .map_err(|e| { + e.into_pool_error() + .map(Into::into) + .unwrap_or_else(|e| error::ErrorKind::Verification(Box::new(e)).into()) + }) + }; + + let watcher = match submit() { + Ok(watcher) => watcher, + Err(err) => { + // reject the subscriber (ignore errors - we don't care if subscriber is no longer there). + let _ = subscriber.reject(err.into()); + return; + } + }; + + self.subscriptions.add(subscriber, move |sink| { + sink.sink_map_err(|e| warn!("Error sending notifications: {:?}", e)) + .send_all(watcher.into_stream().map(Ok)) + .map(|_| ()) + }) + } + + fn unwatch_extrinsic( + &self, + _metadata: Option, + id: SubscriptionId, + ) -> Result { + Ok(self.subscriptions.cancel(id)) + } } diff --git a/core/rpc/src/author/tests.rs b/core/rpc/src/author/tests.rs index 53166e76f8..5c893d3d5c 100644 --- a/core/rpc/src/author/tests.rs +++ b/core/rpc/src/author/tests.rs @@ -16,107 +16,117 @@ use super::*; -use std::sync::Arc; use assert_matches::assert_matches; use parity_codec::Encode; -use transaction_pool::{ - txpool::Pool, - ChainApi, +use primitives::{blake2_256, hexdisplay::HexDisplay, H256}; +use std::sync::Arc; +use test_client::{ + self, + runtime::{Extrinsic, Transfer}, + AccountKeyring, }; -use primitives::{H256, blake2_256, hexdisplay::HexDisplay}; -use test_client::{self, AccountKeyring, runtime::{Extrinsic, Transfer}}; use tokio::runtime; +use transaction_pool::{txpool::Pool, ChainApi}; fn uxt(sender: AccountKeyring, nonce: u64) -> Extrinsic { - let tx = Transfer { - amount: Default::default(), - nonce, - from: sender.into(), - to: Default::default(), - }; - let signature = AccountKeyring::from_public(&tx.from).unwrap().sign(&tx.encode()).into(); - Extrinsic::Transfer(tx, signature) + let tx = Transfer { + amount: Default::default(), + nonce, + from: sender.into(), + to: Default::default(), + }; + let signature = AccountKeyring::from_public(&tx.from) + .unwrap() + .sign(&tx.encode()) + .into(); + Extrinsic::Transfer(tx, signature) } #[test] fn submit_transaction_should_not_cause_error() { - let runtime = runtime::Runtime::new().unwrap(); - let client = Arc::new(test_client::new()); - let p = Author { - client: client.clone(), - pool: Arc::new(Pool::new(Default::default(), ChainApi::new(client))), - subscriptions: Subscriptions::new(runtime.executor()), - }; - let xt = uxt(AccountKeyring::Alice, 1).encode(); - let h: H256 = blake2_256(&xt).into(); + let runtime = runtime::Runtime::new().unwrap(); + let client = Arc::new(test_client::new()); + let p = Author { + client: client.clone(), + pool: Arc::new(Pool::new(Default::default(), ChainApi::new(client))), + subscriptions: Subscriptions::new(runtime.executor()), + }; + let xt = uxt(AccountKeyring::Alice, 1).encode(); + let h: H256 = blake2_256(&xt).into(); - assert_matches!( - AuthorApi::submit_extrinsic(&p, xt.clone().into()), - Ok(h2) if h == h2 - ); - assert!( - AuthorApi::submit_extrinsic(&p, xt.into()).is_err() - ); + assert_matches!( + AuthorApi::submit_extrinsic(&p, xt.clone().into()), + Ok(h2) if h == h2 + ); + assert!(AuthorApi::submit_extrinsic(&p, xt.into()).is_err()); } #[test] fn submit_rich_transaction_should_not_cause_error() { - let runtime = runtime::Runtime::new().unwrap(); - let client = Arc::new(test_client::new()); - let p = Author { - client: client.clone(), - pool: Arc::new(Pool::new(Default::default(), 
ChainApi::new(client.clone()))), - subscriptions: Subscriptions::new(runtime.executor()), - }; - let xt = uxt(AccountKeyring::Alice, 0).encode(); - let h: H256 = blake2_256(&xt).into(); + let runtime = runtime::Runtime::new().unwrap(); + let client = Arc::new(test_client::new()); + let p = Author { + client: client.clone(), + pool: Arc::new(Pool::new(Default::default(), ChainApi::new(client.clone()))), + subscriptions: Subscriptions::new(runtime.executor()), + }; + let xt = uxt(AccountKeyring::Alice, 0).encode(); + let h: H256 = blake2_256(&xt).into(); - assert_matches!( - AuthorApi::submit_extrinsic(&p, xt.clone().into()), - Ok(h2) if h == h2 - ); - assert!( - AuthorApi::submit_extrinsic(&p, xt.into()).is_err() - ); + assert_matches!( + AuthorApi::submit_extrinsic(&p, xt.clone().into()), + Ok(h2) if h == h2 + ); + assert!(AuthorApi::submit_extrinsic(&p, xt.into()).is_err()); } #[test] fn should_watch_extrinsic() { - //given - let mut runtime = runtime::Runtime::new().unwrap(); - let client = Arc::new(test_client::new()); - let pool = Arc::new(Pool::new(Default::default(), ChainApi::new(client.clone()))); - let p = Author { - client, - pool: pool.clone(), - subscriptions: Subscriptions::new(runtime.executor()), - }; - let (subscriber, id_rx, data) = ::jsonrpc_pubsub::typed::Subscriber::new_test("test"); + //given + let mut runtime = runtime::Runtime::new().unwrap(); + let client = Arc::new(test_client::new()); + let pool = Arc::new(Pool::new(Default::default(), ChainApi::new(client.clone()))); + let p = Author { + client, + pool: pool.clone(), + subscriptions: Subscriptions::new(runtime.executor()), + }; + let (subscriber, id_rx, data) = ::jsonrpc_pubsub::typed::Subscriber::new_test("test"); - // when - p.watch_extrinsic(Default::default(), subscriber, uxt(AccountKeyring::Alice, 0).encode().into()); + // when + p.watch_extrinsic( + Default::default(), + subscriber, + uxt(AccountKeyring::Alice, 0).encode().into(), + ); - // then - assert_eq!(runtime.block_on(id_rx), Ok(Ok(1.into()))); - // check notifications - let replacement = { - let tx = Transfer { - amount: 5, - nonce: 0, - from: AccountKeyring::Alice.into(), - to: Default::default(), - }; - let signature = AccountKeyring::from_public(&tx.from).unwrap().sign(&tx.encode()).into(); - Extrinsic::Transfer(tx, signature) - }; - AuthorApi::submit_extrinsic(&p, replacement.encode().into()).unwrap(); - let (res, data) = runtime.block_on(data.into_future()).unwrap(); - assert_eq!( - res, - Some(r#"{"jsonrpc":"2.0","method":"test","params":{"result":"ready","subscription":1}}"#.into()) - ); - let h = blake2_256(&replacement.encode()); - assert_eq!( + // then + assert_eq!(runtime.block_on(id_rx), Ok(Ok(1.into()))); + // check notifications + let replacement = { + let tx = Transfer { + amount: 5, + nonce: 0, + from: AccountKeyring::Alice.into(), + to: Default::default(), + }; + let signature = AccountKeyring::from_public(&tx.from) + .unwrap() + .sign(&tx.encode()) + .into(); + Extrinsic::Transfer(tx, signature) + }; + AuthorApi::submit_extrinsic(&p, replacement.encode().into()).unwrap(); + let (res, data) = runtime.block_on(data.into_future()).unwrap(); + assert_eq!( + res, + Some( + r#"{"jsonrpc":"2.0","method":"test","params":{"result":"ready","subscription":1}}"# + .into() + ) + ); + let h = blake2_256(&replacement.encode()); + assert_eq!( runtime.block_on(data.into_future()).unwrap().0, Some(format!(r#"{{"jsonrpc":"2.0","method":"test","params":{{"result":{{"usurped":"0x{}"}},"subscription":1}}}}"#, HexDisplay::from(&h))) ); @@ -124,18 +134,18 
@@ fn should_watch_extrinsic() { #[test] fn should_return_pending_extrinsics() { - let runtime = runtime::Runtime::new().unwrap(); - let client = Arc::new(test_client::new()); - let pool = Arc::new(Pool::new(Default::default(), ChainApi::new(client.clone()))); - let p = Author { - client, - pool: pool.clone(), - subscriptions: Subscriptions::new(runtime.executor()), - }; - let ex = uxt(AccountKeyring::Alice, 0); - AuthorApi::submit_extrinsic(&p, ex.encode().into()).unwrap(); - assert_matches!( - p.pending_extrinsics(), - Ok(ref expected) if *expected == vec![Bytes(ex.encode())] - ); + let runtime = runtime::Runtime::new().unwrap(); + let client = Arc::new(test_client::new()); + let pool = Arc::new(Pool::new(Default::default(), ChainApi::new(client.clone()))); + let p = Author { + client, + pool: pool.clone(), + subscriptions: Subscriptions::new(runtime.executor()), + }; + let ex = uxt(AccountKeyring::Alice, 0); + AuthorApi::submit_extrinsic(&p, ex.encode().into()).unwrap(); + assert_matches!( + p.pending_extrinsics(), + Ok(ref expected) if *expected == vec![Bytes(ex.encode())] + ); } diff --git a/core/rpc/src/chain/error.rs b/core/rpc/src/chain/error.rs index c52d44eddc..4893e187c9 100644 --- a/core/rpc/src/chain/error.rs +++ b/core/rpc/src/chain/error.rs @@ -14,29 +14,29 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use error_chain::*; -use client; -use crate::rpc; use crate::errors; +use crate::rpc; +use client; +use error_chain::*; error_chain! { - links { - Client(client::error::Error, client::error::ErrorKind) #[doc = "Client error"]; - } - errors { - /// Not implemented yet - Unimplemented { - description("not yet implemented"), - display("Method Not Implemented"), - } - } + links { + Client(client::error::Error, client::error::ErrorKind) #[doc = "Client error"]; + } + errors { + /// Not implemented yet + Unimplemented { + description("not yet implemented"), + display("Method Not Implemented"), + } + } } impl From for rpc::Error { - fn from(e: Error) -> Self { - match e { - Error(ErrorKind::Unimplemented, _) => errors::unimplemented(), - e => errors::internal(e), - } - } + fn from(e: Error) -> Self { + match e { + Error(ErrorKind::Unimplemented, _) => errors::unimplemented(), + e => errors::internal(e), + } + } } diff --git a/core/rpc/src/chain/mod.rs b/core/rpc/src/chain/mod.rs index de7ad3faad..5391df283f 100644 --- a/core/rpc/src/chain/mod.rs +++ b/core/rpc/src/chain/mod.rs @@ -18,210 +18,239 @@ use std::sync::Arc; -use log::warn; -use client::{self, Client, BlockchainEvents}; +use crate::rpc::futures::{stream, Future, Sink, Stream}; +use crate::rpc::Result as RpcResult; +use client::{self, BlockchainEvents, Client}; use jsonrpc_derive::rpc; use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId}; -use primitives::{H256, Blake2Hasher}; -use crate::rpc::Result as RpcResult; -use crate::rpc::futures::{stream, Future, Sink, Stream}; +use log::warn; +use primitives::{Blake2Hasher, H256}; use runtime_primitives::generic::{BlockId, SignedBlock}; use runtime_primitives::traits::{Block as BlockT, Header, NumberFor}; use crate::subscriptions::Subscriptions; mod error; +mod number; #[cfg(test)] mod tests; -mod number; use self::error::Result; /// Substrate blockchain API #[rpc] pub trait ChainApi { - /// RPC metadata - type Metadata; - - /// Get header of a relay chain block. - #[rpc(name = "chain_getHeader")] - fn header(&self, hash: Option) -> Result>; - - /// Get header and body of a relay chain block. 
- #[rpc(name = "chain_getBlock")] - fn block(&self, hash: Option) -> Result>; - - /// Get hash of the n-th block in the canon chain. - /// - /// By default returns latest block hash. - #[rpc(name = "chain_getBlockHash", alias("chain_getHead"))] - fn block_hash(&self, hash: Option>) -> Result>; - - /// Get hash of the last finalized block in the canon chain. - #[rpc(name = "chain_getFinalizedHead", alias("chain_getFinalisedHead"))] - fn finalized_head(&self) -> Result; - - /// New head subscription - #[pubsub( - subscription = "chain_newHead", - subscribe, - name = "chain_subscribeNewHead", - alias("subscribe_newHead") - )] - fn subscribe_new_head(&self, metadata: Self::Metadata, subscriber: Subscriber
<Header>);
-
-	/// Unsubscribe from new head subscription.
-	#[pubsub(
-		subscription = "chain_newHead",
-		unsubscribe,
-		name = "chain_unsubscribeNewHead",
-		alias("unsubscribe_newHead")
-	)]
-	fn unsubscribe_new_head(&self, metadata: Option<Self::Metadata>, id: SubscriptionId) -> RpcResult<bool>;
-
-	/// New head subscription
-	#[pubsub(
-		subscription = "chain_finalizedHead",
-		subscribe,
-		name = "chain_subscribeFinalizedHeads",
-		alias("chain_subscribeFinalisedHeads")
-	)]
-	fn subscribe_finalized_heads(&self, metadata: Self::Metadata, subscriber: Subscriber<Header>);
-
-	/// Unsubscribe from new head subscription.
-	#[pubsub(
-		subscription = "chain_finalizedHead",
-		unsubscribe,
-		name = "chain_unsubscribeFinalizedHeads",
-		alias("chain_unsubscribeFinalisedHeads")
-	)]
-	fn unsubscribe_finalized_heads(&self, metadata: Option<Self::Metadata>, id: SubscriptionId) -> RpcResult<bool>;
+    /// RPC metadata
+    type Metadata;
+
+    /// Get header of a relay chain block.
+    #[rpc(name = "chain_getHeader")]
+    fn header(&self, hash: Option<Hash>) -> Result<Option<Header>>;
+
+    /// Get header and body of a relay chain block.
+    #[rpc(name = "chain_getBlock")]
+    fn block(&self, hash: Option<Hash>) -> Result<Option<SignedBlock>>;
+
+    /// Get hash of the n-th block in the canon chain.
+    ///
+    /// By default returns latest block hash.
+    #[rpc(name = "chain_getBlockHash", alias("chain_getHead"))]
+    fn block_hash(&self, hash: Option<NumberOrHex<Number>>) -> Result<Option<Hash>>;
+
+    /// Get hash of the last finalized block in the canon chain.
+    #[rpc(name = "chain_getFinalizedHead", alias("chain_getFinalisedHead"))]
+    fn finalized_head(&self) -> Result<Hash>;
+
+    /// New head subscription
+    #[pubsub(
+        subscription = "chain_newHead",
+        subscribe,
+        name = "chain_subscribeNewHead",
+        alias("subscribe_newHead")
+    )]
+    fn subscribe_new_head(&self, metadata: Self::Metadata, subscriber: Subscriber<Header>);
+
+    /// Unsubscribe from new head subscription.
+    #[pubsub(
+        subscription = "chain_newHead",
+        unsubscribe,
+        name = "chain_unsubscribeNewHead",
+        alias("unsubscribe_newHead")
+    )]
+    fn unsubscribe_new_head(
+        &self,
+        metadata: Option<Self::Metadata>,
+        id: SubscriptionId,
+    ) -> RpcResult<bool>;
+
+    /// New head subscription
+    #[pubsub(
+        subscription = "chain_finalizedHead",
+        subscribe,
+        name = "chain_subscribeFinalizedHeads",
+        alias("chain_subscribeFinalisedHeads")
+    )]
+    fn subscribe_finalized_heads(&self, metadata: Self::Metadata, subscriber: Subscriber<Header>
); + + /// Unsubscribe from new head subscription. + #[pubsub( + subscription = "chain_finalizedHead", + unsubscribe, + name = "chain_unsubscribeFinalizedHeads", + alias("chain_unsubscribeFinalisedHeads") + )] + fn unsubscribe_finalized_heads( + &self, + metadata: Option, + id: SubscriptionId, + ) -> RpcResult; } /// Chain API with subscriptions support. pub struct Chain { - /// Substrate client. - client: Arc>, - /// Current subscriptions. - subscriptions: Subscriptions, + /// Substrate client. + client: Arc>, + /// Current subscriptions. + subscriptions: Subscriptions, } impl Chain { - /// Create new Chain API RPC handler. - pub fn new(client: Arc>, subscriptions: Subscriptions) -> Self { - Self { - client, - subscriptions, - } - } + /// Create new Chain API RPC handler. + pub fn new(client: Arc>, subscriptions: Subscriptions) -> Self { + Self { + client, + subscriptions, + } + } } -impl Chain where - Block: BlockT + 'static, - B: client::backend::Backend + Send + Sync + 'static, - E: client::CallExecutor + Send + Sync + 'static, - RA: Send + Sync + 'static +impl Chain +where + Block: BlockT + 'static, + B: client::backend::Backend + Send + Sync + 'static, + E: client::CallExecutor + Send + Sync + 'static, + RA: Send + Sync + 'static, { - fn unwrap_or_best(&self, hash: Option) -> Result { - Ok(match hash.into() { - None => self.client.info()?.chain.best_hash, - Some(hash) => hash, - }) - } - - fn subscribe_headers( - &self, - subscriber: Subscriber, - best_block_hash: G, - stream: F, - ) where - F: FnOnce() -> S, - G: FnOnce() -> Result>, - ERR: ::std::fmt::Debug, - S: Stream + Send + 'static, - { - self.subscriptions.add(subscriber, |sink| { - // send current head right at the start. - let header = best_block_hash() - .and_then(|hash| self.header(hash.into())) - .and_then(|header| { - header.ok_or_else(|| self::error::ErrorKind::Unimplemented.into()) - }) - .map_err(Into::into); - - // send further subscriptions - let stream = stream() - .map(|res| Ok(res)) - .map_err(|e| warn!("Block notification stream error: {:?}", e)); - - sink - .sink_map_err(|e| warn!("Error sending notifications: {:?}", e)) - .send_all( - stream::iter_result(vec![Ok(header)]) - .chain(stream) - ) - // we ignore the resulting Stream (if the first stream is over we are unsubscribed) - .map(|_| ()) - }); - } + fn unwrap_or_best(&self, hash: Option) -> Result { + Ok(match hash.into() { + None => self.client.info()?.chain.best_hash, + Some(hash) => hash, + }) + } + + fn subscribe_headers( + &self, + subscriber: Subscriber, + best_block_hash: G, + stream: F, + ) where + F: FnOnce() -> S, + G: FnOnce() -> Result>, + ERR: ::std::fmt::Debug, + S: Stream + Send + 'static, + { + self.subscriptions.add(subscriber, |sink| { + // send current head right at the start. 
+ let header = best_block_hash() + .and_then(|hash| self.header(hash.into())) + .and_then(|header| { + header.ok_or_else(|| self::error::ErrorKind::Unimplemented.into()) + }) + .map_err(Into::into); + + // send further subscriptions + let stream = stream() + .map(|res| Ok(res)) + .map_err(|e| warn!("Block notification stream error: {:?}", e)); + + sink.sink_map_err(|e| warn!("Error sending notifications: {:?}", e)) + .send_all(stream::iter_result(vec![Ok(header)]).chain(stream)) + // we ignore the resulting Stream (if the first stream is over we are unsubscribed) + .map(|_| ()) + }); + } } -impl ChainApi, Block::Hash, Block::Header, SignedBlock> for Chain where - Block: BlockT + 'static, - B: client::backend::Backend + Send + Sync + 'static, - E: client::CallExecutor + Send + Sync + 'static, - RA: Send + Sync + 'static +impl ChainApi, Block::Hash, Block::Header, SignedBlock> + for Chain +where + Block: BlockT + 'static, + B: client::backend::Backend + Send + Sync + 'static, + E: client::CallExecutor + Send + Sync + 'static, + RA: Send + Sync + 'static, { - type Metadata = crate::metadata::Metadata; - - fn header(&self, hash: Option) -> Result> { - let hash = self.unwrap_or_best(hash)?; - Ok(self.client.header(&BlockId::Hash(hash))?) - } - - fn block(&self, hash: Option) - -> Result>> - { - let hash = self.unwrap_or_best(hash)?; - Ok(self.client.block(&BlockId::Hash(hash))?) - } - - fn block_hash(&self, number: Option>>) -> Result> { - let number: Option>> = number.into(); - Ok(match number { - None => Some(self.client.info()?.chain.best_hash), - Some(num_or_hex) => self.client.header(&BlockId::number(num_or_hex.to_number()?))?.map(|h| h.hash()), - }) - } - - fn finalized_head(&self) -> Result { - Ok(self.client.info()?.chain.finalized_hash) - } - - fn subscribe_new_head(&self, _metadata: Self::Metadata, subscriber: Subscriber) { - self.subscribe_headers( - subscriber, - || self.block_hash(None.into()), - || self.client.import_notification_stream() - .filter(|notification| notification.is_new_best) - .map(|notification| notification.header), - ) - } - - fn unsubscribe_new_head(&self, _metadata: Option, id: SubscriptionId) -> RpcResult { - Ok(self.subscriptions.cancel(id)) - } - - fn subscribe_finalized_heads(&self, _meta: Self::Metadata, subscriber: Subscriber) { - self.subscribe_headers( - subscriber, - || Ok(Some(self.client.info()?.chain.finalized_hash)), - || self.client.finality_notification_stream() - .map(|notification| notification.header), - ) - } - - fn unsubscribe_finalized_heads(&self, _metadata: Option, id: SubscriptionId) -> RpcResult { - Ok(self.subscriptions.cancel(id)) - } + type Metadata = crate::metadata::Metadata; + + fn header(&self, hash: Option) -> Result> { + let hash = self.unwrap_or_best(hash)?; + Ok(self.client.header(&BlockId::Hash(hash))?) + } + + fn block(&self, hash: Option) -> Result>> { + let hash = self.unwrap_or_best(hash)?; + Ok(self.client.block(&BlockId::Hash(hash))?) + } + + fn block_hash( + &self, + number: Option>>, + ) -> Result> { + let number: Option>> = number.into(); + Ok(match number { + None => Some(self.client.info()?.chain.best_hash), + Some(num_or_hex) => self + .client + .header(&BlockId::number(num_or_hex.to_number()?))? 
+ .map(|h| h.hash()), + }) + } + + fn finalized_head(&self) -> Result { + Ok(self.client.info()?.chain.finalized_hash) + } + + fn subscribe_new_head(&self, _metadata: Self::Metadata, subscriber: Subscriber) { + self.subscribe_headers( + subscriber, + || self.block_hash(None.into()), + || { + self.client + .import_notification_stream() + .filter(|notification| notification.is_new_best) + .map(|notification| notification.header) + }, + ) + } + + fn unsubscribe_new_head( + &self, + _metadata: Option, + id: SubscriptionId, + ) -> RpcResult { + Ok(self.subscriptions.cancel(id)) + } + + fn subscribe_finalized_heads( + &self, + _meta: Self::Metadata, + subscriber: Subscriber, + ) { + self.subscribe_headers( + subscriber, + || Ok(Some(self.client.info()?.chain.finalized_hash)), + || { + self.client + .finality_notification_stream() + .map(|notification| notification.header) + }, + ) + } + + fn unsubscribe_finalized_heads( + &self, + _metadata: Option, + id: SubscriptionId, + ) -> RpcResult { + Ok(self.subscriptions.cancel(id)) + } } diff --git a/core/rpc/src/chain/number.rs b/core/rpc/src/chain/number.rs index bdf4b4df03..c759a03b09 100644 --- a/core/rpc/src/chain/number.rs +++ b/core/rpc/src/chain/number.rs @@ -14,9 +14,9 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use serde_derive::Deserialize; use primitives::U256; use runtime_primitives::traits; +use serde_derive::Deserialize; /// RPC Block number type /// @@ -28,43 +28,43 @@ use runtime_primitives::traits; #[derive(Deserialize)] #[serde(untagged)] pub enum NumberOrHex { - /// The original header number type of block. - Number(Number), - /// Hex representation of the block number. - Hex(U256), + /// The original header number type of block. + Number(Number), + /// Hex representation of the block number. + Hex(U256), } impl> NumberOrHex { - /// Attempts to convert into concrete block number. - /// - /// Fails in case hex number is too big. - pub fn to_number(self) -> Result { - match self { - NumberOrHex::Number(n) => Ok(n), - NumberOrHex::Hex(h) => { - // FIXME #1377 this only supports `u64` since `BlockNumber` - // is `As` we could possibly go with `u128`. - let l = h.low_u64(); - if U256::from(l) != h { - Err(format!("`{}` does not fit into the block number type.", h)) - } else { - Ok(traits::As::sa(l)) - } - }, - } - } + /// Attempts to convert into concrete block number. + /// + /// Fails in case hex number is too big. + pub fn to_number(self) -> Result { + match self { + NumberOrHex::Number(n) => Ok(n), + NumberOrHex::Hex(h) => { + // FIXME #1377 this only supports `u64` since `BlockNumber` + // is `As` we could possibly go with `u128`. 
+ let l = h.low_u64(); + if U256::from(l) != h { + Err(format!("`{}` does not fit into the block number type.", h)) + } else { + Ok(traits::As::sa(l)) + } + } + } + } } #[cfg(test)] impl From for NumberOrHex { - fn from(n: u64) -> Self { - NumberOrHex::Number(n) - } + fn from(n: u64) -> Self { + NumberOrHex::Number(n) + } } #[cfg(test)] impl From for NumberOrHex { - fn from(n: U256) -> Self { - NumberOrHex::Hex(n) - } + fn from(n: U256) -> Self { + NumberOrHex::Hex(n) + } } diff --git a/core/rpc/src/chain/tests.rs b/core/rpc/src/chain/tests.rs index 26b7202305..84fe6a814b 100644 --- a/core/rpc/src/chain/tests.rs +++ b/core/rpc/src/chain/tests.rs @@ -16,237 +16,247 @@ use super::*; use assert_matches::assert_matches; -use test_client::{self, TestClient}; -use test_client::runtime::{H256, Block, Header}; use consensus::BlockOrigin; +use test_client::runtime::{Block, Header, H256}; +use test_client::{self, TestClient}; #[test] fn should_return_header() { - let core = ::tokio::runtime::Runtime::new().unwrap(); - let remote = core.executor(); - - let client = Chain { - client: Arc::new(test_client::new()), - subscriptions: Subscriptions::new(remote), - }; - - assert_matches!( - client.header(Some(client.client.genesis_hash()).into()), - Ok(Some(ref x)) if x == &Header { - parent_hash: H256::from_low_u64_be(0), - number: 0, - state_root: x.state_root.clone(), - extrinsics_root: "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314".parse().unwrap(), - digest: Default::default(), - } - ); - - assert_matches!( - client.header(None.into()), - Ok(Some(ref x)) if x == &Header { - parent_hash: H256::from_low_u64_be(0), - number: 0, - state_root: x.state_root.clone(), - extrinsics_root: "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314".parse().unwrap(), - digest: Default::default(), - } - ); - - assert_matches!( - client.header(Some(H256::from_low_u64_be(5)).into()), - Ok(None) - ); + let core = ::tokio::runtime::Runtime::new().unwrap(); + let remote = core.executor(); + + let client = Chain { + client: Arc::new(test_client::new()), + subscriptions: Subscriptions::new(remote), + }; + + assert_matches!( + client.header(Some(client.client.genesis_hash()).into()), + Ok(Some(ref x)) if x == &Header { + parent_hash: H256::from_low_u64_be(0), + number: 0, + state_root: x.state_root.clone(), + extrinsics_root: "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314".parse().unwrap(), + digest: Default::default(), + } + ); + + assert_matches!( + client.header(None.into()), + Ok(Some(ref x)) if x == &Header { + parent_hash: H256::from_low_u64_be(0), + number: 0, + state_root: x.state_root.clone(), + extrinsics_root: "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314".parse().unwrap(), + digest: Default::default(), + } + ); + + assert_matches!( + client.header(Some(H256::from_low_u64_be(5)).into()), + Ok(None) + ); } #[test] fn should_return_a_block() { - let core = ::tokio::runtime::Runtime::new().unwrap(); - let remote = core.executor(); - - let api = Chain { - client: Arc::new(test_client::new()), - subscriptions: Subscriptions::new(remote), - }; - - let block = api.client.new_block().unwrap().bake().unwrap(); - let block_hash = block.hash(); - api.client.import(BlockOrigin::Own, block).unwrap(); - - // Genesis block is not justified - assert_matches!( - api.block(Some(api.client.genesis_hash()).into()), - Ok(Some(SignedBlock { justification: None, .. 
})) - ); - - assert_matches!( - api.block(Some(block_hash).into()), - Ok(Some(ref x)) if x.block == Block { - header: Header { - parent_hash: api.client.genesis_hash(), - number: 1, - state_root: x.block.header.state_root.clone(), - extrinsics_root: "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314".parse().unwrap(), - digest: Default::default(), - }, - extrinsics: vec![], - } - ); - - assert_matches!( - api.block(None.into()), - Ok(Some(ref x)) if x.block == Block { - header: Header { - parent_hash: api.client.genesis_hash(), - number: 1, - state_root: x.block.header.state_root.clone(), - extrinsics_root: "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314".parse().unwrap(), - digest: Default::default(), - }, - extrinsics: vec![], - } - ); - - assert_matches!( - api.block(Some(H256::from_low_u64_be(5)).into()), - Ok(None) - ); + let core = ::tokio::runtime::Runtime::new().unwrap(); + let remote = core.executor(); + + let api = Chain { + client: Arc::new(test_client::new()), + subscriptions: Subscriptions::new(remote), + }; + + let block = api.client.new_block().unwrap().bake().unwrap(); + let block_hash = block.hash(); + api.client.import(BlockOrigin::Own, block).unwrap(); + + // Genesis block is not justified + assert_matches!( + api.block(Some(api.client.genesis_hash()).into()), + Ok(Some(SignedBlock { + justification: None, + .. + })) + ); + + assert_matches!( + api.block(Some(block_hash).into()), + Ok(Some(ref x)) if x.block == Block { + header: Header { + parent_hash: api.client.genesis_hash(), + number: 1, + state_root: x.block.header.state_root.clone(), + extrinsics_root: "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314".parse().unwrap(), + digest: Default::default(), + }, + extrinsics: vec![], + } + ); + + assert_matches!( + api.block(None.into()), + Ok(Some(ref x)) if x.block == Block { + header: Header { + parent_hash: api.client.genesis_hash(), + number: 1, + state_root: x.block.header.state_root.clone(), + extrinsics_root: "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314".parse().unwrap(), + digest: Default::default(), + }, + extrinsics: vec![], + } + ); + + assert_matches!(api.block(Some(H256::from_low_u64_be(5)).into()), Ok(None)); } #[test] fn should_return_block_hash() { - let core = ::tokio::runtime::Runtime::new().unwrap(); - let remote = core.executor(); - - let client = Chain { - client: Arc::new(test_client::new()), - subscriptions: Subscriptions::new(remote), - }; - - assert_matches!( - client.block_hash(None.into()), - Ok(Some(ref x)) if x == &client.client.genesis_hash() - ); - - - assert_matches!( - client.block_hash(Some(0u64.into()).into()), - Ok(Some(ref x)) if x == &client.client.genesis_hash() - ); - - assert_matches!( - client.block_hash(Some(1u64.into()).into()), - Ok(None) - ); - - let block = client.client.new_block().unwrap().bake().unwrap(); - client.client.import(BlockOrigin::Own, block.clone()).unwrap(); - - assert_matches!( - client.block_hash(Some(0u64.into()).into()), - Ok(Some(ref x)) if x == &client.client.genesis_hash() - ); - assert_matches!( - client.block_hash(Some(1u64.into()).into()), - Ok(Some(ref x)) if x == &block.hash() - ); - assert_matches!( - client.block_hash(Some(::primitives::U256::from(1u64).into()).into()), - Ok(Some(ref x)) if x == &block.hash() - ); + let core = ::tokio::runtime::Runtime::new().unwrap(); + let remote = core.executor(); + + let client = Chain { + client: Arc::new(test_client::new()), + subscriptions: Subscriptions::new(remote), + }; + + 
assert_matches!( + client.block_hash(None.into()), + Ok(Some(ref x)) if x == &client.client.genesis_hash() + ); + + assert_matches!( + client.block_hash(Some(0u64.into()).into()), + Ok(Some(ref x)) if x == &client.client.genesis_hash() + ); + + assert_matches!(client.block_hash(Some(1u64.into()).into()), Ok(None)); + + let block = client.client.new_block().unwrap().bake().unwrap(); + client + .client + .import(BlockOrigin::Own, block.clone()) + .unwrap(); + + assert_matches!( + client.block_hash(Some(0u64.into()).into()), + Ok(Some(ref x)) if x == &client.client.genesis_hash() + ); + assert_matches!( + client.block_hash(Some(1u64.into()).into()), + Ok(Some(ref x)) if x == &block.hash() + ); + assert_matches!( + client.block_hash(Some(::primitives::U256::from(1u64).into()).into()), + Ok(Some(ref x)) if x == &block.hash() + ); } - #[test] fn should_return_finalized_hash() { - let core = ::tokio::runtime::Runtime::new().unwrap(); - let remote = core.executor(); - - let client = Chain { - client: Arc::new(test_client::new()), - subscriptions: Subscriptions::new(remote), - }; - - assert_matches!( - client.finalized_head(), - Ok(ref x) if x == &client.client.genesis_hash() - ); - - // import new block - let builder = client.client.new_block().unwrap(); - client.client.import(BlockOrigin::Own, builder.bake().unwrap()).unwrap(); - // no finalization yet - assert_matches!( - client.finalized_head(), - Ok(ref x) if x == &client.client.genesis_hash() - ); - - // finalize - client.client.finalize_block(BlockId::number(1), None, true).unwrap(); - assert_matches!( - client.finalized_head(), - Ok(ref x) if x == &client.client.block_hash(1).unwrap().unwrap() - ); + let core = ::tokio::runtime::Runtime::new().unwrap(); + let remote = core.executor(); + + let client = Chain { + client: Arc::new(test_client::new()), + subscriptions: Subscriptions::new(remote), + }; + + assert_matches!( + client.finalized_head(), + Ok(ref x) if x == &client.client.genesis_hash() + ); + + // import new block + let builder = client.client.new_block().unwrap(); + client + .client + .import(BlockOrigin::Own, builder.bake().unwrap()) + .unwrap(); + // no finalization yet + assert_matches!( + client.finalized_head(), + Ok(ref x) if x == &client.client.genesis_hash() + ); + + // finalize + client + .client + .finalize_block(BlockId::number(1), None, true) + .unwrap(); + assert_matches!( + client.finalized_head(), + Ok(ref x) if x == &client.client.block_hash(1).unwrap().unwrap() + ); } #[test] fn should_notify_about_latest_block() { - let mut core = ::tokio::runtime::Runtime::new().unwrap(); - let remote = core.executor(); - let (subscriber, id, transport) = Subscriber::new_test("test"); - - { - let api = Chain { - client: Arc::new(test_client::new()), - subscriptions: Subscriptions::new(remote), - }; - - api.subscribe_new_head(Default::default(), subscriber); - - // assert id assigned - assert_eq!(core.block_on(id), Ok(Ok(SubscriptionId::Number(1)))); - - let builder = api.client.new_block().unwrap(); - api.client.import(BlockOrigin::Own, builder.bake().unwrap()).unwrap(); - } - - // assert initial head sent. 
- let (notification, next) = core.block_on(transport.into_future()).unwrap(); - assert!(notification.is_some()); - // assert notification sent to transport - let (notification, next) = core.block_on(next.into_future()).unwrap(); - assert!(notification.is_some()); - // no more notifications on this channel - assert_eq!(core.block_on(next.into_future()).unwrap().0, None); + let mut core = ::tokio::runtime::Runtime::new().unwrap(); + let remote = core.executor(); + let (subscriber, id, transport) = Subscriber::new_test("test"); + + { + let api = Chain { + client: Arc::new(test_client::new()), + subscriptions: Subscriptions::new(remote), + }; + + api.subscribe_new_head(Default::default(), subscriber); + + // assert id assigned + assert_eq!(core.block_on(id), Ok(Ok(SubscriptionId::Number(1)))); + + let builder = api.client.new_block().unwrap(); + api.client + .import(BlockOrigin::Own, builder.bake().unwrap()) + .unwrap(); + } + + // assert initial head sent. + let (notification, next) = core.block_on(transport.into_future()).unwrap(); + assert!(notification.is_some()); + // assert notification sent to transport + let (notification, next) = core.block_on(next.into_future()).unwrap(); + assert!(notification.is_some()); + // no more notifications on this channel + assert_eq!(core.block_on(next.into_future()).unwrap().0, None); } #[test] fn should_notify_about_finalized_block() { - let mut core = ::tokio::runtime::Runtime::new().unwrap(); - let remote = core.executor(); - let (subscriber, id, transport) = Subscriber::new_test("test"); - - { - let api = Chain { - client: Arc::new(test_client::new()), - subscriptions: Subscriptions::new(remote), - }; - - api.subscribe_finalized_heads(Default::default(), subscriber); - - // assert id assigned - assert_eq!(core.block_on(id), Ok(Ok(SubscriptionId::Number(1)))); - - let builder = api.client.new_block().unwrap(); - api.client.import(BlockOrigin::Own, builder.bake().unwrap()).unwrap(); - api.client.finalize_block(BlockId::number(1), None, true).unwrap(); - } - - // assert initial head sent. - let (notification, next) = core.block_on(transport.into_future()).unwrap(); - assert!(notification.is_some()); - // assert notification sent to transport - let (notification, next) = core.block_on(next.into_future()).unwrap(); - assert!(notification.is_some()); - // no more notifications on this channel - assert_eq!(core.block_on(next.into_future()).unwrap().0, None); + let mut core = ::tokio::runtime::Runtime::new().unwrap(); + let remote = core.executor(); + let (subscriber, id, transport) = Subscriber::new_test("test"); + + { + let api = Chain { + client: Arc::new(test_client::new()), + subscriptions: Subscriptions::new(remote), + }; + + api.subscribe_finalized_heads(Default::default(), subscriber); + + // assert id assigned + assert_eq!(core.block_on(id), Ok(Ok(SubscriptionId::Number(1)))); + + let builder = api.client.new_block().unwrap(); + api.client + .import(BlockOrigin::Own, builder.bake().unwrap()) + .unwrap(); + api.client + .finalize_block(BlockId::number(1), None, true) + .unwrap(); + } + + // assert initial head sent. 
+ let (notification, next) = core.block_on(transport.into_future()).unwrap(); + assert!(notification.is_some()); + // assert notification sent to transport + let (notification, next) = core.block_on(next.into_future()).unwrap(); + assert!(notification.is_some()); + // no more notifications on this channel + assert_eq!(core.block_on(next.into_future()).unwrap().0, None); } diff --git a/core/rpc/src/errors.rs b/core/rpc/src/errors.rs index a709013ad2..5f31bac580 100644 --- a/core/rpc/src/errors.rs +++ b/core/rpc/src/errors.rs @@ -18,18 +18,18 @@ use crate::rpc; use log::warn; pub fn unimplemented() -> rpc::Error { - rpc::Error { - code: rpc::ErrorCode::ServerError(1), - message: "Not implemented yet".into(), - data: None, - } + rpc::Error { + code: rpc::ErrorCode::ServerError(1), + message: "Not implemented yet".into(), + data: None, + } } pub fn internal(e: E) -> rpc::Error { - warn!("Unknown error: {:?}", e); - rpc::Error { - code: rpc::ErrorCode::InternalError, - message: "Unknown error occured".into(), - data: Some(format!("{:?}", e).into()), - } + warn!("Unknown error: {:?}", e); + rpc::Error { + code: rpc::ErrorCode::InternalError, + message: "Unknown error occured".into(), + data: Some(format!("{:?}", e).into()), + } } diff --git a/core/rpc/src/helpers.rs b/core/rpc/src/helpers.rs index e579c743ac..7690d84bb2 100644 --- a/core/rpc/src/helpers.rs +++ b/core/rpc/src/helpers.rs @@ -15,11 +15,12 @@ // along with Substrate. If not, see . /// Unwraps the trailing parameter or falls back with the closure result. -pub fn unwrap_or_else(or_else: F, optional: Option) -> Result where - F: FnOnce() -> Result, +pub fn unwrap_or_else(or_else: F, optional: Option) -> Result +where + F: FnOnce() -> Result, { - match optional.into() { - None => or_else(), - Some(x) => Ok(x), - } + match optional.into() { + None => or_else(), + Some(x) => Ok(x), + } } diff --git a/core/rpc/src/metadata.rs b/core/rpc/src/metadata.rs index e6af4ef94b..54af8c73ae 100644 --- a/core/rpc/src/metadata.rs +++ b/core/rpc/src/metadata.rs @@ -17,8 +17,8 @@ //! RPC Metadata use std::sync::Arc; -use jsonrpc_pubsub::{Session, PubSubMetadata}; use crate::rpc::futures::sync::mpsc; +use jsonrpc_pubsub::{PubSubMetadata, Session}; /// RPC Metadata. /// @@ -27,28 +27,28 @@ use crate::rpc::futures::sync::mpsc; /// (like remote client IP address, request headers, etc) #[derive(Default, Clone)] pub struct Metadata { - session: Option>, + session: Option>, } impl crate::rpc::Metadata for Metadata {} impl PubSubMetadata for Metadata { - fn session(&self) -> Option> { - self.session.clone() - } + fn session(&self) -> Option> { + self.session.clone() + } } impl Metadata { - /// Create new `Metadata` with session (Pub/Sub) support. - pub fn new(transport: mpsc::Sender) -> Self { - Metadata { - session: Some(Arc::new(Session::new(transport))), - } - } - - /// Create new `Metadata` for tests. - #[cfg(test)] - pub fn new_test() -> (mpsc::Receiver, Self) { - let (tx, rx) = mpsc::channel(1); - (rx, Self::new(tx)) - } + /// Create new `Metadata` with session (Pub/Sub) support. + pub fn new(transport: mpsc::Sender) -> Self { + Metadata { + session: Some(Arc::new(Session::new(transport))), + } + } + + /// Create new `Metadata` for tests. 
+ #[cfg(test)] + pub fn new_test() -> (mpsc::Receiver, Self) { + let (tx, rx) = mpsc::channel(1); + (rx, Self::new(tx)) + } } diff --git a/core/rpc/src/state/error.rs b/core/rpc/src/state/error.rs index bd85664099..439915569a 100644 --- a/core/rpc/src/state/error.rs +++ b/core/rpc/src/state/error.rs @@ -14,35 +14,35 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use error_chain::*; -use client; -use crate::rpc; use crate::errors; +use crate::rpc; +use client; +use error_chain::*; error_chain! { - links { - Client(client::error::Error, client::error::ErrorKind) #[doc = "Client error"]; - } + links { + Client(client::error::Error, client::error::ErrorKind) #[doc = "Client error"]; + } - errors { - /// Provided block range couldn't be resolved to a list of blocks. - InvalidBlockRange(from: String, to: String, details: String) { - description("Invalid block range"), - display("Cannot resolve a block range ['{:?}' ... '{:?}]. {}", from, to, details), - } - /// Not implemented yet - Unimplemented { - description("not implemented yet"), - display("Method Not Implemented"), - } - } + errors { + /// Provided block range couldn't be resolved to a list of blocks. + InvalidBlockRange(from: String, to: String, details: String) { + description("Invalid block range"), + display("Cannot resolve a block range ['{:?}' ... '{:?}]. {}", from, to, details), + } + /// Not implemented yet + Unimplemented { + description("not implemented yet"), + display("Method Not Implemented"), + } + } } impl From for rpc::Error { - fn from(e: Error) -> Self { - match e { - Error(ErrorKind::Unimplemented, _) => errors::unimplemented(), - e => errors::internal(e), - } - } + fn from(e: Error) -> Self { + match e { + Error(ErrorKind::Unimplemented, _) => errors::unimplemented(), + e => errors::internal(e), + } + } } diff --git a/core/rpc/src/state/mod.rs b/core/rpc/src/state/mod.rs index 168c0bd692..5654bb5514 100644 --- a/core/rpc/src/state/mod.rs +++ b/core/rpc/src/state/mod.rs @@ -17,23 +17,23 @@ //! Substrate state API. use std::{ - collections::{BTreeMap, HashMap}, - ops::Range, - sync::Arc, + collections::{BTreeMap, HashMap}, + ops::Range, + sync::Arc, }; +use crate::rpc::futures::{stream, Future, Sink, Stream}; +use crate::rpc::Result as RpcResult; +use client::{self, runtime_api::Metadata, BlockchainEvents, CallExecutor, Client}; use error_chain::bail; -use log::{warn, trace}; -use client::{self, Client, CallExecutor, BlockchainEvents, runtime_api::Metadata}; use jsonrpc_derive::rpc; use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId}; -use primitives::{H256, Blake2Hasher, Bytes}; +use log::{trace, warn}; use primitives::hexdisplay::HexDisplay; -use primitives::storage::{self, StorageKey, StorageData, StorageChangeSet}; -use crate::rpc::Result as RpcResult; -use crate::rpc::futures::{stream, Future, Sink, Stream}; +use primitives::storage::{self, StorageChangeSet, StorageData, StorageKey}; +use primitives::{Blake2Hasher, Bytes, H256}; use runtime_primitives::generic::BlockId; -use runtime_primitives::traits::{Block as BlockT, Header, ProvideRuntimeApi, As, NumberFor}; +use runtime_primitives::traits::{As, Block as BlockT, Header, NumberFor, ProvideRuntimeApi}; use runtime_version::RuntimeVersion; use state_machine::{self, ExecutionStrategy}; @@ -48,428 +48,530 @@ use self::error::Result; /// Substrate state API #[rpc] pub trait StateApi { - /// RPC Metadata - type Metadata; - - /// Call a contract at a block's state. 
- #[rpc(name = "state_call", alias("state_callAt"))] - fn call(&self, name: String, bytes: Bytes, hash: Option) -> Result; - - /// Returns the keys with prefix, leave empty to get all the keys - #[rpc(name = "state_getKeys")] - fn storage_keys(&self, key: StorageKey, hash: Option) -> Result>; - - /// Returns a storage entry at a specific block's state. - #[rpc(name = "state_getStorage", alias("state_getStorageAt"))] - fn storage(&self, key: StorageKey, hash: Option) -> Result>; - - /// Returns the hash of a storage entry at a block's state. - #[rpc(name = "state_getStorageHash", alias("state_getStorageHashAt"))] - fn storage_hash(&self, key: StorageKey, hash: Option) -> Result>; - - /// Returns the size of a storage entry at a block's state. - #[rpc(name = "state_getStorageSize", alias("state_getStorageSizeAt"))] - fn storage_size(&self, key: StorageKey, hash: Option) -> Result>; - - /// Returns the runtime metadata as an opaque blob. - #[rpc(name = "state_getMetadata")] - fn metadata(&self, hash: Option) -> Result; - - /// Get the runtime version. - #[rpc(name = "state_getRuntimeVersion", alias("chain_getRuntimeVersion"))] - fn runtime_version(&self, hash: Option) -> Result; - - /// Query historical storage entries (by key) starting from a block given as the second parameter. - /// - /// NOTE This first returned result contains the initial state of storage for all keys. - /// Subsequent values in the vector represent changes to the previous state (diffs). - #[rpc(name = "state_queryStorage")] - fn query_storage(&self, keys: Vec, block: Hash, hash: Option) -> Result>>; - - /// New runtime version subscription - #[pubsub( - subscription = "state_runtimeVersion", - subscribe, - name = "state_subscribeRuntimeVersion", - alias("chain_subscribeRuntimeVersion") - )] - fn subscribe_runtime_version(&self, metadata: Self::Metadata, subscriber: Subscriber); - - /// Unsubscribe from runtime version subscription - #[pubsub( - subscription = "state_runtimeVersion", - unsubscribe, - name = "state_unsubscribeRuntimeVersion", - alias("chain_unsubscribeRuntimeVersion") - )] - fn unsubscribe_runtime_version(&self, metadata: Option, id: SubscriptionId) -> RpcResult; - - /// New storage subscription - #[pubsub(subscription = "state_storage", subscribe, name = "state_subscribeStorage")] - fn subscribe_storage(&self, metadata: Self::Metadata, subscriber: Subscriber>, keys: Option>); - - /// Unsubscribe from storage subscription - #[pubsub(subscription = "state_storage", unsubscribe, name = "state_unsubscribeStorage")] - fn unsubscribe_storage(&self, metadata: Option, id: SubscriptionId) -> RpcResult; + /// RPC Metadata + type Metadata; + + /// Call a contract at a block's state. + #[rpc(name = "state_call", alias("state_callAt"))] + fn call(&self, name: String, bytes: Bytes, hash: Option) -> Result; + + /// Returns the keys with prefix, leave empty to get all the keys + #[rpc(name = "state_getKeys")] + fn storage_keys(&self, key: StorageKey, hash: Option) -> Result>; + + /// Returns a storage entry at a specific block's state. + #[rpc(name = "state_getStorage", alias("state_getStorageAt"))] + fn storage(&self, key: StorageKey, hash: Option) -> Result>; + + /// Returns the hash of a storage entry at a block's state. + #[rpc(name = "state_getStorageHash", alias("state_getStorageHashAt"))] + fn storage_hash(&self, key: StorageKey, hash: Option) -> Result>; + + /// Returns the size of a storage entry at a block's state. 
+ #[rpc(name = "state_getStorageSize", alias("state_getStorageSizeAt"))] + fn storage_size(&self, key: StorageKey, hash: Option) -> Result>; + + /// Returns the runtime metadata as an opaque blob. + #[rpc(name = "state_getMetadata")] + fn metadata(&self, hash: Option) -> Result; + + /// Get the runtime version. + #[rpc(name = "state_getRuntimeVersion", alias("chain_getRuntimeVersion"))] + fn runtime_version(&self, hash: Option) -> Result; + + /// Query historical storage entries (by key) starting from a block given as the second parameter. + /// + /// NOTE This first returned result contains the initial state of storage for all keys. + /// Subsequent values in the vector represent changes to the previous state (diffs). + #[rpc(name = "state_queryStorage")] + fn query_storage( + &self, + keys: Vec, + block: Hash, + hash: Option, + ) -> Result>>; + + /// New runtime version subscription + #[pubsub( + subscription = "state_runtimeVersion", + subscribe, + name = "state_subscribeRuntimeVersion", + alias("chain_subscribeRuntimeVersion") + )] + fn subscribe_runtime_version( + &self, + metadata: Self::Metadata, + subscriber: Subscriber, + ); + + /// Unsubscribe from runtime version subscription + #[pubsub( + subscription = "state_runtimeVersion", + unsubscribe, + name = "state_unsubscribeRuntimeVersion", + alias("chain_unsubscribeRuntimeVersion") + )] + fn unsubscribe_runtime_version( + &self, + metadata: Option, + id: SubscriptionId, + ) -> RpcResult; + + /// New storage subscription + #[pubsub( + subscription = "state_storage", + subscribe, + name = "state_subscribeStorage" + )] + fn subscribe_storage( + &self, + metadata: Self::Metadata, + subscriber: Subscriber>, + keys: Option>, + ); + + /// Unsubscribe from storage subscription + #[pubsub( + subscription = "state_storage", + unsubscribe, + name = "state_unsubscribeStorage" + )] + fn unsubscribe_storage( + &self, + metadata: Option, + id: SubscriptionId, + ) -> RpcResult; } /// State API with subscriptions support. pub struct State { - /// Substrate client. - client: Arc>, - /// Current subscriptions. - subscriptions: Subscriptions, + /// Substrate client. + client: Arc>, + /// Current subscriptions. + subscriptions: Subscriptions, } /// Ranges to query in state_queryStorage. struct QueryStorageRange { - /// Hashes of all the blocks in the range. - pub hashes: Vec, - /// Number of the first block in the range. - pub first_number: NumberFor, - /// Blocks subrange ([begin; end) indices within `hashes`) where we should read keys at - /// each state to get changes. - pub unfiltered_range: Range, - /// Blocks subrange ([begin; end) indices within `hashes`) where we could pre-filter - /// blocks-with-changes by using changes tries. - pub filtered_range: Option>, + /// Hashes of all the blocks in the range. + pub hashes: Vec, + /// Number of the first block in the range. + pub first_number: NumberFor, + /// Blocks subrange ([begin; end) indices within `hashes`) where we should read keys at + /// each state to get changes. + pub unfiltered_range: Range, + /// Blocks subrange ([begin; end) indices within `hashes`) where we could pre-filter + /// blocks-with-changes by using changes tries. + pub filtered_range: Option>, } -impl State where - Block: BlockT, - B: client::backend::Backend, - E: CallExecutor, +impl State +where + Block: BlockT, + B: client::backend::Backend, + E: CallExecutor, { - /// Create new State API RPC handler. 
- pub fn new(client: Arc>, subscriptions: Subscriptions) -> Self { - Self { - client, - subscriptions, - } - } - - /// Splits the `query_storage` block range into 'filtered' and 'unfiltered' subranges. - /// Blocks that contain changes within filtered subrange could be filtered using changes tries. - /// Blocks that contain changes within unfiltered subrange must be filtered manually. - fn split_query_storage_range( - &self, - from: Block::Hash, - to: Option - ) -> Result> { - let to = self.unwrap_or_best(to)?; - let from_hdr = self.client.header(&BlockId::hash(from))?; - let to_hdr = self.client.header(&BlockId::hash(to))?; - match (from_hdr, to_hdr) { - (Some(ref from), Some(ref to)) if from.number() <= to.number() => { - // check if we can get from `to` to `from` by going through parent_hashes. - let from_number = *from.number(); - let blocks = { - let mut blocks = vec![to.hash()]; - let mut last = to.clone(); - while *last.number() > from_number { - if let Some(hdr) = self.client.header(&BlockId::hash(*last.parent_hash()))? { - blocks.push(hdr.hash()); - last = hdr; - } else { - bail!(invalid_block_range( - Some(from), - Some(to), - format!("Parent of {} ({}) not found", last.number(), last.hash()), - )) - } - } - if last.hash() != from.hash() { - bail!(invalid_block_range( - Some(from), - Some(to), - format!("Expected to reach `from`, got {} ({})", last.number(), last.hash()), - )) - } - blocks.reverse(); - blocks - }; - // check if we can filter blocks-with-changes from some (sub)range using changes tries - let changes_trie_range = self.client.max_key_changes_range(from_number, BlockId::Hash(to.hash()))?; - let filtered_range_begin = changes_trie_range.map(|(begin, _)| (begin - from_number).as_() as usize); - let (unfiltered_range, filtered_range) = split_range(blocks.len(), filtered_range_begin); - Ok(QueryStorageRange { - hashes: blocks, - first_number: from_number, - unfiltered_range, - filtered_range, - }) - }, - (from, to) => bail!( - invalid_block_range(from.as_ref(), to.as_ref(), "Invalid range or unknown block".into()) - ), - } - } - - /// Iterates through range.unfiltered_range and check each block for changes of keys' values. - fn query_storage_unfiltered( - &self, - range: &QueryStorageRange, - keys: &[StorageKey], - changes: &mut Vec>, - ) -> Result<()> { - let mut last_state: HashMap<_, Option<_>> = Default::default(); - for block in range.unfiltered_range.start..range.unfiltered_range.end { - let block_hash = range.hashes[block].clone(); - let mut block_changes = StorageChangeSet { block: block_hash.clone(), changes: Vec::new() }; - let id = BlockId::hash(block_hash); - for key in keys { - let (has_changed, data) = { - let curr_data = self.client.storage(&id, key)?; - let prev_data = last_state.get(key).and_then(|x| x.as_ref()); - (curr_data.as_ref() != prev_data, curr_data) - }; - if has_changed { - block_changes.changes.push((key.clone(), data.clone())); - } - last_state.insert(key.clone(), data); - } - changes.push(block_changes); - } - Ok(()) - } - - /// Iterates through all blocks that are changing keys within range.filtered_range and collects these changes. 
- fn query_storage_filtered( - &self, - range: &QueryStorageRange, - keys: &[StorageKey], - changes: &mut Vec>, - ) -> Result<()> { - let (begin, end) = match range.filtered_range { - Some(ref filtered_range) => ( - range.first_number + As::sa(filtered_range.start as u64), - BlockId::Hash(range.hashes[filtered_range.end - 1].clone()) - ), - None => return Ok(()), - }; - let mut changes_map: BTreeMap, StorageChangeSet> = BTreeMap::new(); - for key in keys { - let mut last_block = None; - for (block, _) in self.client.key_changes(begin, end, key)? { - if last_block == Some(block) { - continue; - } - let block_hash = range.hashes[(block - range.first_number).as_() as usize].clone(); - let id = BlockId::Hash(block_hash); - let value_at_block = self.client.storage(&id, key)?; - changes_map.entry(block) - .or_insert_with(|| StorageChangeSet { block: block_hash, changes: Vec::new() }) - .changes.push((key.clone(), value_at_block)); - last_block = Some(block); - } - } - if let Some(additional_capacity) = changes_map.len().checked_sub(changes.len()) { - changes.reserve(additional_capacity); - } - changes.extend(changes_map.into_iter().map(|(_, cs)| cs)); - Ok(()) - } + /// Create new State API RPC handler. + pub fn new(client: Arc>, subscriptions: Subscriptions) -> Self { + Self { + client, + subscriptions, + } + } + + /// Splits the `query_storage` block range into 'filtered' and 'unfiltered' subranges. + /// Blocks that contain changes within the filtered subrange can be filtered using changes tries. + /// Blocks that contain changes within the unfiltered subrange must be filtered manually. + fn split_query_storage_range( + &self, + from: Block::Hash, + to: Option, + ) -> Result> { + let to = self.unwrap_or_best(to)?; + let from_hdr = self.client.header(&BlockId::hash(from))?; + let to_hdr = self.client.header(&BlockId::hash(to))?; + match (from_hdr, to_hdr) { + (Some(ref from), Some(ref to)) if from.number() <= to.number() => { + // check if we can get from `to` to `from` by going through parent_hashes. + let from_number = *from.number(); + let blocks = { + let mut blocks = vec![to.hash()]; + let mut last = to.clone(); + while *last.number() > from_number { + if let Some(hdr) = + self.client.header(&BlockId::hash(*last.parent_hash()))? + { + blocks.push(hdr.hash()); + last = hdr; + } else { + bail!(invalid_block_range( + Some(from), + Some(to), + format!("Parent of {} ({}) not found", last.number(), last.hash()), + )) + } + } + if last.hash() != from.hash() { + bail!(invalid_block_range( + Some(from), + Some(to), + format!( + "Expected to reach `from`, got {} ({})", + last.number(), + last.hash() + ), + )) + } + blocks.reverse(); + blocks + }; + // check if we can filter blocks-with-changes from some (sub)range using changes tries + let changes_trie_range = self + .client + .max_key_changes_range(from_number, BlockId::Hash(to.hash()))?; + let filtered_range_begin = + changes_trie_range.map(|(begin, _)| (begin - from_number).as_() as usize); + let (unfiltered_range, filtered_range) = + split_range(blocks.len(), filtered_range_begin); + Ok(QueryStorageRange { + hashes: blocks, + first_number: from_number, + unfiltered_range, + filtered_range, + }) + } + (from, to) => bail!(invalid_block_range( + from.as_ref(), + to.as_ref(), + "Invalid range or unknown block".into() + )), + } + } + + /// Iterates through range.unfiltered_range and checks each block for changes of keys' values.
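+    /// Only keys whose current value differs from the value seen at the previous
+    /// visited block are recorded in a block's `StorageChangeSet`, so every set
+    /// after the first is a diff against its predecessor.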
+ fn query_storage_unfiltered( + &self, + range: &QueryStorageRange, + keys: &[StorageKey], + changes: &mut Vec>, + ) -> Result<()> { + let mut last_state: HashMap<_, Option<_>> = Default::default(); + for block in range.unfiltered_range.start..range.unfiltered_range.end { + let block_hash = range.hashes[block].clone(); + let mut block_changes = StorageChangeSet { + block: block_hash.clone(), + changes: Vec::new(), + }; + let id = BlockId::hash(block_hash); + for key in keys { + let (has_changed, data) = { + let curr_data = self.client.storage(&id, key)?; + let prev_data = last_state.get(key).and_then(|x| x.as_ref()); + (curr_data.as_ref() != prev_data, curr_data) + }; + if has_changed { + block_changes.changes.push((key.clone(), data.clone())); + } + last_state.insert(key.clone(), data); + } + changes.push(block_changes); + } + Ok(()) + } + + /// Iterates through all blocks that are changing keys within range.filtered_range and collects these changes. + fn query_storage_filtered( + &self, + range: &QueryStorageRange, + keys: &[StorageKey], + changes: &mut Vec>, + ) -> Result<()> { + let (begin, end) = match range.filtered_range { + Some(ref filtered_range) => ( + range.first_number + As::sa(filtered_range.start as u64), + BlockId::Hash(range.hashes[filtered_range.end - 1].clone()), + ), + None => return Ok(()), + }; + let mut changes_map: BTreeMap, StorageChangeSet> = + BTreeMap::new(); + for key in keys { + let mut last_block = None; + for (block, _) in self.client.key_changes(begin, end, key)? { + if last_block == Some(block) { + continue; + } + let block_hash = range.hashes[(block - range.first_number).as_() as usize].clone(); + let id = BlockId::Hash(block_hash); + let value_at_block = self.client.storage(&id, key)?; + changes_map + .entry(block) + .or_insert_with(|| StorageChangeSet { + block: block_hash, + changes: Vec::new(), + }) + .changes + .push((key.clone(), value_at_block)); + last_block = Some(block); + } + } + if let Some(additional_capacity) = changes_map.len().checked_sub(changes.len()) { + changes.reserve(additional_capacity); + } + changes.extend(changes_map.into_iter().map(|(_, cs)| cs)); + Ok(()) + } } -impl State where - Block: BlockT, - B: client::backend::Backend, - E: CallExecutor, +impl State +where + Block: BlockT, + B: client::backend::Backend, + E: CallExecutor, { - fn unwrap_or_best(&self, hash: Option) -> Result { - crate::helpers::unwrap_or_else(|| Ok(self.client.info()?.chain.best_hash), hash) - } + fn unwrap_or_best(&self, hash: Option) -> Result { + crate::helpers::unwrap_or_else(|| Ok(self.client.info()?.chain.best_hash), hash) + } } -impl StateApi for State where - Block: BlockT + 'static, - B: client::backend::Backend + Send + Sync + 'static, - E: CallExecutor + Send + Sync + 'static + Clone, - RA: Send + Sync + 'static, - Client: ProvideRuntimeApi, - as ProvideRuntimeApi>::Api: Metadata +impl StateApi for State +where + Block: BlockT + 'static, + B: client::backend::Backend + Send + Sync + 'static, + E: CallExecutor + Send + Sync + 'static + Clone, + RA: Send + Sync + 'static, + Client: ProvideRuntimeApi, + as ProvideRuntimeApi>::Api: Metadata, { - type Metadata = crate::metadata::Metadata; - - fn call(&self, method: String, data: Bytes, block: Option) -> Result { - let block = self.unwrap_or_best(block)?; - trace!(target: "rpc", "Calling runtime at {:?} for method {} ({})", block, method, HexDisplay::from(&data.0)); - let return_data = self.client - .executor() - .call( - &BlockId::Hash(block), - &method, &data.0, ExecutionStrategy::NativeElseWasm, 
state_machine::NeverOffchainExt::new(), - )?; - Ok(Bytes(return_data)) - } - - fn storage_keys(&self, key_prefix: StorageKey, block: Option) -> Result> { - let block = self.unwrap_or_best(block)?; - trace!(target: "rpc", "Querying storage keys at {:?}", block); - Ok(self.client.storage_keys(&BlockId::Hash(block), &key_prefix)?) - } - - fn storage(&self, key: StorageKey, block: Option) -> Result> { - let block = self.unwrap_or_best(block)?; - trace!(target: "rpc", "Querying storage at {:?} for key {}", block, HexDisplay::from(&key.0)); - Ok(self.client.storage(&BlockId::Hash(block), &key)?) - } - - fn storage_hash(&self, key: StorageKey, block: Option) -> Result> { - use runtime_primitives::traits::{Hash, Header as HeaderT}; - Ok(self.storage(key, block)?.map(|x| ::Hashing::hash(&x.0))) - } - - fn storage_size(&self, key: StorageKey, block: Option) -> Result> { - Ok(self.storage(key, block)?.map(|x| x.0.len() as u64)) - } - - fn metadata(&self, block: Option) -> Result { - let block = self.unwrap_or_best(block)?; - self.client.runtime_api().metadata(&BlockId::Hash(block)).map(Into::into).map_err(Into::into) - } - - fn query_storage( - &self, - keys: Vec, - from: Block::Hash, - to: Option - ) -> Result>> { - let range = self.split_query_storage_range(from, to)?; - let mut changes = Vec::new(); - self.query_storage_unfiltered(&range, &keys, &mut changes)?; - self.query_storage_filtered(&range, &keys, &mut changes)?; - Ok(changes) - } - - fn subscribe_storage( - &self, - _meta: Self::Metadata, - subscriber: Subscriber>, - keys: Option> - ) { - let keys = Into::>>::into(keys); - let stream = match self.client.storage_changes_notification_stream(keys.as_ref().map(|x| &**x)) { - Ok(stream) => stream, - Err(err) => { - let _ = subscriber.reject(error::Error::from(err).into()); - return; - }, - }; - - // initial values - let initial = stream::iter_result(keys - .map(|keys| { - let block = self.client.info().map(|info| info.chain.best_hash).unwrap_or_default(); - let changes = keys - .into_iter() - .map(|key| self.storage(key.clone(), Some(block.clone()).into()) - .map(|val| (key.clone(), val)) - .unwrap_or_else(|_| (key, None)) - ) - .collect(); - vec![Ok(Ok(StorageChangeSet { block, changes }))] - }).unwrap_or_default()); - - self.subscriptions.add(subscriber, |sink| { - let stream = stream - .map_err(|e| warn!("Error creating storage notification stream: {:?}", e)) - .map(|(block, changes)| Ok(StorageChangeSet { - block, - changes: changes.iter().cloned().collect(), - })); - - sink - .sink_map_err(|e| warn!("Error sending notifications: {:?}", e)) - .send_all(initial.chain(stream)) - // we ignore the resulting Stream (if the first stream is over we are unsubscribed) - .map(|_| ()) - }) - } - - fn unsubscribe_storage(&self, _meta: Option, id: SubscriptionId) -> RpcResult { - Ok(self.subscriptions.cancel(id)) - } - - fn runtime_version(&self, at: Option) -> Result { - let at = self.unwrap_or_best(at)?; - Ok(self.client.runtime_version_at(&BlockId::Hash(at))?) 
- } - - fn subscribe_runtime_version(&self, _meta: Self::Metadata, subscriber: Subscriber) { - let stream = match self.client.storage_changes_notification_stream(Some(&[StorageKey(storage::well_known_keys::CODE.to_vec())])) { - Ok(stream) => stream, - Err(err) => { - let _ = subscriber.reject(error::Error::from(err).into()); - return; - } - }; - - self.subscriptions.add(subscriber, |sink| { - let version = self.runtime_version(None.into()) - .map_err(Into::into); - - let client = self.client.clone(); - let mut previous_version = version.clone(); - - let stream = stream - .map_err(|e| warn!("Error creating storage notification stream: {:?}", e)) - .filter_map(move |_| { - let version = client.info().and_then(|info| { - client.runtime_version_at(&BlockId::hash(info.chain.best_hash)) - }) - .map_err(error::Error::from) - .map_err(Into::into); - if previous_version != version { - previous_version = version.clone(); - Some(version) - } else { - None - } - }); - - sink - .sink_map_err(|e| warn!("Error sending notifications: {:?}", e)) - .send_all( - stream::iter_result(vec![Ok(version)]) - .chain(stream) - ) - // we ignore the resulting Stream (if the first stream is over we are unsubscribed) - .map(|_| ()) - }); - } - - fn unsubscribe_runtime_version(&self, _meta: Option, id: SubscriptionId) -> RpcResult { - Ok(self.subscriptions.cancel(id)) - } + type Metadata = crate::metadata::Metadata; + + fn call(&self, method: String, data: Bytes, block: Option) -> Result { + let block = self.unwrap_or_best(block)?; + trace!(target: "rpc", "Calling runtime at {:?} for method {} ({})", block, method, HexDisplay::from(&data.0)); + let return_data = self.client.executor().call( + &BlockId::Hash(block), + &method, + &data.0, + ExecutionStrategy::NativeElseWasm, + state_machine::NeverOffchainExt::new(), + )?; + Ok(Bytes(return_data)) + } + + fn storage_keys( + &self, + key_prefix: StorageKey, + block: Option, + ) -> Result> { + let block = self.unwrap_or_best(block)?; + trace!(target: "rpc", "Querying storage keys at {:?}", block); + Ok(self + .client + .storage_keys(&BlockId::Hash(block), &key_prefix)?) + } + + fn storage(&self, key: StorageKey, block: Option) -> Result> { + let block = self.unwrap_or_best(block)?; + trace!(target: "rpc", "Querying storage at {:?} for key {}", block, HexDisplay::from(&key.0)); + Ok(self.client.storage(&BlockId::Hash(block), &key)?) + } + + fn storage_hash( + &self, + key: StorageKey, + block: Option, + ) -> Result> { + use runtime_primitives::traits::{Hash, Header as HeaderT}; + Ok(self + .storage(key, block)? 
+ .map(|x| ::Hashing::hash(&x.0))) + } + + fn storage_size(&self, key: StorageKey, block: Option) -> Result> { + Ok(self.storage(key, block)?.map(|x| x.0.len() as u64)) + } + + fn metadata(&self, block: Option) -> Result { + let block = self.unwrap_or_best(block)?; + self.client + .runtime_api() + .metadata(&BlockId::Hash(block)) + .map(Into::into) + .map_err(Into::into) + } + + fn query_storage( + &self, + keys: Vec, + from: Block::Hash, + to: Option, + ) -> Result>> { + let range = self.split_query_storage_range(from, to)?; + let mut changes = Vec::new(); + self.query_storage_unfiltered(&range, &keys, &mut changes)?; + self.query_storage_filtered(&range, &keys, &mut changes)?; + Ok(changes) + } + + fn subscribe_storage( + &self, + _meta: Self::Metadata, + subscriber: Subscriber>, + keys: Option>, + ) { + let keys = Into::>>::into(keys); + let stream = match self + .client + .storage_changes_notification_stream(keys.as_ref().map(|x| &**x)) + { + Ok(stream) => stream, + Err(err) => { + let _ = subscriber.reject(error::Error::from(err).into()); + return; + } + }; + + // initial values + let initial = stream::iter_result( + keys.map(|keys| { + let block = self + .client + .info() + .map(|info| info.chain.best_hash) + .unwrap_or_default(); + let changes = keys + .into_iter() + .map(|key| { + self.storage(key.clone(), Some(block.clone()).into()) + .map(|val| (key.clone(), val)) + .unwrap_or_else(|_| (key, None)) + }) + .collect(); + vec![Ok(Ok(StorageChangeSet { block, changes }))] + }) + .unwrap_or_default(), + ); + + self.subscriptions.add(subscriber, |sink| { + let stream = stream + .map_err(|e| warn!("Error creating storage notification stream: {:?}", e)) + .map(|(block, changes)| { + Ok(StorageChangeSet { + block, + changes: changes.iter().cloned().collect(), + }) + }); + + sink.sink_map_err(|e| warn!("Error sending notifications: {:?}", e)) + .send_all(initial.chain(stream)) + // we ignore the resulting Stream (if the first stream is over we are unsubscribed) + .map(|_| ()) + }) + } + + fn unsubscribe_storage( + &self, + _meta: Option, + id: SubscriptionId, + ) -> RpcResult { + Ok(self.subscriptions.cancel(id)) + } + + fn runtime_version(&self, at: Option) -> Result { + let at = self.unwrap_or_best(at)?; + Ok(self.client.runtime_version_at(&BlockId::Hash(at))?) 
+ } + + fn subscribe_runtime_version( + &self, + _meta: Self::Metadata, + subscriber: Subscriber, + ) { + let stream = match self + .client + .storage_changes_notification_stream(Some(&[StorageKey( + storage::well_known_keys::CODE.to_vec(), + )])) { + Ok(stream) => stream, + Err(err) => { + let _ = subscriber.reject(error::Error::from(err).into()); + return; + } + }; + + self.subscriptions.add(subscriber, |sink| { + let version = self.runtime_version(None.into()).map_err(Into::into); + + let client = self.client.clone(); + let mut previous_version = version.clone(); + + let stream = stream + .map_err(|e| warn!("Error creating storage notification stream: {:?}", e)) + .filter_map(move |_| { + let version = client + .info() + .and_then(|info| { + client.runtime_version_at(&BlockId::hash(info.chain.best_hash)) + }) + .map_err(error::Error::from) + .map_err(Into::into); + if previous_version != version { + previous_version = version.clone(); + Some(version) + } else { + None + } + }); + + sink.sink_map_err(|e| warn!("Error sending notifications: {:?}", e)) + .send_all(stream::iter_result(vec![Ok(version)]).chain(stream)) + // we ignore the resulting Stream (if the first stream is over we are unsubscribed) + .map(|_| ()) + }); + } + + fn unsubscribe_runtime_version( + &self, + _meta: Option, + id: SubscriptionId, + ) -> RpcResult { + Ok(self.subscriptions.cancel(id)) + } } /// Splits passed range into two subranges where: /// - first range has at least one element in it; /// - second range (optionally) starts at given `middle` element. -pub(crate) fn split_range(size: usize, middle: Option) -> (Range, Option>) { - // check if we can filter blocks-with-changes from some (sub)range using changes tries - let range2_begin = match middle { - // some of required changes tries are pruned => use available tries - Some(middle) if middle != 0 => Some(middle), - // all required changes tries are available, but we still want values at first block - // => do 'unfiltered' read for the first block and 'filtered' for the rest - Some(_) if size > 1 => Some(1), - // range contains single element => do not use changes tries - Some(_) => None, - // changes tries are not available => do 'unfiltered' read for the whole range - None => None, - }; - let range1 = 0..range2_begin.unwrap_or(size); - let range2 = range2_begin.map(|begin| begin..size); - (range1, range2) +pub(crate) fn split_range( + size: usize, + middle: Option, +) -> (Range, Option>) { + // check if we can filter blocks-with-changes from some (sub)range using changes tries + let range2_begin = match middle { + // some of required changes tries are pruned => use available tries + Some(middle) if middle != 0 => Some(middle), + // all required changes tries are available, but we still want values at first block + // => do 'unfiltered' read for the first block and 'filtered' for the rest + Some(_) if size > 1 => Some(1), + // range contains single element => do not use changes tries + Some(_) => None, + // changes tries are not available => do 'unfiltered' read for the whole range + None => None, + }; + let range1 = 0..range2_begin.unwrap_or(size); + let range2 = range2_begin.map(|begin| begin..size); + (range1, range2) } -fn invalid_block_range(from: Option<&H>, to: Option<&H>, reason: String) -> error::ErrorKind { - let to_string = |x: Option<&H>| match x { - None => "unknown hash".into(), - Some(h) => format!("{} ({})", h.number(), h.hash()), - }; - - error::ErrorKind::InvalidBlockRange(to_string(from), to_string(to), reason) +fn invalid_block_range( + 
from: Option<&H>, + to: Option<&H>, + reason: String, +) -> error::ErrorKind { + let to_string = |x: Option<&H>| match x { + None => "unknown hash".into(), + Some(h) => format!("{} ({})", h.number(), h.hash()), + }; + + error::ErrorKind::InvalidBlockRange(to_string(from), to_string(to), reason) } diff --git a/core/rpc/src/state/tests.rs b/core/rpc/src/state/tests.rs index 6746685d36..7c0143ece9 100644 --- a/core/rpc/src/state/tests.rs +++ b/core/rpc/src/state/tests.rs @@ -14,212 +14,225 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use super::*; use self::error::{Error, ErrorKind}; +use super::*; -use sr_io::twox_128; use assert_matches::assert_matches; use consensus::BlockOrigin; -use test_client::{self, runtime, AccountKeyring, TestClient, BlockBuilderExt}; +use sr_io::twox_128; +use test_client::{self, runtime, AccountKeyring, BlockBuilderExt, TestClient}; #[test] fn should_return_storage() { - let core = ::tokio::runtime::Runtime::new().unwrap(); - let client = Arc::new(test_client::new()); - let genesis_hash = client.genesis_hash(); - let client = State::new(client, Subscriptions::new(core.executor())); - - assert_matches!( - client.storage(StorageKey(vec![10]), Some(genesis_hash).into()), - Ok(None) - ) + let core = ::tokio::runtime::Runtime::new().unwrap(); + let client = Arc::new(test_client::new()); + let genesis_hash = client.genesis_hash(); + let client = State::new(client, Subscriptions::new(core.executor())); + + assert_matches!( + client.storage(StorageKey(vec![10]), Some(genesis_hash).into()), + Ok(None) + ) } #[test] fn should_call_contract() { - let core = ::tokio::runtime::Runtime::new().unwrap(); - let client = Arc::new(test_client::new()); - let genesis_hash = client.genesis_hash(); - let client = State::new(client, Subscriptions::new(core.executor())); - - assert_matches!( - client.call("balanceOf".into(), Bytes(vec![1,2,3]), Some(genesis_hash).into()), - Err(Error(ErrorKind::Client(client::error::ErrorKind::Execution(_)), _)) - ) + let core = ::tokio::runtime::Runtime::new().unwrap(); + let client = Arc::new(test_client::new()); + let genesis_hash = client.genesis_hash(); + let client = State::new(client, Subscriptions::new(core.executor())); + + assert_matches!( + client.call( + "balanceOf".into(), + Bytes(vec![1, 2, 3]), + Some(genesis_hash).into() + ), + Err(Error(ErrorKind::Client(client::error::ErrorKind::Execution(_)), _)) + ) } #[test] fn should_notify_about_storage_changes() { - let mut core = ::tokio::runtime::Runtime::new().unwrap(); - let remote = core.executor(); - let (subscriber, id, transport) = Subscriber::new_test("test"); - - { - let api = State::new(Arc::new(test_client::new()), Subscriptions::new(remote)); - - api.subscribe_storage(Default::default(), subscriber, None.into()); - - // assert id assigned - assert_eq!(core.block_on(id), Ok(Ok(SubscriptionId::Number(1)))); - - let mut builder = api.client.new_block().unwrap(); - builder.push_transfer(runtime::Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 42, - nonce: 0, - }).unwrap(); - api.client.import(BlockOrigin::Own, builder.bake().unwrap()).unwrap(); - } - - // assert notification sent to transport - let (notification, next) = core.block_on(transport.into_future()).unwrap(); - assert!(notification.is_some()); - // no more notifications on this channel - assert_eq!(core.block_on(next.into_future()).unwrap().0, None); + let mut core = ::tokio::runtime::Runtime::new().unwrap(); + let 
remote = core.executor(); + let (subscriber, id, transport) = Subscriber::new_test("test"); + + { + let api = State::new(Arc::new(test_client::new()), Subscriptions::new(remote)); + + api.subscribe_storage(Default::default(), subscriber, None.into()); + + // assert id assigned + assert_eq!(core.block_on(id), Ok(Ok(SubscriptionId::Number(1)))); + + let mut builder = api.client.new_block().unwrap(); + builder + .push_transfer(runtime::Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 42, + nonce: 0, + }) + .unwrap(); + api.client + .import(BlockOrigin::Own, builder.bake().unwrap()) + .unwrap(); + } + + // assert notification sent to transport + let (notification, next) = core.block_on(transport.into_future()).unwrap(); + assert!(notification.is_some()); + // no more notifications on this channel + assert_eq!(core.block_on(next.into_future()).unwrap().0, None); } #[test] fn should_send_initial_storage_changes_and_notifications() { - let mut core = ::tokio::runtime::Runtime::new().unwrap(); - let remote = core.executor(); - let (subscriber, id, transport) = Subscriber::new_test("test"); - - { - let api = State::new(Arc::new(test_client::new()), Subscriptions::new(remote)); - - let alice_balance_key = twox_128(&test_runtime::system::balance_of_key(AccountKeyring::Alice.into())); - - api.subscribe_storage(Default::default(), subscriber, Some(vec![ - StorageKey(alice_balance_key.to_vec()), - ]).into()); - - // assert id assigned - assert_eq!(core.block_on(id), Ok(Ok(SubscriptionId::Number(1)))); - - let mut builder = api.client.new_block().unwrap(); - builder.push_transfer(runtime::Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 42, - nonce: 0, - }).unwrap(); - api.client.import(BlockOrigin::Own, builder.bake().unwrap()).unwrap(); - } - - // assert initial values sent to transport - let (notification, next) = core.block_on(transport.into_future()).unwrap(); - assert!(notification.is_some()); - // assert notification sent to transport - let (notification, next) = core.block_on(next.into_future()).unwrap(); - assert!(notification.is_some()); - // no more notifications on this channel - assert_eq!(core.block_on(next.into_future()).unwrap().0, None); + let mut core = ::tokio::runtime::Runtime::new().unwrap(); + let remote = core.executor(); + let (subscriber, id, transport) = Subscriber::new_test("test"); + + { + let api = State::new(Arc::new(test_client::new()), Subscriptions::new(remote)); + + let alice_balance_key = twox_128(&test_runtime::system::balance_of_key( + AccountKeyring::Alice.into(), + )); + + api.subscribe_storage( + Default::default(), + subscriber, + Some(vec![StorageKey(alice_balance_key.to_vec())]).into(), + ); + + // assert id assigned + assert_eq!(core.block_on(id), Ok(Ok(SubscriptionId::Number(1)))); + + let mut builder = api.client.new_block().unwrap(); + builder + .push_transfer(runtime::Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 42, + nonce: 0, + }) + .unwrap(); + api.client + .import(BlockOrigin::Own, builder.bake().unwrap()) + .unwrap(); + } + + // assert initial values sent to transport + let (notification, next) = core.block_on(transport.into_future()).unwrap(); + assert!(notification.is_some()); + // assert notification sent to transport + let (notification, next) = core.block_on(next.into_future()).unwrap(); + assert!(notification.is_some()); + // no more notifications on this channel + 
assert_eq!(core.block_on(next.into_future()).unwrap().0, None); } #[test] fn should_query_storage() { - type TestClient = test_client::client::Client< - test_client::Backend, - test_client::Executor, - runtime::Block, - runtime::RuntimeApi - >; - - fn run_tests(client: Arc) { - let core = ::tokio::runtime::Runtime::new().unwrap(); - let api = State::new(client.clone(), Subscriptions::new(core.executor())); - - let add_block = |nonce| { - let mut builder = client.new_block().unwrap(); - builder.push_transfer(runtime::Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 42, - nonce, - }).unwrap(); - let block = builder.bake().unwrap(); - let hash = block.header.hash(); - client.import(BlockOrigin::Own, block).unwrap(); - hash - }; - let block1_hash = add_block(0); - let block2_hash = add_block(1); - let genesis_hash = client.genesis_hash(); - - let alice_balance_key = twox_128(&test_runtime::system::balance_of_key(AccountKeyring::Alice.into())); - - let mut expected = vec![ - StorageChangeSet { - block: genesis_hash, - changes: vec![ - ( - StorageKey(alice_balance_key.to_vec()), - Some(StorageData(vec![232, 3, 0, 0, 0, 0, 0, 0])) - ), - ], - }, - StorageChangeSet { - block: block1_hash, - changes: vec![ - ( - StorageKey(alice_balance_key.to_vec()), - Some(StorageData(vec![190, 3, 0, 0, 0, 0, 0, 0])) - ), - ], - }, - ]; - - // Query changes only up to block1 - let result = api.query_storage( - vec![StorageKey(alice_balance_key.to_vec())], - genesis_hash, - Some(block1_hash).into(), - ); - - assert_eq!(result.unwrap(), expected); - - // Query all changes - let result = api.query_storage( - vec![StorageKey(alice_balance_key.to_vec())], - genesis_hash, - None.into(), - ); - - expected.push(StorageChangeSet { - block: block2_hash, - changes: vec![ - ( - StorageKey(alice_balance_key.to_vec()), - Some(StorageData(vec![148, 3, 0, 0, 0, 0, 0, 0])) - ), - ], - }); - assert_eq!(result.unwrap(), expected); - } - - run_tests(Arc::new(test_client::new())); - run_tests(Arc::new(test_client::new_with_changes_trie())); + type TestClient = test_client::client::Client< + test_client::Backend, + test_client::Executor, + runtime::Block, + runtime::RuntimeApi, + >; + + fn run_tests(client: Arc) { + let core = ::tokio::runtime::Runtime::new().unwrap(); + let api = State::new(client.clone(), Subscriptions::new(core.executor())); + + let add_block = |nonce| { + let mut builder = client.new_block().unwrap(); + builder + .push_transfer(runtime::Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 42, + nonce, + }) + .unwrap(); + let block = builder.bake().unwrap(); + let hash = block.header.hash(); + client.import(BlockOrigin::Own, block).unwrap(); + hash + }; + let block1_hash = add_block(0); + let block2_hash = add_block(1); + let genesis_hash = client.genesis_hash(); + + let alice_balance_key = twox_128(&test_runtime::system::balance_of_key( + AccountKeyring::Alice.into(), + )); + + let mut expected = vec![ + StorageChangeSet { + block: genesis_hash, + changes: vec![( + StorageKey(alice_balance_key.to_vec()), + Some(StorageData(vec![232, 3, 0, 0, 0, 0, 0, 0])), + )], + }, + StorageChangeSet { + block: block1_hash, + changes: vec![( + StorageKey(alice_balance_key.to_vec()), + Some(StorageData(vec![190, 3, 0, 0, 0, 0, 0, 0])), + )], + }, + ]; + + // Query changes only up to block1 + let result = api.query_storage( + vec![StorageKey(alice_balance_key.to_vec())], + genesis_hash, + Some(block1_hash).into(), + ); + + 
assert_eq!(result.unwrap(), expected); + + // Query all changes + let result = api.query_storage( + vec![StorageKey(alice_balance_key.to_vec())], + genesis_hash, + None.into(), + ); + + expected.push(StorageChangeSet { + block: block2_hash, + changes: vec![( + StorageKey(alice_balance_key.to_vec()), + Some(StorageData(vec![148, 3, 0, 0, 0, 0, 0, 0])), + )], + }); + assert_eq!(result.unwrap(), expected); + } + + run_tests(Arc::new(test_client::new())); + run_tests(Arc::new(test_client::new_with_changes_trie())); } #[test] fn should_split_ranges() { - assert_eq!(split_range(1, None), (0..1, None)); - assert_eq!(split_range(100, None), (0..100, None)); - assert_eq!(split_range(1, Some(0)), (0..1, None)); - assert_eq!(split_range(100, Some(50)), (0..50, Some(50..100))); - assert_eq!(split_range(100, Some(99)), (0..99, Some(99..100))); + assert_eq!(split_range(1, None), (0..1, None)); + assert_eq!(split_range(100, None), (0..100, None)); + assert_eq!(split_range(1, Some(0)), (0..1, None)); + assert_eq!(split_range(100, Some(50)), (0..50, Some(50..100))); + assert_eq!(split_range(100, Some(99)), (0..99, Some(99..100))); } - #[test] fn should_return_runtime_version() { - let core = ::tokio::runtime::Runtime::new().unwrap(); + let core = ::tokio::runtime::Runtime::new().unwrap(); - let client = Arc::new(test_client::new()); - let api = State::new(client.clone(), Subscriptions::new(core.executor())); + let client = Arc::new(test_client::new()); + let api = State::new(client.clone(), Subscriptions::new(core.executor())); - assert_eq!( + assert_eq!( ::serde_json::to_string(&api.runtime_version(None.into()).unwrap()).unwrap(), r#"{"specName":"test","implName":"parity-test","authoringVersion":1,"specVersion":1,"implVersion":1,"apis":[["0xdf6acb689907609b",1],["0x37e397fc7c91f5e4",1],["0xd2bc9897eed08f15",1],["0x40fe3ad401f8959a",2],["0xc6e9a76309f39b09",1],["0xdd718d5cc53262d4",1],["0xf78b278be53f454c",1],["0x7801759919ee83e5",1]]}"# ); @@ -227,22 +240,22 @@ fn should_return_runtime_version() { #[test] fn should_notify_on_runtime_version_initially() { - let mut core = ::tokio::runtime::Runtime::new().unwrap(); - let (subscriber, id, transport) = Subscriber::new_test("test"); + let mut core = ::tokio::runtime::Runtime::new().unwrap(); + let (subscriber, id, transport) = Subscriber::new_test("test"); - { - let client = Arc::new(test_client::new()); - let api = State::new(client.clone(), Subscriptions::new(core.executor())); + { + let client = Arc::new(test_client::new()); + let api = State::new(client.clone(), Subscriptions::new(core.executor())); - api.subscribe_runtime_version(Default::default(), subscriber); + api.subscribe_runtime_version(Default::default(), subscriber); - // assert id assigned - assert_eq!(core.block_on(id), Ok(Ok(SubscriptionId::Number(1)))); - } + // assert id assigned + assert_eq!(core.block_on(id), Ok(Ok(SubscriptionId::Number(1)))); + } - // assert initial version sent. - let (notification, next) = core.block_on(transport.into_future()).unwrap(); - assert!(notification.is_some()); - // no more notifications on this channel - assert_eq!(core.block_on(next.into_future()).unwrap().0, None); + // assert initial version sent. 
+ let (notification, next) = core.block_on(transport.into_future()).unwrap(); + assert!(notification.is_some()); + // no more notifications on this channel + assert_eq!(core.block_on(next.into_future()).unwrap().0, None); } diff --git a/core/rpc/src/subscriptions.rs b/core/rpc/src/subscriptions.rs index 500f3dac45..d99f706c29 100644 --- a/core/rpc/src/subscriptions.rs +++ b/core/rpc/src/subscriptions.rs @@ -15,13 +15,19 @@ // along with Substrate. If not, see . use std::collections::HashMap; -use std::sync::{Arc, atomic::{self, AtomicUsize}}; +use std::sync::{ + atomic::{self, AtomicUsize}, + Arc, +}; +use crate::rpc::futures::sync::oneshot; +use crate::rpc::futures::{future, Future}; +use jsonrpc_pubsub::{ + typed::{Sink, Subscriber}, + SubscriptionId, +}; use log::warn; -use jsonrpc_pubsub::{SubscriptionId, typed::{Sink, Subscriber}}; use parking_lot::Mutex; -use crate::rpc::futures::sync::oneshot; -use crate::rpc::futures::{Future, future}; use tokio::runtime::TaskExecutor; type Id = u64; @@ -29,21 +35,21 @@ type Id = u64; /// Generate unique ids for subscriptions. #[derive(Clone, Debug)] pub struct IdProvider { - next_id: Arc, + next_id: Arc, } impl Default for IdProvider { - fn default() -> Self { - IdProvider { - next_id: Arc::new(AtomicUsize::new(1)), - } - } + fn default() -> Self { + IdProvider { + next_id: Arc::new(AtomicUsize::new(1)), + } + } } impl IdProvider { - /// Returns next id for the subscription. - pub fn next_id(&self) -> Id { - self.next_id.fetch_add(1, atomic::Ordering::AcqRel) as u64 - } + /// Returns next id for the subscription. + pub fn next_id(&self) -> Id { + self.next_id.fetch_add(1, atomic::Ordering::AcqRel) as u64 + } } /// Subscriptions manager. @@ -52,54 +58,55 @@ impl IdProvider { /// driving the sinks into completion. #[derive(Debug, Clone)] pub struct Subscriptions { - next_id: IdProvider, - active_subscriptions: Arc>>>, - executor: TaskExecutor, + next_id: IdProvider, + active_subscriptions: Arc>>>, + executor: TaskExecutor, } impl Subscriptions { - /// Creates new `Subscriptions` object. - pub fn new(executor: TaskExecutor) -> Self { - Subscriptions { - next_id: Default::default(), - active_subscriptions: Default::default(), - executor, - } - } + /// Creates new `Subscriptions` object. + pub fn new(executor: TaskExecutor) -> Self { + Subscriptions { + next_id: Default::default(), + active_subscriptions: Default::default(), + executor, + } + } - /// Creates new subscription for given subscriber. - /// - /// Second parameter is a function that converts Subscriber sink into a future. - /// This future will be driven to completion bu underlying event loop - /// or will be cancelled in case #cancel is invoked. - pub fn add(&self, subscriber: Subscriber, into_future: G) where - G: FnOnce(Sink) -> R, - R: future::IntoFuture, - F: future::Future + Send + 'static, - { - let id = self.next_id.next_id(); - if let Ok(sink) = subscriber.assign_id(id.into()) { - let (tx, rx) = oneshot::channel(); - let future = into_future(sink) - .into_future() - .select(rx.map_err(|e| warn!("Error timeing out: {:?}", e))) - .then(|_| Ok(())); + /// Creates new subscription for given subscriber. + /// + /// The second parameter is a function that converts the Subscriber sink into a future. + /// This future will be driven to completion by the underlying event loop + /// or will be cancelled in case #cancel is invoked.
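+    ///
+    /// A minimal usage sketch, mirroring how the state RPC handlers drive a
+    /// sink (the notification stream is a placeholder):
+    ///
+    /// ```ignore
+    /// subscriptions.add(subscriber, |sink| {
+    ///     sink.sink_map_err(|e| warn!("Error sending notifications: {:?}", e))
+    ///         .send_all(notification_stream)
+    ///         .map(|_| ())
+    /// });
+    /// ```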
+ pub fn add(&self, subscriber: Subscriber, into_future: G) + where + G: FnOnce(Sink) -> R, + R: future::IntoFuture, + F: future::Future + Send + 'static, + { + let id = self.next_id.next_id(); + if let Ok(sink) = subscriber.assign_id(id.into()) { + let (tx, rx) = oneshot::channel(); + let future = into_future(sink) + .into_future() + .select(rx.map_err(|e| warn!("Error timing out: {:?}", e))) + .then(|_| Ok(())); - self.active_subscriptions.lock().insert(id, tx); - self.executor.spawn(future); - } - } + self.active_subscriptions.lock().insert(id, tx); + self.executor.spawn(future); + } + } - /// Cancel subscription. - /// - /// Returns true if subscription existed or false otherwise. - pub fn cancel(&self, id: SubscriptionId) -> bool { - if let SubscriptionId::Number(id) = id { - if let Some(tx) = self.active_subscriptions.lock().remove(&id) { - let _ = tx.send(()); - return true; - } - } - false - } + /// Cancel subscription. + /// + /// Returns true if subscription existed or false otherwise. + pub fn cancel(&self, id: SubscriptionId) -> bool { + if let SubscriptionId::Number(id) = id { + if let Some(tx) = self.active_subscriptions.lock().remove(&id) { + let _ = tx.send(()); + return true; + } + } + false + } } diff --git a/core/rpc/src/system/error.rs b/core/rpc/src/system/error.rs index d3c7e8b333..4eb6fecd08 100644 --- a/core/rpc/src/system/error.rs +++ b/core/rpc/src/system/error.rs @@ -18,38 +18,38 @@ use error_chain::*; -use crate::rpc; use crate::errors; +use crate::rpc; use crate::system::helpers::Health; error_chain! { - errors { - /// Node is not fully functional - NotHealthy(h: Health) { - description("node is not healthy"), - display("Node is not fully functional: {}", h) - } - - /// Not implemented yet - Unimplemented { - description("not yet implemented"), - display("Method Not Implemented"), - } - } + errors { + /// Node is not fully functional + NotHealthy(h: Health) { + description("node is not healthy"), + display("Node is not fully functional: {}", h) + } + + /// Not implemented yet + Unimplemented { + description("not yet implemented"), + display("Method Not Implemented"), + } + } } const ERROR: i64 = 2000; impl From for rpc::Error { - fn from(e: Error) -> Self { - match e { - Error(ErrorKind::Unimplemented, _) => errors::unimplemented(), - Error(ErrorKind::NotHealthy(h), _) => rpc::Error { - code: rpc::ErrorCode::ServerError(ERROR + 1), - message: "node is not healthy".into(), - data:serde_json::to_value(h).ok(), - }, - e => errors::internal(e), - } - } + fn from(e: Error) -> Self { + match e { + Error(ErrorKind::Unimplemented, _) => errors::unimplemented(), + Error(ErrorKind::NotHealthy(h), _) => rpc::Error { + code: rpc::ErrorCode::ServerError(ERROR + 1), + message: "node is not healthy".into(), + data: serde_json::to_value(h).ok(), + }, + e => errors::internal(e), + } + } } diff --git a/core/rpc/src/system/helpers.rs b/core/rpc/src/system/helpers.rs index 9f64318d5d..0879e23c1e 100644 --- a/core/rpc/src/system/helpers.rs +++ b/core/rpc/src/system/helpers.rs @@ -16,9 +16,9 @@ //! Substrate system API helpers. +use serde_derive::Serialize; +use serde_json::{map::Map, Value}; use std::fmt; -use serde_derive::{Serialize}; -use serde_json::{Value, map::Map}; /// Node properties pub type Properties = Map; @@ -26,81 +26,86 @@ pub type Properties = Map; /// Running node's static details. #[derive(Clone, Debug)] pub struct SystemInfo { - /// Implementation name. - pub impl_name: String, - /// Implementation version. - pub impl_version: String, - /// Chain name.
- pub chain_name: String, - /// A custom set of properties defined in the chain spec. - pub properties: Properties, + /// Implementation name. + pub impl_name: String, + /// Implementation version. + pub impl_version: String, + /// Chain name. + pub chain_name: String, + /// A custom set of properties defined in the chain spec. + pub properties: Properties, } /// Health struct returned by the RPC #[derive(Debug, PartialEq, Serialize)] #[serde(rename_all = "camelCase")] pub struct Health { - /// Number of connected peers - pub peers: usize, - /// Is the node syncing - pub is_syncing: bool, - /// Should this node have any peers - /// - /// Might be false for local chains or when running without discovery. - pub should_have_peers: bool, + /// Number of connected peers + pub peers: usize, + /// Is the node syncing + pub is_syncing: bool, + /// Should this node have any peers + /// + /// Might be false for local chains or when running without discovery. + pub should_have_peers: bool, } /// Network Peer information #[derive(Debug, PartialEq, Serialize)] #[serde(rename_all = "camelCase")] pub struct PeerInfo { - /// Peer ID - pub peer_id: String, - /// Roles - pub roles: String, - /// Protocol version - pub protocol_version: u32, - /// Peer best block hash - pub best_hash: Hash, - /// Peer best block number - pub best_number: Number, + /// Peer ID + pub peer_id: String, + /// Roles + pub roles: String, + /// Protocol version + pub protocol_version: u32, + /// Peer best block hash + pub best_hash: Hash, + /// Peer best block number + pub best_number: Number, } impl fmt::Display for Health { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!(fmt, "{} peers ({})", self.peers, if self.is_syncing { - "syncing" - } else { "idle" }) - } + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!( + fmt, + "{} peers ({})", + self.peers, + if self.is_syncing { "syncing" } else { "idle" } + ) + } } #[cfg(test)] mod tests { - use super::*; + use super::*; - #[test] - fn should_serialize_health() { - assert_eq!( - ::serde_json::to_string(&Health { - peers: 1, - is_syncing: false, - should_have_peers: true, - }).unwrap(), - r#"{"peers":1,"isSyncing":false,"shouldHavePeers":true}"#, - ); - } + #[test] + fn should_serialize_health() { + assert_eq!( + ::serde_json::to_string(&Health { + peers: 1, + is_syncing: false, + should_have_peers: true, + }) + .unwrap(), + r#"{"peers":1,"isSyncing":false,"shouldHavePeers":true}"#, + ); + } - #[test] - fn should_serialize_peer_info() { - assert_eq!( - ::serde_json::to_string(&PeerInfo { - peer_id: "2".into(), - roles: "a".into(), - protocol_version: 2, - best_hash: 5u32, - best_number: 6u32, - }).unwrap(), - r#"{"peerId":"2","roles":"a","protocolVersion":2,"bestHash":5,"bestNumber":6}"#, - ); - } + #[test] + fn should_serialize_peer_info() { + assert_eq!( + ::serde_json::to_string(&PeerInfo { + peer_id: "2".into(), + roles: "a".into(), + protocol_version: 2, + best_hash: 5u32, + best_number: 6u32, + }) + .unwrap(), + r#"{"peerId":"2","roles":"a","protocolVersion":2,"bestHash":5,"bestNumber":6}"#, + ); + } } diff --git a/core/rpc/src/system/mod.rs b/core/rpc/src/system/mod.rs index 331d9cd85b..5775c298c8 100644 --- a/core/rpc/src/system/mod.rs +++ b/core/rpc/src/system/mod.rs @@ -22,111 +22,116 @@ mod helpers; #[cfg(test)] mod tests; -use std::sync::Arc; use jsonrpc_derive::rpc; use network; use runtime_primitives::traits::{self, Header as HeaderT}; +use std::sync::Arc; use self::error::Result; -pub use self::helpers::{Properties, SystemInfo, 
Health, PeerInfo}; +pub use self::helpers::{Health, PeerInfo, Properties, SystemInfo}; /// Substrate system RPC API #[rpc] pub trait SystemApi { - /// Get the node's implementation name. Plain old string. - #[rpc(name = "system_name")] - fn system_name(&self) -> Result; - - /// Get the node implementation's version. Should be a semver string. - #[rpc(name = "system_version")] - fn system_version(&self) -> Result; - - /// Get the chain's type. Given as a string identifier. - #[rpc(name = "system_chain")] - fn system_chain(&self) -> Result; - - /// Get a custom set of properties as a JSON object, defined in the chain spec. - #[rpc(name = "system_properties")] - fn system_properties(&self) -> Result; - - /// Return health status of the node. - /// - /// Node is considered healthy if it is: - /// - connected to some peers (unless running in dev mode) - /// - not performing a major sync - #[rpc(name = "system_health")] - fn system_health(&self) -> Result; - - /// Returns currently connected peers - #[rpc(name = "system_peers")] - fn system_peers(&self) -> Result>>; - - /// Returns current state of the network. - /// - /// **Warning**: This API is not stable. - // TODO: make this stable and move structs https://github.com/paritytech/substrate/issues/1890 - #[rpc(name = "system_networkState")] - fn system_network_state(&self) -> Result; + /// Get the node's implementation name. Plain old string. + #[rpc(name = "system_name")] + fn system_name(&self) -> Result; + + /// Get the node implementation's version. Should be a semver string. + #[rpc(name = "system_version")] + fn system_version(&self) -> Result; + + /// Get the chain's type. Given as a string identifier. + #[rpc(name = "system_chain")] + fn system_chain(&self) -> Result; + + /// Get a custom set of properties as a JSON object, defined in the chain spec. + #[rpc(name = "system_properties")] + fn system_properties(&self) -> Result; + + /// Return health status of the node. + /// + /// Node is considered healthy if it is: + /// - connected to some peers (unless running in dev mode) + /// - not performing a major sync + #[rpc(name = "system_health")] + fn system_health(&self) -> Result; + + /// Returns currently connected peers + #[rpc(name = "system_peers")] + fn system_peers(&self) -> Result>>; + + /// Returns current state of the network. + /// + /// **Warning**: This API is not stable. + // TODO: make this stable and move structs https://github.com/paritytech/substrate/issues/1890 + #[rpc(name = "system_networkState")] + fn system_network_state(&self) -> Result; } /// System API implementation pub struct System { - info: SystemInfo, - sync: Arc>, - should_have_peers: bool, + info: SystemInfo, + sync: Arc>, + should_have_peers: bool, } impl System { - /// Creates new `System` given the `SystemInfo`. - pub fn new( - info: SystemInfo, - sync: Arc>, - should_have_peers: bool, - ) -> Self { - System { - info, - should_have_peers, - sync, - } - } + /// Creates new `System` given the `SystemInfo`. 
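+    ///
+    /// The `should_have_peers` flag feeds into `system_health`: when it is
+    /// `false` (e.g. a local chain or a node running without discovery), the
+    /// node is considered healthy even with zero peers. A hedged construction
+    /// sketch, following the test helper later in this diff (arguments are
+    /// placeholders):
+    ///
+    /// ```ignore
+    /// let system = System::new(system_info, Arc::new(sync_provider), true);
+    /// ```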
+ pub fn new( + info: SystemInfo, + sync: Arc>, + should_have_peers: bool, + ) -> Self { + System { + info, + should_have_peers, + sync, + } + } } impl SystemApi::Number> for System { - fn system_name(&self) -> Result { - Ok(self.info.impl_name.clone()) - } - - fn system_version(&self) -> Result { - Ok(self.info.impl_version.clone()) - } - - fn system_chain(&self) -> Result { - Ok(self.info.chain_name.clone()) - } - - fn system_properties(&self) -> Result { - Ok(self.info.properties.clone()) - } - - fn system_health(&self) -> Result { - Ok(Health { - peers: self.sync.peers().len(), - is_syncing: self.sync.is_major_syncing(), - should_have_peers: self.should_have_peers, - }) - } - - fn system_peers(&self) -> Result::Number>>> { - Ok(self.sync.peers().into_iter().map(|(peer_id, p)| PeerInfo { - peer_id: peer_id.to_base58(), - roles: format!("{:?}", p.roles), - protocol_version: p.protocol_version, - best_hash: p.best_hash, - best_number: p.best_number, - }).collect()) - } - - fn system_network_state(&self) -> Result { - Ok(self.sync.network_state()) - } + fn system_name(&self) -> Result { + Ok(self.info.impl_name.clone()) + } + + fn system_version(&self) -> Result { + Ok(self.info.impl_version.clone()) + } + + fn system_chain(&self) -> Result { + Ok(self.info.chain_name.clone()) + } + + fn system_properties(&self) -> Result { + Ok(self.info.properties.clone()) + } + + fn system_health(&self) -> Result { + Ok(Health { + peers: self.sync.peers().len(), + is_syncing: self.sync.is_major_syncing(), + should_have_peers: self.should_have_peers, + }) + } + + fn system_peers(&self) -> Result::Number>>> { + Ok(self + .sync + .peers() + .into_iter() + .map(|(peer_id, p)| PeerInfo { + peer_id: peer_id.to_base58(), + roles: format!("{:?}", p.roles), + protocol_version: p.protocol_version, + best_hash: p.best_hash, + best_number: p.best_number, + }) + .collect()) + } + + fn system_network_state(&self) -> Result { + Ok(self.sync.network_state()) + } } diff --git a/core/rpc/src/system/tests.rs b/core/rpc/src/system/tests.rs index b4b71a7937..b731e403cf 100644 --- a/core/rpc/src/system/tests.rs +++ b/core/rpc/src/system/tests.rs @@ -16,200 +16,203 @@ use super::*; -use network::{self, ProtocolStatus, PeerId, PeerInfo as NetworkPeerInfo}; -use network::config::Roles; -use test_client::runtime::Block; use assert_matches::assert_matches; use futures::sync::mpsc; +use network::config::Roles; +use network::{self, PeerId, PeerInfo as NetworkPeerInfo, ProtocolStatus}; +use test_client::runtime::Block; struct Status { - pub peers: usize, - pub is_syncing: bool, - pub is_dev: bool, - pub peer_id: PeerId, + pub peers: usize, + pub is_syncing: bool, + pub is_dev: bool, + pub peer_id: PeerId, } impl Default for Status { - fn default() -> Status { - Status { - peer_id: PeerId::random(), - peers: 0, - is_syncing: false, - is_dev: false, - } - } + fn default() -> Status { + Status { + peer_id: PeerId::random(), + peers: 0, + is_syncing: false, + is_dev: false, + } + } } impl network::SyncProvider for Status { - fn status(&self) -> mpsc::UnboundedReceiver> { - let (_sink, stream) = mpsc::unbounded(); - stream - } - - fn network_state(&self) -> network::NetworkState { - network::NetworkState { - peer_id: String::new(), - listened_addresses: Default::default(), - external_addresses: Default::default(), - connected_peers: Default::default(), - not_connected_peers: Default::default(), - average_download_per_sec: 0, - average_upload_per_sec: 0, - peerset: serde_json::Value::Null, - } - } - - fn peers(&self) -> Vec<(PeerId, 
NetworkPeerInfo)> { - let mut peers = vec![]; - for _peer in 0..self.peers { - peers.push( - (self.peer_id.clone(), NetworkPeerInfo { - roles: Roles::FULL, - protocol_version: 1, - best_hash: Default::default(), - best_number: 1 - }) - ); - } - peers - } - - fn is_major_syncing(&self) -> bool { - self.is_syncing - } + fn status(&self) -> mpsc::UnboundedReceiver> { + let (_sink, stream) = mpsc::unbounded(); + stream + } + + fn network_state(&self) -> network::NetworkState { + network::NetworkState { + peer_id: String::new(), + listened_addresses: Default::default(), + external_addresses: Default::default(), + connected_peers: Default::default(), + not_connected_peers: Default::default(), + average_download_per_sec: 0, + average_upload_per_sec: 0, + peerset: serde_json::Value::Null, + } + } + + fn peers(&self) -> Vec<(PeerId, NetworkPeerInfo)> { + let mut peers = vec![]; + for _peer in 0..self.peers { + peers.push(( + self.peer_id.clone(), + NetworkPeerInfo { + roles: Roles::FULL, + protocol_version: 1, + best_hash: Default::default(), + best_number: 1, + }, + )); + } + peers + } + + fn is_major_syncing(&self) -> bool { + self.is_syncing + } } - fn api>>(sync: T) -> System { - let status = sync.into().unwrap_or_default(); - let should_have_peers = !status.is_dev; - System::new(SystemInfo { - impl_name: "testclient".into(), - impl_version: "0.2.0".into(), - chain_name: "testchain".into(), - properties: Default::default(), - }, Arc::new(status), should_have_peers) + let status = sync.into().unwrap_or_default(); + let should_have_peers = !status.is_dev; + System::new( + SystemInfo { + impl_name: "testclient".into(), + impl_version: "0.2.0".into(), + chain_name: "testchain".into(), + properties: Default::default(), + }, + Arc::new(status), + should_have_peers, + ) } #[test] fn system_name_works() { - assert_eq!( - api(None).system_name().unwrap(), - "testclient".to_owned() - ); + assert_eq!(api(None).system_name().unwrap(), "testclient".to_owned()); } #[test] fn system_version_works() { - assert_eq!( - api(None).system_version().unwrap(), - "0.2.0".to_owned() - ); + assert_eq!(api(None).system_version().unwrap(), "0.2.0".to_owned()); } #[test] fn system_chain_works() { - assert_eq!( - api(None).system_chain().unwrap(), - "testchain".to_owned() - ); + assert_eq!(api(None).system_chain().unwrap(), "testchain".to_owned()); } #[test] fn system_properties_works() { - assert_eq!( - api(None).system_properties().unwrap(), - serde_json::map::Map::new() - ); + assert_eq!( + api(None).system_properties().unwrap(), + serde_json::map::Map::new() + ); } #[test] fn system_health() { - assert_matches!( - api(None).system_health().unwrap(), - Health { - peers: 0, - is_syncing: false, - should_have_peers: true, - } - ); - - assert_matches!( - api(Status { - peer_id: PeerId::random(), - peers: 5, - is_syncing: true, - is_dev: true, - }).system_health().unwrap(), - Health { - peers: 5, - is_syncing: true, - should_have_peers: false, - } - ); - - assert_eq!( - api(Status { - peer_id: PeerId::random(), - peers: 5, - is_syncing: false, - is_dev: false, - }).system_health().unwrap(), - Health { - peers: 5, - is_syncing: false, - should_have_peers: true, - } - ); - - assert_eq!( - api(Status { - peer_id: PeerId::random(), - peers: 0, - is_syncing: false, - is_dev: true, - }).system_health().unwrap(), - Health { - peers: 0, - is_syncing: false, - should_have_peers: false, - } - ); + assert_matches!( + api(None).system_health().unwrap(), + Health { + peers: 0, + is_syncing: false, + should_have_peers: true, + } + ); + + 
assert_matches!( + api(Status { + peer_id: PeerId::random(), + peers: 5, + is_syncing: true, + is_dev: true, + }) + .system_health() + .unwrap(), + Health { + peers: 5, + is_syncing: true, + should_have_peers: false, + } + ); + + assert_eq!( + api(Status { + peer_id: PeerId::random(), + peers: 5, + is_syncing: false, + is_dev: false, + }) + .system_health() + .unwrap(), + Health { + peers: 5, + is_syncing: false, + should_have_peers: true, + } + ); + + assert_eq!( + api(Status { + peer_id: PeerId::random(), + peers: 0, + is_syncing: false, + is_dev: true, + }) + .system_health() + .unwrap(), + Health { + peers: 0, + is_syncing: false, + should_have_peers: false, + } + ); } #[test] fn system_peers() { - let peer_id = PeerId::random(); - assert_eq!( - api(Status { - peer_id: peer_id.clone(), - peers: 1, - is_syncing: false, - is_dev: true, - }).system_peers().unwrap(), - vec![PeerInfo { - peer_id: peer_id.to_base58(), - roles: "FULL".into(), - protocol_version: 1, - best_hash: Default::default(), - best_number: 1u64, - }] - ); + let peer_id = PeerId::random(); + assert_eq!( + api(Status { + peer_id: peer_id.clone(), + peers: 1, + is_syncing: false, + is_dev: true, + }) + .system_peers() + .unwrap(), + vec![PeerInfo { + peer_id: peer_id.to_base58(), + roles: "FULL".into(), + protocol_version: 1, + best_hash: Default::default(), + best_number: 1u64, + }] + ); } #[test] fn system_network_state() { - assert_eq!( - api(None).system_network_state().unwrap(), - network::NetworkState { - peer_id: String::new(), - listened_addresses: Default::default(), - external_addresses: Default::default(), - connected_peers: Default::default(), - not_connected_peers: Default::default(), - average_download_per_sec: 0, - average_upload_per_sec: 0, - peerset: serde_json::Value::Null, - } - ); + assert_eq!( + api(None).system_network_state().unwrap(), + network::NetworkState { + peer_id: String::new(), + listened_addresses: Default::default(), + external_addresses: Default::default(), + connected_peers: Default::default(), + not_connected_peers: Default::default(), + average_download_per_sec: 0, + average_upload_per_sec: 0, + peerset: serde_json::Value::Null, + } + ); } diff --git a/core/serializer/src/lib.rs b/core/serializer/src/lib.rs index 2586d49f00..60270b6bad 100644 --- a/core/serializer/src/lib.rs +++ b/core/serializer/src/lib.rs @@ -21,21 +21,24 @@ #![warn(missing_docs)] -pub use serde_json::{from_str, from_slice, from_reader, Result, Error}; +pub use serde_json::{from_reader, from_slice, from_str, Error, Result}; const PROOF: &str = "Serializers are infallible; qed"; /// Serialize the given data structure as a pretty-printed String of JSON. pub fn to_string_pretty(value: &T) -> String { - serde_json::to_string_pretty(value).expect(PROOF) + serde_json::to_string_pretty(value).expect(PROOF) } /// Serialize the given data structure as a JSON byte vector. pub fn encode(value: &T) -> Vec { - serde_json::to_vec(value).expect(PROOF) + serde_json::to_vec(value).expect(PROOF) } /// Serialize the given data structure as JSON into the IO stream. -pub fn to_writer(writer: W, value: &T) -> Result<()> { - serde_json::to_writer(writer, value) +pub fn to_writer( + writer: W, + value: &T, +) -> Result<()> { + serde_json::to_writer(writer, value) } diff --git a/core/service/src/chain_ops.rs b/core/service/src/chain_ops.rs index 36cbee9039..a26b4a72f4 100644 --- a/core/service/src/chain_ops.rs +++ b/core/service/src/chain_ops.rs @@ -16,202 +16,218 @@ //! Chain utilities. 
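+//!
+//! `export_blocks` writes a range of blocks to a binary or JSON stream;
+//! `import_blocks` reads them back from the binary form and feeds them to
+//! the import queue for verification and import.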
-use std::{self, io::{Read, Write}}; use futures::Future; use log::{info, warn}; +use std::{ + self, + io::{Read, Write}, +}; -use runtime_primitives::generic::{SignedBlock, BlockId}; -use runtime_primitives::traits::{As, Block, Header, NumberFor}; use consensus_common::import_queue::{ImportQueue, IncomingBlock, Link}; use network::message; +use runtime_primitives::generic::{BlockId, SignedBlock}; +use runtime_primitives::traits::{As, Block, Header, NumberFor}; -use consensus_common::BlockOrigin; -use crate::components::{self, Components, ServiceFactory, FactoryFullConfiguration, FactoryBlockNumber, RuntimeGenesis}; +use crate::chain_spec::ChainSpec; +use crate::components::{ + self, Components, FactoryBlockNumber, FactoryFullConfiguration, RuntimeGenesis, ServiceFactory, +}; +use crate::error; use crate::new_client; +use consensus_common::BlockOrigin; use parity_codec::{Decode, Encode}; -use crate::error; -use crate::chain_spec::ChainSpec; /// Export a range of blocks to a binary stream. pub fn export_blocks( - config: FactoryFullConfiguration, - exit: E, - mut output: W, - from: FactoryBlockNumber, - to: Option>, - json: bool + config: FactoryFullConfiguration, + exit: E, + mut output: W, + from: FactoryBlockNumber, + to: Option>, + json: bool, ) -> error::Result<()> - where - F: ServiceFactory, - E: Future + Send + 'static, - W: Write, +where + F: ServiceFactory, + E: Future + Send + 'static, + W: Write, { - let client = new_client::(&config)?; - let mut block = from; - - let last = match to { - Some(v) if v == As::sa(0) => As::sa(1), - Some(v) => v, - None => client.info()?.chain.best_number, - }; - - if last < block { - return Err("Invalid block range specified".into()); - } - - let (exit_send, exit_recv) = std::sync::mpsc::channel(); - ::std::thread::spawn(move || { - let _ = exit.wait(); - let _ = exit_send.send(()); - }); - info!("Exporting blocks from #{} to #{}", block, last); - if !json { - let last_: u64 = last.as_(); - let block_: u64 = block.as_(); - let len: u64 = last_ - block_ + 1; - output.write(&len.encode())?; - } - - loop { - if exit_recv.try_recv().is_ok() { - break; - } - match client.block(&BlockId::number(block))? { - Some(block) => { - if json { - serde_json::to_writer(&mut output, &block) - .map_err(|e| format!("Error writing JSON: {}", e))?; - } else { - output.write(&block.encode())?; - } - }, - None => break, - } - if block.as_() % 10000 == 0 { - info!("#{}", block); - } - if block == last { - break; - } - block += As::sa(1); - } - Ok(()) + let client = new_client::(&config)?; + let mut block = from; + + let last = match to { + Some(v) if v == As::sa(0) => As::sa(1), + Some(v) => v, + None => client.info()?.chain.best_number, + }; + + if last < block { + return Err("Invalid block range specified".into()); + } + + let (exit_send, exit_recv) = std::sync::mpsc::channel(); + ::std::thread::spawn(move || { + let _ = exit.wait(); + let _ = exit_send.send(()); + }); + info!("Exporting blocks from #{} to #{}", block, last); + if !json { + let last_: u64 = last.as_(); + let block_: u64 = block.as_(); + let len: u64 = last_ - block_ + 1; + output.write(&len.encode())?; + } + + loop { + if exit_recv.try_recv().is_ok() { + break; + } + match client.block(&BlockId::number(block))? 
{ + Some(block) => { + if json { + serde_json::to_writer(&mut output, &block) + .map_err(|e| format!("Error writing JSON: {}", e))?; + } else { + output.write(&block.encode())?; + } + } + None => break, + } + if block.as_() % 10000 == 0 { + info!("#{}", block); + } + if block == last { + break; + } + block += As::sa(1); + } + Ok(()) } struct WaitLink { - wait_send: std::sync::mpsc::Sender<()>, + wait_send: std::sync::mpsc::Sender<()>, } impl WaitLink { - fn new(wait_send: std::sync::mpsc::Sender<()>) -> WaitLink { - WaitLink { - wait_send, - } - } + fn new(wait_send: std::sync::mpsc::Sender<()>) -> WaitLink { + WaitLink { wait_send } + } } impl Link for WaitLink { - fn block_imported(&self, _hash: &B::Hash, _number: NumberFor) { - self.wait_send.send(()) + fn block_imported(&self, _hash: &B::Hash, _number: NumberFor) { + self.wait_send.send(()) .expect("Unable to notify main process; if the main process panicked then this thread would already be dead as well. qed."); - } + } } /// Import blocks from a binary stream. pub fn import_blocks( - mut config: FactoryFullConfiguration, - exit: E, - mut input: R + mut config: FactoryFullConfiguration, + exit: E, + mut input: R, ) -> error::Result<()> - where F: ServiceFactory, E: Future + Send + 'static, R: Read, +where + F: ServiceFactory, + E: Future + Send + 'static, + R: Read, { - let client = new_client::(&config)?; - // FIXME #1134 this shouldn't need a mutable config. - let queue = components::FullComponents::::build_import_queue(&mut config, client.clone())?; - - let (wait_send, wait_recv) = std::sync::mpsc::channel(); - let wait_link = WaitLink::new(wait_send); - queue.start(Box::new(wait_link))?; - - let (exit_send, exit_recv) = std::sync::mpsc::channel(); - ::std::thread::spawn(move || { - let _ = exit.wait(); - let _ = exit_send.send(()); - }); - - let count: u64 = Decode::decode(&mut input).ok_or("Error reading file")?; - info!("Importing {} blocks", count); - let mut block_count = 0; - for b in 0 .. count { - if exit_recv.try_recv().is_ok() { - break; - } - if let Some(signed) = SignedBlock::::decode(&mut input) { - let (header, extrinsics) = signed.block.deconstruct(); - let hash = header.hash(); - let block = message::BlockData:: { - hash: hash, - justification: signed.justification, - header: Some(header), - body: Some(extrinsics), - receipt: None, - message_queue: None - }; - // import queue handles verification and importing it into the client - queue.import_blocks(BlockOrigin::File, vec![ - IncomingBlock::{ - hash: block.hash, - header: block.header, - body: block.body, - justification: block.justification, - origin: None, - } - ]); - } else { - warn!("Error reading block data at {}.", b); - break; - } - - block_count = b; - if b % 1000 == 0 { - info!("#{}", b); - } - } - - let mut blocks_imported = 0; - while blocks_imported < count { - wait_recv.recv() + let client = new_client::(&config)?; + // FIXME #1134 this shouldn't need a mutable config. 
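+    // The import pipeline below: build the import queue, register a `WaitLink`
+    // so every imported block pings `wait_recv`, decode a `u64` block count and
+    // then that many SCALE-encoded `SignedBlock`s from `input`, and hand each
+    // one to the queue as an `IncomingBlock` with `BlockOrigin::File`.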
+ let queue = components::FullComponents::::build_import_queue(&mut config, client.clone())?; + + let (wait_send, wait_recv) = std::sync::mpsc::channel(); + let wait_link = WaitLink::new(wait_send); + queue.start(Box::new(wait_link))?; + + let (exit_send, exit_recv) = std::sync::mpsc::channel(); + ::std::thread::spawn(move || { + let _ = exit.wait(); + let _ = exit_send.send(()); + }); + + let count: u64 = Decode::decode(&mut input).ok_or("Error reading file")?; + info!("Importing {} blocks", count); + let mut block_count = 0; + for b in 0..count { + if exit_recv.try_recv().is_ok() { + break; + } + if let Some(signed) = SignedBlock::::decode(&mut input) { + let (header, extrinsics) = signed.block.deconstruct(); + let hash = header.hash(); + let block = message::BlockData:: { + hash: hash, + justification: signed.justification, + header: Some(header), + body: Some(extrinsics), + receipt: None, + message_queue: None, + }; + // import queue handles verification and importing it into the client + queue.import_blocks( + BlockOrigin::File, + vec![IncomingBlock:: { + hash: block.hash, + header: block.header, + body: block.body, + justification: block.justification, + origin: None, + }], + ); + } else { + warn!("Error reading block data at {}.", b); + break; + } + + block_count = b; + if b % 1000 == 0 { + info!("#{}", b); + } + } + + let mut blocks_imported = 0; + while blocks_imported < count { + wait_recv.recv() .expect("Importing thread has panicked. Then the main process will die before this can be reached. qed."); - blocks_imported += 1; - } + blocks_imported += 1; + } - info!("Imported {} blocks. Best: #{}", block_count, client.info()?.chain.best_number); + info!( + "Imported {} blocks. Best: #{}", + block_count, + client.info()?.chain.best_number + ); - Ok(()) + Ok(()) } /// Revert the chain. pub fn revert_chain( - config: FactoryFullConfiguration, - blocks: FactoryBlockNumber + config: FactoryFullConfiguration, + blocks: FactoryBlockNumber, ) -> error::Result<()> - where F: ServiceFactory, +where + F: ServiceFactory, { - let client = new_client::(&config)?; - let reverted = client.revert(blocks)?; - let info = client.info()?.chain; - - if reverted.as_() == 0 { - info!("There aren't any non-finalized blocks to revert."); - } else { - info!("Reverted {} blocks. Best: #{} ({})", reverted, info.best_number, info.best_hash); - } - Ok(()) + let client = new_client::(&config)?; + let reverted = client.revert(blocks)?; + let info = client.info()?.chain; + + if reverted.as_() == 0 { + info!("There aren't any non-finalized blocks to revert."); + } else { + info!( + "Reverted {} blocks. Best: #{} ({})", + reverted, info.best_number, info.best_hash + ); + } + Ok(()) } /// Build a chain spec json pub fn build_spec(spec: ChainSpec, raw: bool) -> error::Result - where G: RuntimeGenesis, +where + G: RuntimeGenesis, { - Ok(spec.to_json(raw)?) + Ok(spec.to_json(raw)?) } diff --git a/core/service/src/chain_spec.rs b/core/service/src/chain_spec.rs index 78aad64dd0..3741fb9f12 100644 --- a/core/service/src/chain_spec.rs +++ b/core/service/src/chain_spec.rs @@ -16,85 +16,95 @@ //! Substrate chain configurations. 
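//!
//! A `ChainSpec` pairs chain metadata (name, id, boot nodes, telemetry
//! endpoints, properties) with a `GenesisSource`, which may be a JSON file, an
//! embedded byte slice or a factory function. A rough loading sketch (the
//! genesis type `MyGenesis` and the path are hypothetical):
//!
//! ```ignore
//! let spec = ChainSpec::<MyGenesis>::from_json_file("specs/local.json".into())?;
//! println!("Loaded chain {} ({})", spec.name(), spec.id());
//! ```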
+use crate::components::RuntimeGenesis; +use network::Multiaddr; +use primitives::storage::{StorageData, StorageKey}; +use runtime_primitives::{BuildStorage, ChildrenStorageOverlay, StorageOverlay}; +use serde_derive::{Deserialize, Serialize}; +use serde_json as json; use std::collections::HashMap; use std::fs::File; use std::path::PathBuf; -use serde_derive::{Serialize, Deserialize}; -use primitives::storage::{StorageKey, StorageData}; -use runtime_primitives::{BuildStorage, StorageOverlay, ChildrenStorageOverlay}; -use serde_json as json; -use crate::components::RuntimeGenesis; -use network::Multiaddr; use tel::TelemetryEndpoints; enum GenesisSource { - File(PathBuf), - Embedded(&'static [u8]), - Factory(fn() -> G), + File(PathBuf), + Embedded(&'static [u8]), + Factory(fn() -> G), } impl Clone for GenesisSource { - fn clone(&self) -> Self { - match *self { - GenesisSource::File(ref path) => GenesisSource::File(path.clone()), - GenesisSource::Embedded(d) => GenesisSource::Embedded(d), - GenesisSource::Factory(f) => GenesisSource::Factory(f), - } - } + fn clone(&self) -> Self { + match *self { + GenesisSource::File(ref path) => GenesisSource::File(path.clone()), + GenesisSource::Embedded(d) => GenesisSource::Embedded(d), + GenesisSource::Factory(f) => GenesisSource::Factory(f), + } + } } impl GenesisSource { - fn resolve(&self) -> Result, String> { - #[derive(Serialize, Deserialize)] - struct GenesisContainer { - genesis: Genesis, - } - - match *self { - GenesisSource::File(ref path) => { - let file = File::open(path).map_err(|e| format!("Error opening spec file: {}", e))?; - let genesis: GenesisContainer = json::from_reader(file).map_err(|e| format!("Error parsing spec file: {}", e))?; - Ok(genesis.genesis) - }, - GenesisSource::Embedded(buf) => { - let genesis: GenesisContainer = json::from_reader(buf).map_err(|e| format!("Error parsing embedded file: {}", e))?; - Ok(genesis.genesis) - }, - GenesisSource::Factory(f) => Ok(Genesis::Runtime(f())), - } - } + fn resolve(&self) -> Result, String> { + #[derive(Serialize, Deserialize)] + struct GenesisContainer { + genesis: Genesis, + } + + match *self { + GenesisSource::File(ref path) => { + let file = + File::open(path).map_err(|e| format!("Error opening spec file: {}", e))?; + let genesis: GenesisContainer = json::from_reader(file) + .map_err(|e| format!("Error parsing spec file: {}", e))?; + Ok(genesis.genesis) + } + GenesisSource::Embedded(buf) => { + let genesis: GenesisContainer = json::from_reader(buf) + .map_err(|e| format!("Error parsing embedded file: {}", e))?; + Ok(genesis.genesis) + } + GenesisSource::Factory(f) => Ok(Genesis::Runtime(f())), + } + } } impl<'a, G: RuntimeGenesis> BuildStorage for &'a ChainSpec { - fn build_storage(self) -> Result<(StorageOverlay, ChildrenStorageOverlay), String> { - match self.genesis.resolve()? { - Genesis::Runtime(gc) => gc.build_storage(), - Genesis::Raw(map) => Ok((map.into_iter().map(|(k, v)| (k.0, v.0)).collect(), Default::default())), - } - } - fn assimilate_storage(self, _: &mut StorageOverlay, _: &mut ChildrenStorageOverlay) -> Result<(), String> { - Err("`assimilate_storage` not implemented for `ChainSpec`.".into()) - } + fn build_storage(self) -> Result<(StorageOverlay, ChildrenStorageOverlay), String> { + match self.genesis.resolve()? 
{ + Genesis::Runtime(gc) => gc.build_storage(), + Genesis::Raw(map) => Ok(( + map.into_iter().map(|(k, v)| (k.0, v.0)).collect(), + Default::default(), + )), + } + } + fn assimilate_storage( + self, + _: &mut StorageOverlay, + _: &mut ChildrenStorageOverlay, + ) -> Result<(), String> { + Err("`assimilate_storage` not implemented for `ChainSpec`.".into()) + } } #[derive(Serialize, Deserialize)] #[serde(rename_all = "camelCase")] #[serde(deny_unknown_fields)] enum Genesis { - Runtime(G), - Raw(HashMap), + Runtime(G), + Raw(HashMap), } #[derive(Serialize, Deserialize, Clone)] #[serde(rename_all = "camelCase")] struct ChainSpecFile { - pub name: String, - pub id: String, - pub boot_nodes: Vec, - pub telemetry_endpoints: Option, - pub protocol_id: Option, - pub consensus_engine: Option, - pub properties: Option, + pub name: String, + pub id: String, + pub boot_nodes: Vec, + pub telemetry_endpoints: Option, + pub protocol_id: Option, + pub consensus_engine: Option, + pub properties: Option, } /// Arbitrary properties defined in chain spec as a JSON object @@ -102,130 +112,136 @@ pub type Properties = json::map::Map; /// A configuration of a chain. Can be used to build a genesis block. pub struct ChainSpec { - spec: ChainSpecFile, - genesis: GenesisSource, + spec: ChainSpecFile, + genesis: GenesisSource, } impl Clone for ChainSpec { - fn clone(&self) -> Self { - ChainSpec { - spec: self.spec.clone(), - genesis: self.genesis.clone(), - } - } + fn clone(&self) -> Self { + ChainSpec { + spec: self.spec.clone(), + genesis: self.genesis.clone(), + } + } } impl ChainSpec { - /// A list of bootnode addresses. - pub fn boot_nodes(&self) -> &[String] { - &self.spec.boot_nodes - } - - /// Spec name. - pub fn name(&self) -> &str { - &self.spec.name - } - - /// Spec id. - pub fn id(&self) -> &str { - &self.spec.id - } - - /// Telemetry endpoints (if any) - pub fn telemetry_endpoints(&self) -> &Option { - &self.spec.telemetry_endpoints - } - - /// Network protocol id. - pub fn protocol_id(&self) -> Option<&str> { - self.spec.protocol_id.as_ref().map(String::as_str) - } - - /// Name of the consensus engine. - pub fn consensus_engine(&self) -> Option<&str> { - self.spec.consensus_engine.as_ref().map(String::as_str) - } - - /// Additional loosly-typed properties of the chain. - pub fn properties(&self) -> Properties { - // Return an empty JSON object if 'properties' not defined in config - self.spec.properties.as_ref().unwrap_or(&json::map::Map::new()).clone() - } - - /// Add a bootnode to the list. - pub fn add_boot_node(&mut self, addr: Multiaddr) { - self.spec.boot_nodes.push(addr.to_string()) - } - - /// Parse json content into a `ChainSpec` - pub fn from_embedded(json: &'static [u8]) -> Result { - let spec = json::from_slice(json).map_err(|e| format!("Error parsing spec file: {}", e))?; - Ok(ChainSpec { - spec, - genesis: GenesisSource::Embedded(json), - }) - } - - /// Parse json file into a `ChainSpec` - pub fn from_json_file(path: PathBuf) -> Result { - let file = File::open(&path).map_err(|e| format!("Error opening spec file: {}", e))?; - let spec = json::from_reader(file).map_err(|e| format!("Error parsing spec file: {}", e))?; - Ok(ChainSpec { - spec, - genesis: GenesisSource::File(path), - }) - } - - /// Create hardcoded spec. 
-	pub fn from_genesis(
-		name: &str,
-		id: &str,
-		constructor: fn() -> G,
-		boot_nodes: Vec,
-		telemetry_endpoints: Option,
-		protocol_id: Option<&str>,
-		consensus_engine: Option<&str>,
-		properties: Option,
-	) -> Self
-	{
-		let spec = ChainSpecFile {
-			name: name.to_owned(),
-			id: id.to_owned(),
-			boot_nodes: boot_nodes,
-			telemetry_endpoints,
-			protocol_id: protocol_id.map(str::to_owned),
-			consensus_engine: consensus_engine.map(str::to_owned),
-			properties,
-		};
-		ChainSpec {
-			spec,
-			genesis: GenesisSource::Factory(constructor),
-		}
-	}
-
-	/// Dump to json string.
-	pub fn to_json(self, raw: bool) -> Result {
-		#[derive(Serialize, Deserialize)]
-		struct Container {
-			#[serde(flatten)]
-			spec: ChainSpecFile,
-			genesis: Genesis,
-
-		};
-		let genesis = match (raw, self.genesis.resolve()?) {
-			(true, Genesis::Runtime(g)) => {
-				let storage = g.build_storage()?.0.into_iter()
-					.map(|(k, v)| (StorageKey(k), StorageData(v)))
-					.collect();
-
-				Genesis::Raw(storage)
-			},
-			(_, genesis) => genesis,
-		};
-		let spec = Container {
-			spec: self.spec,
-			genesis,
-		};
-		json::to_string_pretty(&spec).map_err(|e| format!("Error generating spec json: {}", e))
-	}
+    /// A list of bootnode addresses.
+    pub fn boot_nodes(&self) -> &[String] {
+        &self.spec.boot_nodes
+    }
+
+    /// Spec name.
+    pub fn name(&self) -> &str {
+        &self.spec.name
+    }
+
+    /// Spec id.
+    pub fn id(&self) -> &str {
+        &self.spec.id
+    }
+
+    /// Telemetry endpoints (if any)
+    pub fn telemetry_endpoints(&self) -> &Option {
+        &self.spec.telemetry_endpoints
+    }
+
+    /// Network protocol id.
+    pub fn protocol_id(&self) -> Option<&str> {
+        self.spec.protocol_id.as_ref().map(String::as_str)
+    }
+
+    /// Name of the consensus engine.
+    pub fn consensus_engine(&self) -> Option<&str> {
+        self.spec.consensus_engine.as_ref().map(String::as_str)
+    }
+
+    /// Additional loosely-typed properties of the chain.
+    pub fn properties(&self) -> Properties {
+        // Return an empty JSON object if 'properties' not defined in config
+        self.spec
+            .properties
+            .as_ref()
+            .unwrap_or(&json::map::Map::new())
+            .clone()
+    }
+
+    /// Add a bootnode to the list.
+    pub fn add_boot_node(&mut self, addr: Multiaddr) {
+        self.spec.boot_nodes.push(addr.to_string())
+    }
+
+    /// Parse JSON content into a `ChainSpec`.
+    pub fn from_embedded(json: &'static [u8]) -> Result {
+        let spec = json::from_slice(json).map_err(|e| format!("Error parsing spec file: {}", e))?;
+        Ok(ChainSpec {
+            spec,
+            genesis: GenesisSource::Embedded(json),
+        })
+    }
+
+    /// Parse a JSON file into a `ChainSpec`.
+    pub fn from_json_file(path: PathBuf) -> Result {
+        let file = File::open(&path).map_err(|e| format!("Error opening spec file: {}", e))?;
+        let spec =
+            json::from_reader(file).map_err(|e| format!("Error parsing spec file: {}", e))?;
+        Ok(ChainSpec {
+            spec,
+            genesis: GenesisSource::File(path),
+        })
+    }
+
+    /// Create hardcoded spec.
+    pub fn from_genesis(
+        name: &str,
+        id: &str,
+        constructor: fn() -> G,
+        boot_nodes: Vec,
+        telemetry_endpoints: Option,
+        protocol_id: Option<&str>,
+        consensus_engine: Option<&str>,
+        properties: Option,
+    ) -> Self {
+        let spec = ChainSpecFile {
+            name: name.to_owned(),
+            id: id.to_owned(),
+            boot_nodes: boot_nodes,
+            telemetry_endpoints,
+            protocol_id: protocol_id.map(str::to_owned),
+            consensus_engine: consensus_engine.map(str::to_owned),
+            properties,
+        };
+        ChainSpec {
+            spec,
+            genesis: GenesisSource::Factory(constructor),
+        }
+    }
+
+    /// Dump to JSON string.
+ pub fn to_json(self, raw: bool) -> Result { + #[derive(Serialize, Deserialize)] + struct Container { + #[serde(flatten)] + spec: ChainSpecFile, + genesis: Genesis, + }; + let genesis = match (raw, self.genesis.resolve()?) { + (true, Genesis::Runtime(g)) => { + let storage = g + .build_storage()? + .0 + .into_iter() + .map(|(k, v)| (StorageKey(k), StorageData(v))) + .collect(); + + Genesis::Raw(storage) + } + (_, genesis) => genesis, + }; + let spec = Container { + spec: self.spec, + genesis, + }; + json::to_string_pretty(&spec).map_err(|e| format!("Error generating spec json: {}", e)) + } } diff --git a/core/service/src/components.rs b/core/service/src/components.rs index eb37a69a14..b16d3ec7c2 100644 --- a/core/service/src/components.rs +++ b/core/service/src/components.rs @@ -16,29 +16,32 @@ //! Substrate service components. -use std::{sync::Arc, net::SocketAddr, marker::PhantomData, ops::Deref, ops::DerefMut}; -use serde::{Serialize, de::DeserializeOwned}; -use tokio::runtime::TaskExecutor; use crate::chain_spec::ChainSpec; +use crate::config::Configuration; +use crate::{error, maybe_start_server, Service}; +use client::{self, runtime_api, Client}; use client_db; -use client::{self, Client, runtime_api}; -use crate::{error, Service, maybe_start_server}; use consensus_common::import_queue::ImportQueue; use network::{self, OnDemand}; -use substrate_executor::{NativeExecutor, NativeExecutionDispatch}; -use transaction_pool::txpool::{self, Options as TransactionPoolOptions, Pool as TransactionPool}; -use runtime_primitives::{ - BuildStorage, traits::{Block as BlockT, Header as HeaderT, ProvideRuntimeApi}, generic::BlockId -}; -use crate::config::Configuration; +use parking_lot::Mutex; use primitives::{Blake2Hasher, H256}; use rpc::{self, apis::system::SystemInfo}; -use parking_lot::Mutex; +use runtime_primitives::{ + generic::BlockId, + traits::{Block as BlockT, Header as HeaderT, ProvideRuntimeApi}, + BuildStorage, +}; +use serde::{de::DeserializeOwned, Serialize}; +use std::{marker::PhantomData, net::SocketAddr, ops::Deref, ops::DerefMut, sync::Arc}; +use substrate_executor::{NativeExecutionDispatch, NativeExecutor}; +use tokio::runtime::TaskExecutor; +use transaction_pool::txpool::{self, Options as TransactionPoolOptions, Pool as TransactionPool}; // Type aliases. // These exist mainly to avoid typing `::Foo` all over the code. /// Network service type for a factory. -pub type NetworkService = network::Service<::Block, ::NetworkProtocol>; +pub type NetworkService = + network::Service<::Block, ::NetworkProtocol>; /// Code executor type for a factory. pub type CodeExecutor = NativeExecutor<::RuntimeDispatch>; @@ -47,48 +50,56 @@ pub type CodeExecutor = NativeExecutor<::RuntimeDispatch pub type FullBackend = client_db::Backend<::Block>; /// Full client executor type for a factory. -pub type FullExecutor = client::LocalCallExecutor< - client_db::Backend<::Block>, - CodeExecutor, ->; +pub type FullExecutor = + client::LocalCallExecutor::Block>, CodeExecutor>; /// Light client backend type for a factory. pub type LightBackend = client::light::backend::Backend< - client_db::light::LightStorage<::Block>, - network::OnDemand<::Block>, - Blake2Hasher, + client_db::light::LightStorage<::Block>, + network::OnDemand<::Block>, + Blake2Hasher, >; /// Light client executor type for a factory. 
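/// `RemoteOrLocalCallExecutor` picks between a `RemoteCallExecutor`, which
/// resolves calls by fetching proofs through the `OnDemand` service, and a
/// `LocalCallExecutor` over the light backend for calls that can be answered
/// from locally available state.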
pub type LightExecutor = client::light::call_executor::RemoteOrLocalCallExecutor< - ::Block, - client::light::backend::Backend< - client_db::light::LightStorage<::Block>, - network::OnDemand<::Block>, - Blake2Hasher - >, - client::light::call_executor::RemoteCallExecutor< - client::light::blockchain::Blockchain< - client_db::light::LightStorage<::Block>, - network::OnDemand<::Block> - >, - network::OnDemand<::Block> - >, - client::LocalCallExecutor< - client::light::backend::Backend< - client_db::light::LightStorage<::Block>, - network::OnDemand<::Block>, - Blake2Hasher - >, - CodeExecutor - > + ::Block, + client::light::backend::Backend< + client_db::light::LightStorage<::Block>, + network::OnDemand<::Block>, + Blake2Hasher, + >, + client::light::call_executor::RemoteCallExecutor< + client::light::blockchain::Blockchain< + client_db::light::LightStorage<::Block>, + network::OnDemand<::Block>, + >, + network::OnDemand<::Block>, + >, + client::LocalCallExecutor< + client::light::backend::Backend< + client_db::light::LightStorage<::Block>, + network::OnDemand<::Block>, + Blake2Hasher, + >, + CodeExecutor, + >, >; /// Full client type for a factory. -pub type FullClient = Client, FullExecutor, ::Block, ::RuntimeApi>; +pub type FullClient = Client< + FullBackend, + FullExecutor, + ::Block, + ::RuntimeApi, +>; /// Light client type for a factory. -pub type LightClient = Client, LightExecutor, ::Block, ::RuntimeApi>; +pub type LightClient = Client< + LightBackend, + LightExecutor, + ::Block, + ::RuntimeApi, +>; /// `ChainSpec` specialization for a factory. pub type FactoryChainSpec = ChainSpec<::Genesis>; @@ -106,14 +117,15 @@ pub type FactoryExtrinsic = <::Block as BlockT>::Extrins pub type FactoryBlockNumber = < as BlockT>::Header as HeaderT>::Number; /// Full `Configuration` type for a factory. -pub type FactoryFullConfiguration = Configuration<::Configuration, FactoryGenesis>; +pub type FactoryFullConfiguration = + Configuration<::Configuration, FactoryGenesis>; /// Client type for `Components`. pub type ComponentClient = Client< - ::Backend, - ::Executor, - FactoryBlock<::Factory>, - ::RuntimeApi, + ::Backend, + ::Executor, + FactoryBlock<::Factory>, + ::RuntimeApi, >; /// Block type for `Components` @@ -134,469 +146,530 @@ impl RuntimeGenesis for T {} /// Something that can start the RPC service. 
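/// The implementation below builds a single handler exposing the `state`,
/// `chain`, `author` and `system` APIs and serves it on the optional HTTP and
/// WebSocket addresses, returning both server handles (the WebSocket one
/// wrapped in a `Mutex`) so they stay alive with the service.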
pub trait StartRPC { - type ServersHandle: Send + Sync; - - fn start_rpc( - client: Arc>, - network: Arc>>, - should_have_peers: bool, - system_info: SystemInfo, - rpc_http: Option, - rpc_ws: Option, - task_executor: TaskExecutor, - transaction_pool: Arc>, - ) -> error::Result; + type ServersHandle: Send + Sync; + + fn start_rpc( + client: Arc>, + network: Arc>>, + should_have_peers: bool, + system_info: SystemInfo, + rpc_http: Option, + rpc_ws: Option, + task_executor: TaskExecutor, + transaction_pool: Arc>, + ) -> error::Result; } -impl StartRPC for C where - ComponentClient: ProvideRuntimeApi, - as ProvideRuntimeApi>::Api: runtime_api::Metadata>, +impl StartRPC for C +where + ComponentClient: ProvideRuntimeApi, + as ProvideRuntimeApi>::Api: runtime_api::Metadata>, { - type ServersHandle = (Option, Option>); - - fn start_rpc( - client: Arc>, - network: Arc>>, - should_have_peers: bool, - rpc_system_info: SystemInfo, - rpc_http: Option, - rpc_ws: Option, - task_executor: TaskExecutor, - transaction_pool: Arc>, - ) -> error::Result { - let handler = || { - let client = client.clone(); - let subscriptions = rpc::apis::Subscriptions::new(task_executor.clone()); - let chain = rpc::apis::chain::Chain::new(client.clone(), subscriptions.clone()); - let state = rpc::apis::state::State::new(client.clone(), subscriptions.clone()); - let author = rpc::apis::author::Author::new( - client.clone(), transaction_pool.clone(), subscriptions - ); - let system = rpc::apis::system::System::new( - rpc_system_info.clone(), network.clone(), should_have_peers - ); - rpc::rpc_handler::, ComponentExHash, _, _, _, _>( - state, - chain, - author, - system, - ) - }; - - Ok(( - maybe_start_server(rpc_http, |address| rpc::start_http(address, handler()))?, - maybe_start_server(rpc_ws, |address| rpc::start_ws(address, handler()))?.map(Mutex::new), - )) - } + type ServersHandle = (Option, Option>); + + fn start_rpc( + client: Arc>, + network: Arc>>, + should_have_peers: bool, + rpc_system_info: SystemInfo, + rpc_http: Option, + rpc_ws: Option, + task_executor: TaskExecutor, + transaction_pool: Arc>, + ) -> error::Result { + let handler = || { + let client = client.clone(); + let subscriptions = rpc::apis::Subscriptions::new(task_executor.clone()); + let chain = rpc::apis::chain::Chain::new(client.clone(), subscriptions.clone()); + let state = rpc::apis::state::State::new(client.clone(), subscriptions.clone()); + let author = rpc::apis::author::Author::new( + client.clone(), + transaction_pool.clone(), + subscriptions, + ); + let system = rpc::apis::system::System::new( + rpc_system_info.clone(), + network.clone(), + should_have_peers, + ); + rpc::rpc_handler::, ComponentExHash, _, _, _, _>( + state, chain, author, system, + ) + }; + + Ok(( + maybe_start_server(rpc_http, |address| rpc::start_http(address, handler()))?, + maybe_start_server(rpc_ws, |address| rpc::start_ws(address, handler()))? + .map(Mutex::new), + )) + } } /// Something that can maintain transaction pool on every imported block. 
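/// Concretely: when a block is imported, its extrinsics are pruned from the
/// pool, and the runtime call is skipped entirely if the pool is already
/// empty. A minimal call-site sketch, mirroring the test at the bottom of
/// this file:
///
/// ```ignore
/// let id = BlockId::hash(block.header().hash());
/// C::maintain_transaction_pool(&id, &client, &pool)?;
/// ```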
pub trait MaintainTransactionPool { - fn maintain_transaction_pool( - id: &BlockId>, - client: &ComponentClient, - transaction_pool: &TransactionPool, - ) -> error::Result<()>; + fn maintain_transaction_pool( + id: &BlockId>, + client: &ComponentClient, + transaction_pool: &TransactionPool, + ) -> error::Result<()>; } fn maintain_transaction_pool( - id: &BlockId, - client: &Client, - transaction_pool: &TransactionPool, -) -> error::Result<()> where - Block: BlockT::Out>, - Backend: client::backend::Backend, - Client: ProvideRuntimeApi, - as ProvideRuntimeApi>::Api: runtime_api::TaggedTransactionQueue, - Executor: client::CallExecutor, - PoolApi: txpool::ChainApi, + id: &BlockId, + client: &Client, + transaction_pool: &TransactionPool, +) -> error::Result<()> +where + Block: BlockT::Out>, + Backend: client::backend::Backend, + Client: ProvideRuntimeApi, + as ProvideRuntimeApi>::Api: + runtime_api::TaggedTransactionQueue, + Executor: client::CallExecutor, + PoolApi: txpool::ChainApi, { - // Avoid calling into runtime if there is nothing to prune from the pool anyway. - if transaction_pool.status().is_empty() { - return Ok(()) - } - - if let Some(block) = client.block(id)? { - let parent_id = BlockId::hash(*block.block.header().parent_hash()); - let extrinsics = block.block.extrinsics(); - transaction_pool.prune(id, &parent_id, extrinsics).map_err(|e| format!("{:?}", e))?; - } - - Ok(()) + // Avoid calling into runtime if there is nothing to prune from the pool anyway. + if transaction_pool.status().is_empty() { + return Ok(()); + } + + if let Some(block) = client.block(id)? { + let parent_id = BlockId::hash(*block.block.header().parent_hash()); + let extrinsics = block.block.extrinsics(); + transaction_pool + .prune(id, &parent_id, extrinsics) + .map_err(|e| format!("{:?}", e))?; + } + + Ok(()) } -impl MaintainTransactionPool for C where - ComponentClient: ProvideRuntimeApi, - as ProvideRuntimeApi>::Api: runtime_api::TaggedTransactionQueue>, +impl MaintainTransactionPool for C +where + ComponentClient: ProvideRuntimeApi, + as ProvideRuntimeApi>::Api: + runtime_api::TaggedTransactionQueue>, { - fn maintain_transaction_pool( - id: &BlockId>, - client: &ComponentClient, - transaction_pool: &TransactionPool, - ) -> error::Result<()> { - maintain_transaction_pool(id, client, transaction_pool) - } + fn maintain_transaction_pool( + id: &BlockId>, + client: &ComponentClient, + transaction_pool: &TransactionPool, + ) -> error::Result<()> { + maintain_transaction_pool(id, client, transaction_pool) + } } pub trait OffchainWorker { - fn offchain_workers( - number: &FactoryBlockNumber, - offchain: &offchain::OffchainWorkers, ComponentBlock>, - pool: &Arc>, - ) -> error::Result<()>; + fn offchain_workers( + number: &FactoryBlockNumber, + offchain: &offchain::OffchainWorkers, ComponentBlock>, + pool: &Arc>, + ) -> error::Result<()>; } -impl OffchainWorker for C where - ComponentClient: ProvideRuntimeApi, - as ProvideRuntimeApi>::Api: offchain::OffchainWorkerApi>, +impl OffchainWorker for C +where + ComponentClient: ProvideRuntimeApi, + as ProvideRuntimeApi>::Api: offchain::OffchainWorkerApi>, { - fn offchain_workers( - number: &FactoryBlockNumber, - offchain: &offchain::OffchainWorkers, ComponentBlock>, - pool: &Arc>, - ) -> error::Result<()> { - Ok(offchain.on_block_imported(number, pool)) - } + fn offchain_workers( + number: &FactoryBlockNumber, + offchain: &offchain::OffchainWorkers, ComponentBlock>, + pool: &Arc>, + ) -> error::Result<()> { + Ok(offchain.on_block_imported(number, pool)) + } } /// The 
super trait that combines all required traits a `Service` needs to implement. pub trait ServiceTrait: - Deref> - + Send - + Sync - + 'static - + StartRPC - + MaintainTransactionPool - + OffchainWorker -{} + Deref> + + Send + + Sync + + 'static + + StartRPC + + MaintainTransactionPool + + OffchainWorker +{ +} impl ServiceTrait for T where - T: Deref> - + Send - + Sync - + 'static - + StartRPC - + MaintainTransactionPool - + OffchainWorker -{} + T: Deref> + + Send + + Sync + + 'static + + StartRPC + + MaintainTransactionPool + + OffchainWorker +{ +} /// A collection of types and methods to build a service on top of the substrate service. pub trait ServiceFactory: 'static + Sized { - /// Block type. - type Block: BlockT; - /// The type that implements the runtime API. - type RuntimeApi: Send + Sync; - /// Network protocol extensions. - type NetworkProtocol: network::specialization::NetworkSpecialization; - /// Chain runtime. - type RuntimeDispatch: NativeExecutionDispatch + Send + Sync + 'static; - /// Extrinsic pool backend type for the full client. - type FullTransactionPoolApi: txpool::ChainApi::Hash, Block = Self::Block> + Send + 'static; - /// Extrinsic pool backend type for the light client. - type LightTransactionPoolApi: txpool::ChainApi::Hash, Block = Self::Block> + 'static; - /// Genesis configuration for the runtime. - type Genesis: RuntimeGenesis; - /// Other configuration for service members. - type Configuration: Default; - /// Extended full service type. - type FullService: ServiceTrait>; - /// Extended light service type. - type LightService: ServiceTrait>; - /// ImportQueue for full client - type FullImportQueue: consensus_common::import_queue::ImportQueue + 'static; - /// ImportQueue for light clients - type LightImportQueue: consensus_common::import_queue::ImportQueue + 'static; - - //TODO: replace these with a constructor trait. that TransactionPool implements. (#1242) - /// Extrinsic pool constructor for the full client. - fn build_full_transaction_pool(config: TransactionPoolOptions, client: Arc>) - -> Result, error::Error>; - /// Extrinsic pool constructor for the light client. - fn build_light_transaction_pool(config: TransactionPoolOptions, client: Arc>) - -> Result, error::Error>; - - /// Build network protocol. - fn build_network_protocol(config: &FactoryFullConfiguration) - -> Result; - - /// Build full service. - fn new_full(config: FactoryFullConfiguration, executor: TaskExecutor) - -> Result; - /// Build light service. - fn new_light(config: FactoryFullConfiguration, executor: TaskExecutor) - -> Result; - - /// ImportQueue for a full client - fn build_full_import_queue( - config: &mut FactoryFullConfiguration, - _client: Arc> - ) -> Result { - if let Some(name) = config.chain_spec.consensus_engine() { - match name { - _ => Err(format!("Chain Specification defines unknown consensus engine '{}'", name).into()) - } - - } else { - Err("Chain Specification doesn't contain any consensus_engine name".into()) - } - } - - /// ImportQueue for a light client - fn build_light_import_queue( - config: &mut FactoryFullConfiguration, - _client: Arc> - ) -> Result { - if let Some(name) = config.chain_spec.consensus_engine() { - match name { - _ => Err(format!("Chain Specification defines unknown consensus engine '{}'", name).into()) - } - - } else { - Err("Chain Specification doesn't contain any consensus_engine name".into()) - } - } + /// Block type. + type Block: BlockT; + /// The type that implements the runtime API. 
+ type RuntimeApi: Send + Sync; + /// Network protocol extensions. + type NetworkProtocol: network::specialization::NetworkSpecialization; + /// Chain runtime. + type RuntimeDispatch: NativeExecutionDispatch + Send + Sync + 'static; + /// Extrinsic pool backend type for the full client. + type FullTransactionPoolApi: txpool::ChainApi< + Hash = ::Hash, + Block = Self::Block, + > + Send + + 'static; + /// Extrinsic pool backend type for the light client. + type LightTransactionPoolApi: txpool::ChainApi< + Hash = ::Hash, + Block = Self::Block, + > + 'static; + /// Genesis configuration for the runtime. + type Genesis: RuntimeGenesis; + /// Other configuration for service members. + type Configuration: Default; + /// Extended full service type. + type FullService: ServiceTrait>; + /// Extended light service type. + type LightService: ServiceTrait>; + /// ImportQueue for full client + type FullImportQueue: consensus_common::import_queue::ImportQueue + 'static; + /// ImportQueue for light clients + type LightImportQueue: consensus_common::import_queue::ImportQueue + 'static; + + //TODO: replace these with a constructor trait. that TransactionPool implements. (#1242) + /// Extrinsic pool constructor for the full client. + fn build_full_transaction_pool( + config: TransactionPoolOptions, + client: Arc>, + ) -> Result, error::Error>; + /// Extrinsic pool constructor for the light client. + fn build_light_transaction_pool( + config: TransactionPoolOptions, + client: Arc>, + ) -> Result, error::Error>; + + /// Build network protocol. + fn build_network_protocol( + config: &FactoryFullConfiguration, + ) -> Result; + + /// Build full service. + fn new_full( + config: FactoryFullConfiguration, + executor: TaskExecutor, + ) -> Result; + /// Build light service. + fn new_light( + config: FactoryFullConfiguration, + executor: TaskExecutor, + ) -> Result; + + /// ImportQueue for a full client + fn build_full_import_queue( + config: &mut FactoryFullConfiguration, + _client: Arc>, + ) -> Result { + if let Some(name) = config.chain_spec.consensus_engine() { + match name { + _ => Err(format!( + "Chain Specification defines unknown consensus engine '{}'", + name + ) + .into()), + } + } else { + Err("Chain Specification doesn't contain any consensus_engine name".into()) + } + } + + /// ImportQueue for a light client + fn build_light_import_queue( + config: &mut FactoryFullConfiguration, + _client: Arc>, + ) -> Result { + if let Some(name) = config.chain_spec.consensus_engine() { + match name { + _ => Err(format!( + "Chain Specification defines unknown consensus engine '{}'", + name + ) + .into()), + } + } else { + Err("Chain Specification doesn't contain any consensus_engine name".into()) + } + } } /// A collection of types and function to generalize over full / light client type. pub trait Components: Sized + 'static { - /// Associated service factory. - type Factory: ServiceFactory; - /// Client backend. - type Backend: 'static + client::backend::Backend, Blake2Hasher>; - /// Client executor. - type Executor: 'static + client::CallExecutor, Blake2Hasher> + Send + Sync + Clone; - /// The type that implements the runtime API. - type RuntimeApi: Send + Sync; - /// A type that can start all runtime-dependent services. - type RuntimeServices: ServiceTrait; - // TODO: Traitify transaction pool and allow people to implement their own. (#1242) - /// Extrinsic pool type. 
- type TransactionPoolApi: 'static + txpool::ChainApi< - Hash = as BlockT>::Hash, - Block = FactoryBlock - >; - /// Our Import Queue - type ImportQueue: ImportQueue> + 'static; - - /// Create client. - fn build_client( - config: &FactoryFullConfiguration, - executor: CodeExecutor, - ) -> Result< - ( - Arc>, - Option>>> - ), - error::Error - >; - - /// Create extrinsic pool. - fn build_transaction_pool(config: TransactionPoolOptions, client: Arc>) - -> Result, error::Error>; - - /// instance of import queue for clients - fn build_import_queue( - config: &mut FactoryFullConfiguration, - client: Arc> - ) -> Result; + /// Associated service factory. + type Factory: ServiceFactory; + /// Client backend. + type Backend: 'static + client::backend::Backend, Blake2Hasher>; + /// Client executor. + type Executor: 'static + + client::CallExecutor, Blake2Hasher> + + Send + + Sync + + Clone; + /// The type that implements the runtime API. + type RuntimeApi: Send + Sync; + /// A type that can start all runtime-dependent services. + type RuntimeServices: ServiceTrait; + // TODO: Traitify transaction pool and allow people to implement their own. (#1242) + /// Extrinsic pool type. + type TransactionPoolApi: 'static + + txpool::ChainApi< + Hash = as BlockT>::Hash, + Block = FactoryBlock, + >; + /// Our Import Queue + type ImportQueue: ImportQueue> + 'static; + + /// Create client. + fn build_client( + config: &FactoryFullConfiguration, + executor: CodeExecutor, + ) -> Result< + ( + Arc>, + Option>>>, + ), + error::Error, + >; + + /// Create extrinsic pool. + fn build_transaction_pool( + config: TransactionPoolOptions, + client: Arc>, + ) -> Result, error::Error>; + + /// instance of import queue for clients + fn build_import_queue( + config: &mut FactoryFullConfiguration, + client: Arc>, + ) -> Result; } /// A struct that implement `Components` for the full client. 
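/// A rough construction sketch (`MyFactory` stands in for a concrete
/// `ServiceFactory`; `task_executor` comes from the tokio runtime):
///
/// ```ignore
/// let full = FullComponents::<MyFactory>::new(config, task_executor)?;
/// ```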
pub struct FullComponents { - _factory: PhantomData, - service: Service>, + _factory: PhantomData, + service: Service>, } impl FullComponents { - /// Create new `FullComponents` - pub fn new( - config: FactoryFullConfiguration, - task_executor: TaskExecutor - ) -> Result { - Ok( - Self { - _factory: Default::default(), - service: Service::new(config, task_executor)?, - } - ) - } + /// Create new `FullComponents` + pub fn new( + config: FactoryFullConfiguration, + task_executor: TaskExecutor, + ) -> Result { + Ok(Self { + _factory: Default::default(), + service: Service::new(config, task_executor)?, + }) + } } impl Deref for FullComponents { - type Target = Service; + type Target = Service; - fn deref(&self) -> &Self::Target { - &self.service - } + fn deref(&self) -> &Self::Target { + &self.service + } } impl DerefMut for FullComponents { - fn deref_mut(&mut self) -> &mut Service { - &mut self.service - } + fn deref_mut(&mut self) -> &mut Service { + &mut self.service + } } impl Components for FullComponents { - type Factory = Factory; - type Executor = FullExecutor; - type Backend = FullBackend; - type TransactionPoolApi = ::FullTransactionPoolApi; - type ImportQueue = Factory::FullImportQueue; - type RuntimeApi = Factory::RuntimeApi; - type RuntimeServices = Factory::FullService; - - fn build_client( - config: &FactoryFullConfiguration, - executor: CodeExecutor, - ) - -> Result<( - Arc>, - Option>>> - ), error::Error> - { - let db_settings = client_db::DatabaseSettings { - cache_size: config.database_cache_size.map(|u| u as usize), - path: config.database_path.as_str().into(), - pruning: config.pruning.clone(), - }; - Ok((Arc::new(client_db::new_client( - db_settings, - executor, - &config.chain_spec, - config.execution_strategies.clone(), - )?), None)) - } - - fn build_transaction_pool(config: TransactionPoolOptions, client: Arc>) - -> Result, error::Error> - { - Factory::build_full_transaction_pool(config, client) - } - - fn build_import_queue( - config: &mut FactoryFullConfiguration, - client: Arc> - ) -> Result { - Factory::build_full_import_queue(config, client) - } + type Factory = Factory; + type Executor = FullExecutor; + type Backend = FullBackend; + type TransactionPoolApi = ::FullTransactionPoolApi; + type ImportQueue = Factory::FullImportQueue; + type RuntimeApi = Factory::RuntimeApi; + type RuntimeServices = Factory::FullService; + + fn build_client( + config: &FactoryFullConfiguration, + executor: CodeExecutor, + ) -> Result< + ( + Arc>, + Option>>>, + ), + error::Error, + > { + let db_settings = client_db::DatabaseSettings { + cache_size: config.database_cache_size.map(|u| u as usize), + path: config.database_path.as_str().into(), + pruning: config.pruning.clone(), + }; + Ok(( + Arc::new(client_db::new_client( + db_settings, + executor, + &config.chain_spec, + config.execution_strategies.clone(), + )?), + None, + )) + } + + fn build_transaction_pool( + config: TransactionPoolOptions, + client: Arc>, + ) -> Result, error::Error> { + Factory::build_full_transaction_pool(config, client) + } + + fn build_import_queue( + config: &mut FactoryFullConfiguration, + client: Arc>, + ) -> Result { + Factory::build_full_import_queue(config, client) + } } /// A struct that implement `Components` for the light client. 
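/// Mirrors `FullComponents`, but wires up the light backend, the `OnDemand`
/// fetcher and the light import queue. Constructed the same way:
///
/// ```ignore
/// let light = LightComponents::<MyFactory>::new(config, task_executor)?;
/// ```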
pub struct LightComponents { - _factory: PhantomData, - service: Service>, + _factory: PhantomData, + service: Service>, } impl LightComponents { - /// Create new `LightComponents` - pub fn new( - config: FactoryFullConfiguration, - task_executor: TaskExecutor - ) -> Result { - Ok( - Self { - _factory: Default::default(), - service: Service::new(config, task_executor)?, - } - ) - } + /// Create new `LightComponents` + pub fn new( + config: FactoryFullConfiguration, + task_executor: TaskExecutor, + ) -> Result { + Ok(Self { + _factory: Default::default(), + service: Service::new(config, task_executor)?, + }) + } } impl Deref for LightComponents { - type Target = Service; + type Target = Service; - fn deref(&self) -> &Self::Target { - &self.service - } + fn deref(&self) -> &Self::Target { + &self.service + } } impl Components for LightComponents { - type Factory = Factory; - type Executor = LightExecutor; - type Backend = LightBackend; - type TransactionPoolApi = ::LightTransactionPoolApi; - type ImportQueue = ::LightImportQueue; - type RuntimeApi = Factory::RuntimeApi; - type RuntimeServices = Factory::LightService; - - fn build_client( - config: &FactoryFullConfiguration, - executor: CodeExecutor, - ) - -> Result< - ( - Arc>, - Option>>> - ), error::Error> - { - let db_settings = client_db::DatabaseSettings { - cache_size: None, - path: config.database_path.as_str().into(), - pruning: config.pruning.clone(), - }; - let db_storage = client_db::light::LightStorage::new(db_settings)?; - let light_blockchain = client::light::new_light_blockchain(db_storage); - let fetch_checker = Arc::new(client::light::new_fetch_checker(light_blockchain.clone(), executor.clone())); - let fetcher = Arc::new(network::OnDemand::new(fetch_checker)); - let client_backend = client::light::new_light_backend(light_blockchain, fetcher.clone()); - let client = client::light::new_light(client_backend, fetcher.clone(), &config.chain_spec, executor)?; - Ok((Arc::new(client), Some(fetcher))) - } - - fn build_transaction_pool(config: TransactionPoolOptions, client: Arc>) - -> Result, error::Error> - { - Factory::build_light_transaction_pool(config, client) - } - - fn build_import_queue( - config: &mut FactoryFullConfiguration, - client: Arc> - ) -> Result { - Factory::build_light_import_queue(config, client) - } + type Factory = Factory; + type Executor = LightExecutor; + type Backend = LightBackend; + type TransactionPoolApi = ::LightTransactionPoolApi; + type ImportQueue = ::LightImportQueue; + type RuntimeApi = Factory::RuntimeApi; + type RuntimeServices = Factory::LightService; + + fn build_client( + config: &FactoryFullConfiguration, + executor: CodeExecutor, + ) -> Result< + ( + Arc>, + Option>>>, + ), + error::Error, + > { + let db_settings = client_db::DatabaseSettings { + cache_size: None, + path: config.database_path.as_str().into(), + pruning: config.pruning.clone(), + }; + let db_storage = client_db::light::LightStorage::new(db_settings)?; + let light_blockchain = client::light::new_light_blockchain(db_storage); + let fetch_checker = Arc::new(client::light::new_fetch_checker( + light_blockchain.clone(), + executor.clone(), + )); + let fetcher = Arc::new(network::OnDemand::new(fetch_checker)); + let client_backend = client::light::new_light_backend(light_blockchain, fetcher.clone()); + let client = client::light::new_light( + client_backend, + fetcher.clone(), + &config.chain_spec, + executor, + )?; + Ok((Arc::new(client), Some(fetcher))) + } + + fn build_transaction_pool( + config: TransactionPoolOptions, + 
client: Arc>, + ) -> Result, error::Error> { + Factory::build_light_transaction_pool(config, client) + } + + fn build_import_queue( + config: &mut FactoryFullConfiguration, + client: Arc>, + ) -> Result { + Factory::build_light_import_queue(config, client) + } } #[cfg(test)] mod tests { - use super::*; - use parity_codec::Encode; - use consensus_common::BlockOrigin; - use substrate_test_client::{self, TestClient, AccountKeyring, runtime::{Extrinsic, Transfer}}; - - #[test] - fn should_remove_transactions_from_the_pool() { - let client = Arc::new(substrate_test_client::new()); - let pool = TransactionPool::new(Default::default(), ::transaction_pool::ChainApi::new(client.clone())); - let transaction = { - let transfer = Transfer { - amount: 5, - nonce: 0, - from: AccountKeyring::Alice.into(), - to: Default::default(), - }; - let signature = AccountKeyring::from_public(&transfer.from).unwrap().sign(&transfer.encode()).into(); - Extrinsic::Transfer(transfer, signature) - }; - // store the transaction in the pool - pool.submit_one(&BlockId::hash(client.best_block_header().unwrap().hash()), transaction.clone()).unwrap(); - - // import the block - let mut builder = client.new_block().unwrap(); - builder.push(transaction.clone()).unwrap(); - let block = builder.bake().unwrap(); - let id = BlockId::hash(block.header().hash()); - client.import(BlockOrigin::Own, block).unwrap(); - - // fire notification - this should clean up the queue - assert_eq!(pool.status().ready, 1); - maintain_transaction_pool( - &id, - &client, - &pool, - ).unwrap(); - - // then - assert_eq!(pool.status().ready, 0); - assert_eq!(pool.status().future, 0); - } + use super::*; + use consensus_common::BlockOrigin; + use parity_codec::Encode; + use substrate_test_client::{ + self, + runtime::{Extrinsic, Transfer}, + AccountKeyring, TestClient, + }; + + #[test] + fn should_remove_transactions_from_the_pool() { + let client = Arc::new(substrate_test_client::new()); + let pool = TransactionPool::new( + Default::default(), + ::transaction_pool::ChainApi::new(client.clone()), + ); + let transaction = { + let transfer = Transfer { + amount: 5, + nonce: 0, + from: AccountKeyring::Alice.into(), + to: Default::default(), + }; + let signature = AccountKeyring::from_public(&transfer.from) + .unwrap() + .sign(&transfer.encode()) + .into(); + Extrinsic::Transfer(transfer, signature) + }; + // store the transaction in the pool + pool.submit_one( + &BlockId::hash(client.best_block_header().unwrap().hash()), + transaction.clone(), + ) + .unwrap(); + + // import the block + let mut builder = client.new_block().unwrap(); + builder.push(transaction.clone()).unwrap(); + let block = builder.bake().unwrap(); + let id = BlockId::hash(block.header().hash()); + client.import(BlockOrigin::Own, block).unwrap(); + + // fire notification - this should clean up the queue + assert_eq!(pool.status().ready, 1); + maintain_transaction_pool(&id, &client, &pool).unwrap(); + + // then + assert_eq!(pool.status().ready, 0); + assert_eq!(pool.status().future, 0); + } } diff --git a/core/service/src/config.rs b/core/service/src/config.rs index b7a3b8ba14..6a12a5027d 100644 --- a/core/service/src/config.rs +++ b/core/service/src/config.rs @@ -16,121 +16,126 @@ //! Service configuration. 
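//!
//! The central type is `Configuration`: everything a node needs to start,
//! including roles, database and keystore paths, pruning mode, RPC addresses,
//! telemetry and the `ChainSpec`. A rough setup sketch (`MyGenesis` is a
//! stand-in genesis type; the custom-configuration type is `()` here):
//!
//! ```ignore
//! let mut config: Configuration<(), MyGenesis> = Configuration::default_with_spec(chain_spec);
//! config.rpc_http = Some("127.0.0.1:9933".parse()?);
//! ```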
-use std::net::SocketAddr; -use transaction_pool; use crate::chain_spec::ChainSpec; pub use client::ExecutionStrategies; pub use client_db::PruningMode; pub use network::config::{NetworkConfiguration, Roles}; use runtime_primitives::BuildStorage; -use serde::{Serialize, de::DeserializeOwned}; +use serde::{de::DeserializeOwned, Serialize}; +use std::net::SocketAddr; use target_info::Target; use tel::TelemetryEndpoints; +use transaction_pool; /// Service configuration. #[derive(Clone)] pub struct Configuration { - /// Implementation name - pub impl_name: &'static str, - /// Implementation version - pub impl_version: &'static str, - /// Git commit if any. - pub impl_commit: &'static str, - /// Node roles. - pub roles: Roles, - /// Extrinsic pool configuration. - pub transaction_pool: transaction_pool::txpool::Options, - /// Network configuration. - pub network: NetworkConfiguration, - /// Path to key files. - pub keystore_path: String, - /// Path to the database. - pub database_path: String, - /// Cache Size for internal database in MiB - pub database_cache_size: Option, - /// Pruning settings. - pub pruning: PruningMode, - /// Additional key seeds. - pub keys: Vec, - /// Chain configuration. - pub chain_spec: ChainSpec, - /// Custom configuration. - pub custom: C, - /// Node name. - pub name: String, - /// Execution strategies. - pub execution_strategies: ExecutionStrategies, - /// RPC over HTTP binding address. `None` if disabled. - pub rpc_http: Option, - /// RPC over Websockets binding address. `None` if disabled. - pub rpc_ws: Option, - /// Telemetry service URL. `None` if disabled. - pub telemetry_endpoints: Option, - /// The default number of 64KB pages to allocate for Wasm execution - pub default_heap_pages: Option, - /// Should offchain workers be executed. - pub offchain_worker: bool, - /// Enable authoring even when offline. - pub force_authoring: bool, - /// Disable GRANDPA when running in validator mode - pub disable_grandpa: bool, + /// Implementation name + pub impl_name: &'static str, + /// Implementation version + pub impl_version: &'static str, + /// Git commit if any. + pub impl_commit: &'static str, + /// Node roles. + pub roles: Roles, + /// Extrinsic pool configuration. + pub transaction_pool: transaction_pool::txpool::Options, + /// Network configuration. + pub network: NetworkConfiguration, + /// Path to key files. + pub keystore_path: String, + /// Path to the database. + pub database_path: String, + /// Cache Size for internal database in MiB + pub database_cache_size: Option, + /// Pruning settings. + pub pruning: PruningMode, + /// Additional key seeds. + pub keys: Vec, + /// Chain configuration. + pub chain_spec: ChainSpec, + /// Custom configuration. + pub custom: C, + /// Node name. + pub name: String, + /// Execution strategies. + pub execution_strategies: ExecutionStrategies, + /// RPC over HTTP binding address. `None` if disabled. + pub rpc_http: Option, + /// RPC over Websockets binding address. `None` if disabled. + pub rpc_ws: Option, + /// Telemetry service URL. `None` if disabled. + pub telemetry_endpoints: Option, + /// The default number of 64KB pages to allocate for Wasm execution + pub default_heap_pages: Option, + /// Should offchain workers be executed. + pub offchain_worker: bool, + /// Enable authoring even when offline. + pub force_authoring: bool, + /// Disable GRANDPA when running in validator mode + pub disable_grandpa: bool, } impl Configuration { - /// Create default config for given chain spec. 
- pub fn default_with_spec(chain_spec: ChainSpec) -> Self { - let mut configuration = Configuration { - impl_name: "parity-substrate", - impl_version: "0.0.0", - impl_commit: "", - chain_spec, - name: Default::default(), - roles: Roles::FULL, - transaction_pool: Default::default(), - network: Default::default(), - keystore_path: Default::default(), - database_path: Default::default(), - database_cache_size: Default::default(), - keys: Default::default(), - custom: Default::default(), - pruning: PruningMode::default(), - execution_strategies: Default::default(), - rpc_http: None, - rpc_ws: None, - telemetry_endpoints: None, - default_heap_pages: None, - offchain_worker: Default::default(), - force_authoring: false, - disable_grandpa: false, - }; - configuration.network.boot_nodes = configuration.chain_spec.boot_nodes().to_vec(); + /// Create default config for given chain spec. + pub fn default_with_spec(chain_spec: ChainSpec) -> Self { + let mut configuration = Configuration { + impl_name: "parity-substrate", + impl_version: "0.0.0", + impl_commit: "", + chain_spec, + name: Default::default(), + roles: Roles::FULL, + transaction_pool: Default::default(), + network: Default::default(), + keystore_path: Default::default(), + database_path: Default::default(), + database_cache_size: Default::default(), + keys: Default::default(), + custom: Default::default(), + pruning: PruningMode::default(), + execution_strategies: Default::default(), + rpc_http: None, + rpc_ws: None, + telemetry_endpoints: None, + default_heap_pages: None, + offchain_worker: Default::default(), + force_authoring: false, + disable_grandpa: false, + }; + configuration.network.boot_nodes = configuration.chain_spec.boot_nodes().to_vec(); - configuration.telemetry_endpoints = configuration.chain_spec.telemetry_endpoints().clone(); + configuration.telemetry_endpoints = configuration.chain_spec.telemetry_endpoints().clone(); - configuration - } + configuration + } - /// Returns full version string of this configuration. - pub fn full_version(&self) -> String { - full_version_from_strs(self.impl_version, self.impl_commit) - } + /// Returns full version string of this configuration. + pub fn full_version(&self) -> String { + full_version_from_strs(self.impl_version, self.impl_commit) + } - /// Implementation id and version. - pub fn client_id(&self) -> String { - format!("{}/v{}", self.impl_name, self.full_version()) - } + /// Implementation id and version. + pub fn client_id(&self) -> String { + format!("{}/v{}", self.impl_name, self.full_version()) + } } /// Returns platform info pub fn platform() -> String { - let env = Target::env(); - let env_dash = if env.is_empty() { "" } else { "-" }; - format!("{}-{}{}{}", Target::arch(), Target::os(), env_dash, env) + let env = Target::env(); + let env_dash = if env.is_empty() { "" } else { "-" }; + format!("{}-{}{}{}", Target::arch(), Target::os(), env_dash, env) } /// Returns full version string, using supplied version and commit. 
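/// For example, `full_version_from_strs("0.2.0", "abc123")` yields
/// `"0.2.0-abc123-x86_64-linux-gnu"` on a typical Linux target; the commit
/// dash is dropped when the commit string is empty.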
 pub fn full_version_from_strs(impl_version: &str, impl_commit: &str) -> String {
-    let commit_dash = if impl_commit.is_empty() { "" } else { "-" };
-    format!("{}{}{}-{}", impl_version, commit_dash, impl_commit, platform())
+    let commit_dash = if impl_commit.is_empty() { "" } else { "-" };
+    format!(
+        "{}{}{}-{}",
+        impl_version,
+        commit_dash,
+        impl_commit,
+        platform()
+    )
 }
-
diff --git a/core/service/src/error.rs b/core/service/src/error.rs
index 4832efb2ab..3c5ab0ceb9 100644
--- a/core/service/src/error.rs
+++ b/core/service/src/error.rs
@@ -21,23 +21,23 @@
 #![allow(deprecated)]

 use client;
-use network;
-use keystore;
 use consensus_common;
 use error_chain::*;
+use keystore;
+use network;

 error_chain! {
-    foreign_links {
-        Io(::std::io::Error) #[doc="IO error"];
-    }
-
-    links {
-        Client(client::error::Error, client::error::ErrorKind) #[doc="Client error"];
-        Consensus(consensus_common::Error, consensus_common::ErrorKind) #[doc="Consesus error"];
-        Network(network::error::Error, network::error::ErrorKind) #[doc="Network error"];
-        Keystore(keystore::Error, keystore::ErrorKind) #[doc="Keystore error"];
-    }
-
-    errors {
-    }
+    foreign_links {
+        Io(::std::io::Error) #[doc="IO error"];
+    }
+
+    links {
+        Client(client::error::Error, client::error::ErrorKind) #[doc="Client error"];
+        Consensus(consensus_common::Error, consensus_common::ErrorKind) #[doc="Consensus error"];
+        Network(network::error::Error, network::error::ErrorKind) #[doc="Network error"];
+        Keystore(keystore::Error, keystore::ErrorKind) #[doc="Keystore error"];
+    }
+
+    errors {
+    }
 }
diff --git a/core/service/src/lib.rs b/core/service/src/lib.rs
index 105bab8ca0..8efb308876 100644
--- a/core/service/src/lib.rs
+++ b/core/service/src/lib.rs
@@ -19,472 +19,503 @@
 #![warn(missing_docs)]

-mod components;
-mod error;
+pub mod chain_ops;
 mod chain_spec;
+mod components;
 pub mod config;
-pub mod chain_ops;
+mod error;

+use std::collections::HashMap;
 use std::io;
 use std::net::SocketAddr;
-use std::collections::HashMap;

 use client::BlockchainEvents;
 use exit_future::Signal;
 use futures::prelude::*;
 use inherents::pool::InherentsPool;
 use keystore::Store as Keystore;
-use log::{info, warn, debug};
-use parity_codec::{Encode, Decode};
+use log::{debug, info, warn};
+use parity_codec::{Decode, Encode};
 use primitives::Pair;
 use runtime_primitives::generic::BlockId;
-use runtime_primitives::traits::{Header, As};
+use runtime_primitives::traits::{As, Header};
 use substrate_executor::NativeExecutor;
 use tel::{telemetry, SUBSTRATE_INFO};

-pub use self::error::{ErrorKind, Error};
-pub use config::{Configuration, Roles, PruningMode};
+pub use self::error::{Error, ErrorKind};
 pub use chain_spec::{ChainSpec, Properties};
-pub use transaction_pool::txpool::{
-    self, Pool as TransactionPool, Options as TransactionPoolOptions, ChainApi, IntoPoolError
-};
 use client::runtime_api::BlockT;
 pub use client::FinalityNotifications;
+pub use config::{Configuration, PruningMode, Roles};
+pub use transaction_pool::txpool::{
+    self, ChainApi, IntoPoolError, Options as TransactionPoolOptions, Pool as TransactionPool,
+};

-pub use components::{ServiceFactory, FullBackend, FullExecutor, LightBackend,
-    LightExecutor, Components, PoolApi, ComponentClient,
-    ComponentBlock, FullClient, LightClient, FullComponents, LightComponents,
-    CodeExecutor, NetworkService, FactoryChainSpec, FactoryBlock,
-    FactoryFullConfiguration, RuntimeGenesis, FactoryGenesis,
-    ComponentExHash, ComponentExtrinsic, FactoryExtrinsic
+pub use components::{
+    CodeExecutor, ComponentBlock,
ComponentClient, ComponentExHash, ComponentExtrinsic, Components, + FactoryBlock, FactoryChainSpec, FactoryExtrinsic, FactoryFullConfiguration, FactoryGenesis, + FullBackend, FullClient, FullComponents, FullExecutor, LightBackend, LightClient, + LightComponents, LightExecutor, NetworkService, PoolApi, RuntimeGenesis, ServiceFactory, }; -use components::{StartRPC, MaintainTransactionPool, OffchainWorker}; -#[doc(hidden)] -pub use std::{ops::Deref, result::Result, sync::Arc}; +use components::{MaintainTransactionPool, OffchainWorker, StartRPC}; #[doc(hidden)] pub use network::OnDemand; #[doc(hidden)] +pub use std::{ops::Deref, result::Result, sync::Arc}; +#[doc(hidden)] pub use tokio::runtime::TaskExecutor; const DEFAULT_PROTOCOL_ID: &str = "sup"; /// Substrate service. pub struct Service { - client: Arc>, - network: Option>>, - transaction_pool: Arc>, - inherents_pool: Arc>>, - keystore: Keystore, - exit: ::exit_future::Exit, - signal: Option, - /// Configuration of this Service - pub config: FactoryFullConfiguration, - _rpc: Box<::std::any::Any + Send + Sync>, - _telemetry: Option>, - _offchain_workers: Option, ComponentBlock>>>, + client: Arc>, + network: Option>>, + transaction_pool: Arc>, + inherents_pool: Arc>>, + keystore: Keystore, + exit: ::exit_future::Exit, + signal: Option, + /// Configuration of this Service + pub config: FactoryFullConfiguration, + _rpc: Box<::std::any::Any + Send + Sync>, + _telemetry: Option>, + _offchain_workers: Option< + Arc, ComponentBlock>>, + >, } /// Creates bare client without any networking. -pub fn new_client(config: &FactoryFullConfiguration) - -> Result>>, error::Error> -{ - let executor = NativeExecutor::new(config.default_heap_pages); - let (client, _) = components::FullComponents::::build_client( - config, - executor, - )?; - Ok(client) +pub fn new_client( + config: &FactoryFullConfiguration, +) -> Result>>, error::Error> { + let executor = NativeExecutor::new(config.default_heap_pages); + let (client, _) = components::FullComponents::::build_client(config, executor)?; + Ok(client) } impl Service { - /// Creates a new service. - pub fn new( - mut config: FactoryFullConfiguration, - task_executor: TaskExecutor, - ) -> Result { - let (signal, exit) = ::exit_future::signal(); - - // Create client - let executor = NativeExecutor::new(config.default_heap_pages); - - let mut keystore = Keystore::open(config.keystore_path.as_str().into())?; - - // This is meant to be for testing only - // FIXME #1063 remove this - for seed in &config.keys { - keystore.generate_from_seed(seed)?; - } - // Keep the public key for telemetry - let public_key = match keystore.contents()?.get(0) { - Some(public_key) => public_key.clone(), - None => { - let key = keystore.generate("")?; - let public_key = key.public(); - info!("Generated a new keypair: {:?}", public_key); - - public_key - } - }; - - let (client, on_demand) = Components::build_client(&config, executor)?; - let import_queue = Box::new(Components::build_import_queue(&mut config, client.clone())?); - let best_header = client.best_block_header()?; - - let version = config.full_version(); - info!("Best block: #{}", best_header.number()); - telemetry!(SUBSTRATE_INFO; "node.start"; "height" => best_header.number().as_(), "best" => ?best_header.hash()); - - let network_protocol = ::build_network_protocol(&config)?; - let transaction_pool = Arc::new( - Components::build_transaction_pool(config.transaction_pool.clone(), client.clone())? 
- ); - let transaction_pool_adapter = Arc::new(TransactionPoolAdapter:: { - imports_external_transactions: !(config.roles == Roles::LIGHT), - pool: transaction_pool.clone(), - client: client.clone(), - }); - - let network_params = network::config::Params { - config: network::config::ProtocolConfig { roles: config.roles }, - network_config: config.network.clone(), - chain: client.clone(), - on_demand: on_demand.as_ref().map(|d| d.clone() as _), - transaction_pool: transaction_pool_adapter.clone() as _, - specialization: network_protocol, - }; - - let protocol_id = { - let protocol_id_full = config.chain_spec.protocol_id().unwrap_or(DEFAULT_PROTOCOL_ID).as_bytes(); - let mut protocol_id = network::ProtocolId::default(); - if protocol_id_full.len() > protocol_id.len() { - warn!("Protocol ID truncated to {} chars", protocol_id.len()); - } - let id_len = protocol_id_full.len().min(protocol_id.len()); - &mut protocol_id[0..id_len].copy_from_slice(&protocol_id_full[0..id_len]); - protocol_id - }; - - let has_bootnodes = !network_params.network_config.boot_nodes.is_empty(); - let (network, network_chan) = network::Service::new( - network_params, - protocol_id, - import_queue - )?; - on_demand.map(|on_demand| on_demand.set_network_sender(network_chan)); - - let inherents_pool = Arc::new(InherentsPool::default()); - let offchain_workers = if config.offchain_worker { - Some(Arc::new(offchain::OffchainWorkers::new( - client.clone(), - inherents_pool.clone(), - task_executor.clone(), - ))) - } else { - None - }; - - { - // block notifications - let network = Arc::downgrade(&network); - let txpool = Arc::downgrade(&transaction_pool); - let wclient = Arc::downgrade(&client); - let offchain = offchain_workers.as_ref().map(Arc::downgrade); - - let events = client.import_notification_stream() - .for_each(move |notification| { - let number = *notification.header.number(); - - if let Some(network) = network.upgrade() { - network.on_block_imported(notification.hash, notification.header); - } - - if let (Some(txpool), Some(client)) = (txpool.upgrade(), wclient.upgrade()) { - Components::RuntimeServices::maintain_transaction_pool( - &BlockId::hash(notification.hash), - &*client, - &*txpool, - ).map_err(|e| warn!("Pool error processing new block: {:?}", e))?; - } - - if let (Some(txpool), Some(offchain)) = (txpool.upgrade(), offchain.as_ref().and_then(|o| o.upgrade())) { - Components::RuntimeServices::offchain_workers( - &number, - &offchain, - &txpool, - ).map_err(|e| warn!("Offchain workers error processing new block: {:?}", e))?; - } - - Ok(()) - }) - .select(exit.clone()) - .then(|_| Ok(())); - task_executor.spawn(events); - } - - { - // finality notifications - let network = Arc::downgrade(&network); - - // A utility stream that drops all ready items and only returns the last one. - // This is used to only keep the last finality notification and avoid - // overloading the sync module with notifications. - struct MostRecentNotification(futures::stream::Fuse>); - - impl Stream for MostRecentNotification { - type Item = as Stream>::Item; - type Error = as Stream>::Error; - - fn poll(&mut self) -> Poll, Self::Error> { - let mut last = None; - let last = loop { - match self.0.poll()? 
{ - Async::Ready(Some(item)) => { last = Some(item) } - Async::Ready(None) => match last { - None => return Ok(Async::Ready(None)), - Some(last) => break last, - }, - Async::NotReady => match last { - None => return Ok(Async::NotReady), - Some(last) => break last, - }, - } - }; - - Ok(Async::Ready(Some(last))) - } - } - - let events = MostRecentNotification(client.finality_notification_stream().fuse()) - .for_each(move |notification| { - if let Some(network) = network.upgrade() { - network.on_block_finalized(notification.hash, notification.header); - } - Ok(()) - }) - .select(exit.clone()) - .then(|_| Ok(())); - - task_executor.spawn(events); - } - - { - // extrinsic notifications - let network = Arc::downgrade(&network); - let events = transaction_pool.import_notification_stream() - .for_each(move |_| { - if let Some(network) = network.upgrade() { - network.trigger_repropagate(); - } - Ok(()) - }) - .select(exit.clone()) - .then(|_| Ok(())); - - task_executor.spawn(events); - } - - - // RPC - let system_info = rpc::apis::system::SystemInfo { - chain_name: config.chain_spec.name().into(), - impl_name: config.impl_name.into(), - impl_version: config.impl_version.into(), - properties: config.chain_spec.properties(), - }; - let rpc = Components::RuntimeServices::start_rpc( - client.clone(), network.clone(), has_bootnodes, system_info, config.rpc_http, - config.rpc_ws, task_executor.clone(), transaction_pool.clone(), - )?; - - // Telemetry - let telemetry = config.telemetry_endpoints.clone().map(|endpoints| { - let is_authority = config.roles == Roles::AUTHORITY; - let network_id = network.local_peer_id().to_base58(); - let pubkey = format!("{}", public_key); - let name = config.name.clone(); - let impl_name = config.impl_name.to_owned(); - let version = version.clone(); - let chain_name = config.chain_spec.name().to_owned(); - Arc::new(tel::init_telemetry(tel::TelemetryConfig { - endpoints, - on_connect: Box::new(move || { - telemetry!(SUBSTRATE_INFO; "system.connected"; - "name" => name.clone(), - "implementation" => impl_name.clone(), - "version" => version.clone(), - "config" => "", - "chain" => chain_name.clone(), - "pubkey" => &pubkey, - "authority" => is_authority, - "network_id" => network_id.clone() - ); - }), - })) - }); - - Ok(Service { - client, - network: Some(network), - transaction_pool, - inherents_pool, - signal: Some(signal), - keystore, - config, - exit, - _rpc: Box::new(rpc), - _telemetry: telemetry, - _offchain_workers: offchain_workers, - }) - } - - /// give the authority key, if we are an authority and have a key - pub fn authority_key(&self) -> Option { - if self.config.roles != Roles::AUTHORITY { return None } - let keystore = &self.keystore; - if let Ok(Some(Ok(key))) = keystore.contents().map(|keys| keys.get(0) - .map(|k| keystore.load(k, ""))) - { - Some(key) - } else { - None - } - } - - /// return a shared instance of Telemtry (if enabled) - pub fn telemetry(&self) -> Option> { - self._telemetry.as_ref().map(|t| t.clone()) - } + /// Creates a new service. 
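// A hypothetical usage sketch of the constructor declared below, assuming
// a tokio 0.1 `Runtime`; `MyFactory` and `load_spec` are illustrative
// names, not part of this crate:
//
//     let mut runtime = tokio::runtime::Runtime::new()?;
//     let config = Configuration::default_with_spec(load_spec()?);
//     let service =
//         Service::<FullComponents<MyFactory>>::new(config, runtime.executor())?;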
+    pub fn new(
+        mut config: FactoryFullConfiguration,
+        task_executor: TaskExecutor,
+    ) -> Result {
+        let (signal, exit) = ::exit_future::signal();
+
+        // Create client
+        let executor = NativeExecutor::new(config.default_heap_pages);
+
+        let mut keystore = Keystore::open(config.keystore_path.as_str().into())?;
+
+        // This is meant to be for testing only
+        // FIXME #1063 remove this
+        for seed in &config.keys {
+            keystore.generate_from_seed(seed)?;
+        }
+        // Keep the public key for telemetry
+        let public_key = match keystore.contents()?.get(0) {
+            Some(public_key) => public_key.clone(),
+            None => {
+                let key = keystore.generate("")?;
+                let public_key = key.public();
+                info!("Generated a new keypair: {:?}", public_key);
+
+                public_key
+            }
+        };
+
+        let (client, on_demand) = Components::build_client(&config, executor)?;
+        let import_queue = Box::new(Components::build_import_queue(&mut config, client.clone())?);
+        let best_header = client.best_block_header()?;
+
+        let version = config.full_version();
+        info!("Best block: #{}", best_header.number());
+        telemetry!(SUBSTRATE_INFO; "node.start"; "height" => best_header.number().as_(), "best" => ?best_header.hash());
+
+        let network_protocol = ::build_network_protocol(&config)?;
+        let transaction_pool = Arc::new(Components::build_transaction_pool(
+            config.transaction_pool.clone(),
+            client.clone(),
+        )?);
+        let transaction_pool_adapter = Arc::new(TransactionPoolAdapter:: {
+            imports_external_transactions: !(config.roles == Roles::LIGHT),
+            pool: transaction_pool.clone(),
+            client: client.clone(),
+        });
+
+        let network_params = network::config::Params {
+            config: network::config::ProtocolConfig {
+                roles: config.roles,
+            },
+            network_config: config.network.clone(),
+            chain: client.clone(),
+            on_demand: on_demand.as_ref().map(|d| d.clone() as _),
+            transaction_pool: transaction_pool_adapter.clone() as _,
+            specialization: network_protocol,
+        };
+
+        let protocol_id = {
+            let protocol_id_full = config
+                .chain_spec
+                .protocol_id()
+                .unwrap_or(DEFAULT_PROTOCOL_ID)
+                .as_bytes();
+            let mut protocol_id = network::ProtocolId::default();
+            if protocol_id_full.len() > protocol_id.len() {
+                warn!("Protocol ID truncated to {} chars", protocol_id.len());
+            }
+            let id_len = protocol_id_full.len().min(protocol_id.len());
+            protocol_id[0..id_len].copy_from_slice(&protocol_id_full[0..id_len]);
+            protocol_id
+        };
+
+        let has_bootnodes = !network_params.network_config.boot_nodes.is_empty();
+        let (network, network_chan) =
+            network::Service::new(network_params, protocol_id, import_queue)?;
+        on_demand.map(|on_demand| on_demand.set_network_sender(network_chan));
+
+        let inherents_pool = Arc::new(InherentsPool::default());
+        let offchain_workers = if config.offchain_worker {
+            Some(Arc::new(offchain::OffchainWorkers::new(
+                client.clone(),
+                inherents_pool.clone(),
+                task_executor.clone(),
+            )))
+        } else {
+            None
+        };
+
+        {
+            // block notifications
+            let network = Arc::downgrade(&network);
+            let txpool = Arc::downgrade(&transaction_pool);
+            let wclient = Arc::downgrade(&client);
+            let offchain = offchain_workers.as_ref().map(Arc::downgrade);
+
+            let events = client
+                .import_notification_stream()
+                .for_each(move |notification| {
+                    let number = *notification.header.number();
+
+                    if let Some(network) = network.upgrade() {
+                        network.on_block_imported(notification.hash, notification.header);
+                    }
+
+                    if let (Some(txpool), Some(client)) = (txpool.upgrade(), wclient.upgrade()) {
+                        Components::RuntimeServices::maintain_transaction_pool(
+
&BlockId::hash(notification.hash), + &*client, + &*txpool, + ) + .map_err(|e| warn!("Pool error processing new block: {:?}", e))?; + } + + if let (Some(txpool), Some(offchain)) = ( + txpool.upgrade(), + offchain.as_ref().and_then(|o| o.upgrade()), + ) { + Components::RuntimeServices::offchain_workers(&number, &offchain, &txpool) + .map_err(|e| { + warn!("Offchain workers error processing new block: {:?}", e) + })?; + } + + Ok(()) + }) + .select(exit.clone()) + .then(|_| Ok(())); + task_executor.spawn(events); + } + + { + // finality notifications + let network = Arc::downgrade(&network); + + // A utility stream that drops all ready items and only returns the last one. + // This is used to only keep the last finality notification and avoid + // overloading the sync module with notifications. + struct MostRecentNotification( + futures::stream::Fuse>, + ); + + impl Stream for MostRecentNotification { + type Item = as Stream>::Item; + type Error = as Stream>::Error; + + fn poll(&mut self) -> Poll, Self::Error> { + let mut last = None; + let last = loop { + match self.0.poll()? { + Async::Ready(Some(item)) => last = Some(item), + Async::Ready(None) => match last { + None => return Ok(Async::Ready(None)), + Some(last) => break last, + }, + Async::NotReady => match last { + None => return Ok(Async::NotReady), + Some(last) => break last, + }, + } + }; + + Ok(Async::Ready(Some(last))) + } + } + + let events = MostRecentNotification(client.finality_notification_stream().fuse()) + .for_each(move |notification| { + if let Some(network) = network.upgrade() { + network.on_block_finalized(notification.hash, notification.header); + } + Ok(()) + }) + .select(exit.clone()) + .then(|_| Ok(())); + + task_executor.spawn(events); + } + + { + // extrinsic notifications + let network = Arc::downgrade(&network); + let events = transaction_pool + .import_notification_stream() + .for_each(move |_| { + if let Some(network) = network.upgrade() { + network.trigger_repropagate(); + } + Ok(()) + }) + .select(exit.clone()) + .then(|_| Ok(())); + + task_executor.spawn(events); + } + + // RPC + let system_info = rpc::apis::system::SystemInfo { + chain_name: config.chain_spec.name().into(), + impl_name: config.impl_name.into(), + impl_version: config.impl_version.into(), + properties: config.chain_spec.properties(), + }; + let rpc = Components::RuntimeServices::start_rpc( + client.clone(), + network.clone(), + has_bootnodes, + system_info, + config.rpc_http, + config.rpc_ws, + task_executor.clone(), + transaction_pool.clone(), + )?; + + // Telemetry + let telemetry = config.telemetry_endpoints.clone().map(|endpoints| { + let is_authority = config.roles == Roles::AUTHORITY; + let network_id = network.local_peer_id().to_base58(); + let pubkey = format!("{}", public_key); + let name = config.name.clone(); + let impl_name = config.impl_name.to_owned(); + let version = version.clone(); + let chain_name = config.chain_spec.name().to_owned(); + Arc::new(tel::init_telemetry(tel::TelemetryConfig { + endpoints, + on_connect: Box::new(move || { + telemetry!(SUBSTRATE_INFO; "system.connected"; + "name" => name.clone(), + "implementation" => impl_name.clone(), + "version" => version.clone(), + "config" => "", + "chain" => chain_name.clone(), + "pubkey" => &pubkey, + "authority" => is_authority, + "network_id" => network_id.clone() + ); + }), + })) + }); + + Ok(Service { + client, + network: Some(network), + transaction_pool, + inherents_pool, + signal: Some(signal), + keystore, + config, + exit, + _rpc: Box::new(rpc), + _telemetry: 
telemetry,
+            _offchain_workers: offchain_workers,
+        })
+    }
+
+    /// Give the authority key, if we are an authority and have a key.
+    pub fn authority_key(&self) -> Option {
+        if self.config.roles != Roles::AUTHORITY {
+            return None;
+        }
+        let keystore = &self.keystore;
+        if let Ok(Some(Ok(key))) = keystore
+            .contents()
+            .map(|keys| keys.get(0).map(|k| keystore.load(k, "")))
+        {
+            Some(key)
+        } else {
+            None
+        }
+    }
+
+    /// Return a shared instance of Telemetry (if enabled).
+    pub fn telemetry(&self) -> Option> {
+        self._telemetry.as_ref().map(|t| t.clone())
+    }
 }

-impl Service where Components: components::Components {
-    /// Get shared client instance.
-    pub fn client(&self) -> Arc> {
-        self.client.clone()
-    }
-
-    /// Get shared network instance.
-    pub fn network(&self) -> Arc> {
-        self.network.as_ref().expect("self.network always Some").clone()
-    }
-
-    /// Get shared transaction pool instance.
-    pub fn transaction_pool(&self) -> Arc> {
-        self.transaction_pool.clone()
-    }
-
-    /// Get shared inherents pool instance.
-    pub fn inherents_pool(&self) -> Arc>> {
-        self.inherents_pool.clone()
-    }
-
-    /// Get shared keystore.
-    pub fn keystore(&self) -> &Keystore {
-        &self.keystore
-    }
-
-    /// Get a handle to a future that will resolve on exit.
-    pub fn on_exit(&self) -> ::exit_future::Exit {
-        self.exit.clone()
-    }
+impl Service
+where
+    Components: components::Components,
+{
+    /// Get shared client instance.
+    pub fn client(&self) -> Arc> {
+        self.client.clone()
+    }
+
+    /// Get shared network instance.
+    pub fn network(&self) -> Arc> {
+        self.network
+            .as_ref()
+            .expect("self.network always Some")
+            .clone()
+    }
+
+    /// Get shared transaction pool instance.
+    pub fn transaction_pool(&self) -> Arc> {
+        self.transaction_pool.clone()
+    }
+
+    /// Get shared inherents pool instance.
+    pub fn inherents_pool(&self) -> Arc>> {
+        self.inherents_pool.clone()
+    }
+
+    /// Get shared keystore.
+    pub fn keystore(&self) -> &Keystore {
+        &self.keystore
+    }
+
+    /// Get a handle to a future that will resolve on exit.
+    pub fn on_exit(&self) -> ::exit_future::Exit {
+        self.exit.clone()
+    }
 }

+impl Drop for Service
+where
+    Components: components::Components,
+{
+    fn drop(&mut self) {
+        debug!(target: "service", "Substrate service shutdown");

-impl Drop for Service where Components: components::Components {
-    fn drop(&mut self) {
-        debug!(target: "service", "Substrate service shutdown");
-
-        drop(self.network.take());
+        drop(self.network.take());

-        if let Some(signal) = self.signal.take() {
-            signal.fire();
-        }
-    }
+        if let Some(signal) = self.signal.take() {
+            signal.fire();
+        }
+    }
 }

 fn maybe_start_server(address: Option, start: F) -> Result, io::Error>
-    where F: Fn(&SocketAddr) -> Result,
+where
+    F: Fn(&SocketAddr) -> Result,
 {
-    Ok(match address {
-        Some(mut address) => Some(start(&address)
-            .or_else(|e| match e.kind() {
-                io::ErrorKind::AddrInUse |
-                io::ErrorKind::PermissionDenied => {
-                    warn!("Unable to bind server to {}. Trying random port.", address);
-                    address.set_port(0);
-                    start(&address)
-                },
-                _ => Err(e),
-            })?),
-        None => None,
-    })
+    Ok(match address {
+        Some(mut address) => Some(start(&address).or_else(|e| match e.kind() {
+            io::ErrorKind::AddrInUse | io::ErrorKind::PermissionDenied => {
+                warn!("Unable to bind server to {}. Trying random port.", address);
+                address.set_port(0);
+                start(&address)
+            }
+            _ => Err(e),
+        })?),
+        None => None,
+    })
 }

 /// Transaction pool adapter.
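// A generic sketch of the keep-only-the-newest-item technique used by
// `MostRecentNotification` in `Service::new` above, assuming futures 0.1
// (`Async`/`Poll`); `LatestOnly` is an illustrative name, not part of this
// crate:
//
//     use futures::stream::Fuse;
//     use futures::{Async, Poll, Stream};
//
//     struct LatestOnly<S: Stream>(Fuse<S>);
//
//     impl<S: Stream> LatestOnly<S> {
//         fn new(inner: S) -> Self {
//             // Fusing makes it safe to keep polling after the inner
//             // stream has terminated.
//             LatestOnly(inner.fuse())
//         }
//     }
//
//     impl<S: Stream> Stream for LatestOnly<S> {
//         type Item = S::Item;
//         type Error = S::Error;
//
//         fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
//             let mut last = None;
//             loop {
//                 match self.0.poll()? {
//                     // Drain everything that is ready, keeping the newest.
//                     Async::Ready(Some(item)) => last = Some(item),
//                     // End of stream: flush the buffered item, if any.
//                     Async::Ready(None) => return Ok(Async::Ready(last)),
//                     // Nothing more ready: yield the newest item seen, or
//                     // report NotReady if there was none at all.
//                     Async::NotReady => {
//                         return match last {
//                             Some(item) => Ok(Async::Ready(Some(item))),
//                             None => Ok(Async::NotReady),
//                         };
//                     }
//                 }
//             }
//         }
//     }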
pub struct TransactionPoolAdapter { - imports_external_transactions: bool, - pool: Arc>, - client: Arc>, + imports_external_transactions: bool, + pool: Arc>, + client: Arc>, } impl TransactionPoolAdapter { - fn best_block_id(&self) -> Option>> { - self.client.info() - .map(|info| BlockId::hash(info.chain.best_hash)) - .map_err(|e| { - debug!("Error getting best block: {:?}", e); - }) - .ok() - } + fn best_block_id(&self) -> Option>> { + self.client + .info() + .map(|info| BlockId::hash(info.chain.best_hash)) + .map_err(|e| { + debug!("Error getting best block: {:?}", e); + }) + .ok() + } } -impl network::TransactionPool, ComponentBlock> for - TransactionPoolAdapter where ::RuntimeApi: Send + Sync +impl network::TransactionPool, ComponentBlock> + for TransactionPoolAdapter +where + ::RuntimeApi: Send + Sync, { - fn transactions(&self) -> Vec<(ComponentExHash, ComponentExtrinsic)> { - self.pool.ready() - .map(|t| { - let hash = t.hash.clone(); - let ex: ComponentExtrinsic = t.data.clone(); - (hash, ex) - }) - .collect() - } - - fn import(&self, transaction: &ComponentExtrinsic) -> Option> { - if !self.imports_external_transactions { - debug!("Transaction rejected"); - return None; - } - - let encoded = transaction.encode(); - if let Some(uxt) = Decode::decode(&mut &encoded[..]) { - let best_block_id = self.best_block_id()?; - match self.pool.submit_one(&best_block_id, uxt) { - Ok(hash) => Some(hash), - Err(e) => match e.into_pool_error() { - Ok(txpool::error::Error(txpool::error::ErrorKind::AlreadyImported(hash), _)) => { - hash.downcast::>().ok() - .map(|x| x.as_ref().clone()) - }, - Ok(e) => { - debug!("Error adding transaction to the pool: {:?}", e); - None - }, - Err(e) => { - debug!("Error converting pool error: {:?}", e); - None - }, - } - } - } else { - debug!("Error decoding transaction"); - None - } - } - - fn on_broadcasted(&self, propagations: HashMap, Vec>) { - self.pool.on_broadcasted(propagations) - } + fn transactions(&self) -> Vec<(ComponentExHash, ComponentExtrinsic)> { + self.pool + .ready() + .map(|t| { + let hash = t.hash.clone(); + let ex: ComponentExtrinsic = t.data.clone(); + (hash, ex) + }) + .collect() + } + + fn import(&self, transaction: &ComponentExtrinsic) -> Option> { + if !self.imports_external_transactions { + debug!("Transaction rejected"); + return None; + } + + let encoded = transaction.encode(); + if let Some(uxt) = Decode::decode(&mut &encoded[..]) { + let best_block_id = self.best_block_id()?; + match self.pool.submit_one(&best_block_id, uxt) { + Ok(hash) => Some(hash), + Err(e) => match e.into_pool_error() { + Ok(txpool::error::Error( + txpool::error::ErrorKind::AlreadyImported(hash), + _, + )) => hash + .downcast::>() + .ok() + .map(|x| x.as_ref().clone()), + Ok(e) => { + debug!("Error adding transaction to the pool: {:?}", e); + None + } + Err(e) => { + debug!("Error converting pool error: {:?}", e); + None + } + }, + } + } else { + debug!("Error decoding transaction"); + None + } + } + + fn on_broadcasted(&self, propagations: HashMap, Vec>) { + self.pool.on_broadcasted(propagations) + } } /// Constructs a service factory with the given name that implements the `ServiceFactory` trait. diff --git a/core/service/test/src/lib.rs b/core/service/test/src/lib.rs index dcdae582fa..707e8998da 100644 --- a/core/service/test/src/lib.rs +++ b/core/service/test/src/lib.rs @@ -16,264 +16,357 @@ //! Service integration test utils. 
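// The `run_until_all_full` helper below drives the test network by polling
// a predicate on a 100ms timer and using `Err(())` purely as control flow
// to break out of `for_each` once the predicate holds. A minimal standalone
// sketch of that pattern, assuming tokio 0.1 and futures 0.1; `block_until`
// and `done` are illustrative names, not part of this crate:

fn block_until<F>(runtime: &mut tokio::runtime::Runtime, done: F)
where
    F: Fn() -> bool + Send + 'static,
{
    use futures::Stream;
    use std::time::Duration;
    use tokio::timer::Interval;

    let poll = Interval::new_interval(Duration::from_millis(100))
        // The timer error type does not matter here; collapse it to `()`.
        .map_err(|_| ())
        // Returning `Err(())` stops the loop; it signals "done", not failure.
        .for_each(move |_| if done() { Err(()) } else { Ok(()) });

    // The "error" is the expected way out, so the result is ignored.
    let _ = runtime.block_on(poll);
}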
+use consensus::{BlockImport, ImportBlock}; +use futures::{Future, Stream}; +use log::info; +use network::config::{NetworkConfiguration, NodeKeyConfig, NonReservedPeerMode, Secret}; +use network::{multiaddr, ManageNetwork, SyncProvider}; +use service::{ + Configuration, FactoryChainSpec, FactoryExtrinsic, FactoryFullConfiguration, Roles, + ServiceFactory, +}; +use sr_primitives::generic::BlockId; +use sr_primitives::traits::As; +use std::collections::HashMap; use std::iter; -use std::sync::Arc; use std::net::Ipv4Addr; +use std::sync::Arc; use std::time::Duration; -use std::collections::HashMap; -use log::info; -use futures::{Future, Stream}; use tempdir::TempDir; use tokio::runtime::Runtime; use tokio::timer::Interval; -use service::{ - ServiceFactory, - Configuration, - FactoryFullConfiguration, - FactoryChainSpec, - Roles, - FactoryExtrinsic, -}; -use network::{multiaddr, SyncProvider, ManageNetwork}; -use network::config::{NetworkConfiguration, NodeKeyConfig, Secret, NonReservedPeerMode}; -use sr_primitives::traits::As; -use sr_primitives::generic::BlockId; -use consensus::{ImportBlock, BlockImport}; struct TestNet { - runtime: Runtime, - authority_nodes: Vec<(u32, Arc)>, - full_nodes: Vec<(u32, Arc)>, - _light_nodes: Vec<(u32, Arc)>, - chain_spec: FactoryChainSpec, - base_port: u16, - nodes: usize, + runtime: Runtime, + authority_nodes: Vec<(u32, Arc)>, + full_nodes: Vec<(u32, Arc)>, + _light_nodes: Vec<(u32, Arc)>, + chain_spec: FactoryChainSpec, + base_port: u16, + nodes: usize, } impl TestNet { - pub fn run_until_all_full bool + 'static>(&mut self, predicate: P) { - let full_nodes = self.full_nodes.clone(); - let interval = Interval::new_interval(Duration::from_millis(100)).map_err(|_| ()).for_each(move |_| { - if full_nodes.iter().all(|&(ref id, ref service)| predicate(*id, service)) { - Err(()) - } else { - Ok(()) - } - }); - self.runtime.block_on(interval).ok(); - } + pub fn run_until_all_full bool + 'static>( + &mut self, + predicate: P, + ) { + let full_nodes = self.full_nodes.clone(); + let interval = Interval::new_interval(Duration::from_millis(100)) + .map_err(|_| ()) + .for_each(move |_| { + if full_nodes + .iter() + .all(|&(ref id, ref service)| predicate(*id, service)) + { + Err(()) + } else { + Ok(()) + } + }); + self.runtime.block_on(interval).ok(); + } } -fn node_config ( - index: u32, - spec: &FactoryChainSpec, - role: Roles, - key_seed: Option, - base_port: u16, - root: &TempDir, -) -> FactoryFullConfiguration -{ - let root = root.path().join(format!("node-{}", index)); - let mut keys = Vec::new(); - if let Some(seed) = key_seed { - keys.push(seed); - } +fn node_config( + index: u32, + spec: &FactoryChainSpec, + role: Roles, + key_seed: Option, + base_port: u16, + root: &TempDir, +) -> FactoryFullConfiguration { + let root = root.path().join(format!("node-{}", index)); + let mut keys = Vec::new(); + if let Some(seed) = key_seed { + keys.push(seed); + } - let config_path = Some(String::from(root.join("network").to_str().unwrap())); - let net_config_path = config_path.clone(); + let config_path = Some(String::from(root.join("network").to_str().unwrap())); + let net_config_path = config_path.clone(); - let network_config = NetworkConfiguration { - config_path, - net_config_path, - listen_addresses: vec! 
[ - iter::once(multiaddr::Protocol::Ip4(Ipv4Addr::new(127, 0, 0, 1))) - .chain(iter::once(multiaddr::Protocol::Tcp(base_port + index as u16))) - .collect() - ], - public_addresses: vec![], - boot_nodes: vec![], - node_key: NodeKeyConfig::Ed25519(Secret::New), - in_peers: 50, - out_peers: 450, - reserved_nodes: vec![], - non_reserved_mode: NonReservedPeerMode::Accept, - client_version: "network/test/0.1".to_owned(), - node_name: "unknown".to_owned(), - enable_mdns: false, - }; + let network_config = NetworkConfiguration { + config_path, + net_config_path, + listen_addresses: vec![ + iter::once(multiaddr::Protocol::Ip4(Ipv4Addr::new(127, 0, 0, 1))) + .chain(iter::once(multiaddr::Protocol::Tcp( + base_port + index as u16, + ))) + .collect(), + ], + public_addresses: vec![], + boot_nodes: vec![], + node_key: NodeKeyConfig::Ed25519(Secret::New), + in_peers: 50, + out_peers: 450, + reserved_nodes: vec![], + non_reserved_mode: NonReservedPeerMode::Accept, + client_version: "network/test/0.1".to_owned(), + node_name: "unknown".to_owned(), + enable_mdns: false, + }; - Configuration { - impl_name: "network-test-impl", - impl_version: "0.1", - impl_commit: "", - roles: role, - transaction_pool: Default::default(), - network: network_config, - keystore_path: root.join("key").to_str().unwrap().into(), - database_path: root.join("db").to_str().unwrap().into(), - database_cache_size: None, - pruning: Default::default(), - keys: keys, - chain_spec: (*spec).clone(), - custom: Default::default(), - name: format!("Node {}", index), - execution_strategies: Default::default(), - rpc_http: None, - rpc_ws: None, - telemetry_endpoints: None, - default_heap_pages: None, - offchain_worker: false, - force_authoring: false, - disable_grandpa: false, - } + Configuration { + impl_name: "network-test-impl", + impl_version: "0.1", + impl_commit: "", + roles: role, + transaction_pool: Default::default(), + network: network_config, + keystore_path: root.join("key").to_str().unwrap().into(), + database_path: root.join("db").to_str().unwrap().into(), + database_cache_size: None, + pruning: Default::default(), + keys: keys, + chain_spec: (*spec).clone(), + custom: Default::default(), + name: format!("Node {}", index), + execution_strategies: Default::default(), + rpc_http: None, + rpc_ws: None, + telemetry_endpoints: None, + default_heap_pages: None, + offchain_worker: false, + force_authoring: false, + disable_grandpa: false, + } } impl TestNet { - fn new(temp: &TempDir, spec: FactoryChainSpec, full: u32, light: u32, authorities: Vec, base_port: u16) -> TestNet { - let _ = ::env_logger::try_init(); - ::fdlimit::raise_fd_limit(); - let runtime = Runtime::new().expect("Error creating tokio runtime"); - let mut net = TestNet { - runtime, - authority_nodes: Default::default(), - full_nodes: Default::default(), - _light_nodes: Default::default(), - chain_spec: spec.clone(), - base_port, - nodes: 0, - }; - net.insert_nodes(temp, full, light, authorities); - net - } + fn new( + temp: &TempDir, + spec: FactoryChainSpec, + full: u32, + light: u32, + authorities: Vec, + base_port: u16, + ) -> TestNet { + let _ = ::env_logger::try_init(); + ::fdlimit::raise_fd_limit(); + let runtime = Runtime::new().expect("Error creating tokio runtime"); + let mut net = TestNet { + runtime, + authority_nodes: Default::default(), + full_nodes: Default::default(), + _light_nodes: Default::default(), + chain_spec: spec.clone(), + base_port, + nodes: 0, + }; + net.insert_nodes(temp, full, light, authorities); + net + } - fn insert_nodes(&mut self, temp: 
&TempDir, full: u32, light: u32, authorities: Vec) { - let mut nodes = self.nodes; - let base_port = self.base_port; - let spec = self.chain_spec.clone(); - let executor = self.runtime.executor(); - self.authority_nodes.extend(authorities.iter().enumerate().map(|(index, key)| ((index + nodes) as u32, - Arc::new(F::new_full(node_config::(index as u32, &spec, Roles::AUTHORITY, Some(key.clone()), base_port, &temp), executor.clone()) - .expect("Error creating test node service"))) - )); - nodes += authorities.len(); + fn insert_nodes(&mut self, temp: &TempDir, full: u32, light: u32, authorities: Vec) { + let mut nodes = self.nodes; + let base_port = self.base_port; + let spec = self.chain_spec.clone(); + let executor = self.runtime.executor(); + self.authority_nodes + .extend(authorities.iter().enumerate().map(|(index, key)| { + ( + (index + nodes) as u32, + Arc::new( + F::new_full( + node_config::( + index as u32, + &spec, + Roles::AUTHORITY, + Some(key.clone()), + base_port, + &temp, + ), + executor.clone(), + ) + .expect("Error creating test node service"), + ), + ) + })); + nodes += authorities.len(); - self.full_nodes.extend((nodes..nodes + full as usize).map(|index| (index as u32, - Arc::new(F::new_full(node_config::(index as u32, &spec, Roles::FULL, None, base_port, &temp), executor.clone()) - .expect("Error creating test node service"))) - )); - nodes += full as usize; + self.full_nodes + .extend((nodes..nodes + full as usize).map(|index| { + ( + index as u32, + Arc::new( + F::new_full( + node_config::( + index as u32, + &spec, + Roles::FULL, + None, + base_port, + &temp, + ), + executor.clone(), + ) + .expect("Error creating test node service"), + ), + ) + })); + nodes += full as usize; - self._light_nodes.extend((nodes..nodes + light as usize).map(|index| (index as u32, - Arc::new(F::new_light(node_config::(index as u32, &spec, Roles::LIGHT, None, base_port, &temp), executor.clone()) - .expect("Error creating test node service"))) - )); - nodes += light as usize; - self.nodes = nodes; - } + self._light_nodes + .extend((nodes..nodes + light as usize).map(|index| { + ( + index as u32, + Arc::new( + F::new_light( + node_config::( + index as u32, + &spec, + Roles::LIGHT, + None, + base_port, + &temp, + ), + executor.clone(), + ) + .expect("Error creating test node service"), + ), + ) + })); + nodes += light as usize; + self.nodes = nodes; + } } pub fn connectivity(spec: FactoryChainSpec) { - const NUM_NODES: u32 = 10; - { - let temp = TempDir::new("substrate-connectivity-test").expect("Error creating test dir"); - let runtime = { - let mut network = TestNet::::new(&temp, spec.clone(), NUM_NODES, 0, vec![], 30400); - info!("Checking star topology"); - let first_address = network.full_nodes[0].1.network().node_id().expect("No node address"); - for (_, service) in network.full_nodes.iter().skip(1) { - service.network().add_reserved_peer(first_address.clone()).expect("Error adding reserved peer"); - } - network.run_until_all_full(|_index, service| - service.network().peers().len() == NUM_NODES as usize - 1 - ); - network.runtime - }; + const NUM_NODES: u32 = 10; + { + let temp = TempDir::new("substrate-connectivity-test").expect("Error creating test dir"); + let runtime = { + let mut network = TestNet::::new(&temp, spec.clone(), NUM_NODES, 0, vec![], 30400); + info!("Checking star topology"); + let first_address = network.full_nodes[0] + .1 + .network() + .node_id() + .expect("No node address"); + for (_, service) in network.full_nodes.iter().skip(1) { + service + .network() + 
.add_reserved_peer(first_address.clone()) + .expect("Error adding reserved peer"); + } + network.run_until_all_full(|_index, service| { + service.network().peers().len() == NUM_NODES as usize - 1 + }); + network.runtime + }; - runtime.shutdown_on_idle().wait().expect("Error shutting down runtime"); + runtime + .shutdown_on_idle() + .wait() + .expect("Error shutting down runtime"); - temp.close().expect("Error removing temp dir"); - } - { - let temp = TempDir::new("substrate-connectivity-test").expect("Error creating test dir"); - { - let mut network = TestNet::::new(&temp, spec, NUM_NODES, 0, vec![], 30400); - info!("Checking linked topology"); - let mut address = network.full_nodes[0].1.network().node_id().expect("No node address"); - for (_, service) in network.full_nodes.iter().skip(1) { - service.network().add_reserved_peer(address.clone()).expect("Error adding reserved peer"); - address = service.network().node_id().expect("No node address"); - } - network.run_until_all_full(|_index, service| { - service.network().peers().len() == NUM_NODES as usize - 1 - }); - } - temp.close().expect("Error removing temp dir"); - } + temp.close().expect("Error removing temp dir"); + } + { + let temp = TempDir::new("substrate-connectivity-test").expect("Error creating test dir"); + { + let mut network = TestNet::::new(&temp, spec, NUM_NODES, 0, vec![], 30400); + info!("Checking linked topology"); + let mut address = network.full_nodes[0] + .1 + .network() + .node_id() + .expect("No node address"); + for (_, service) in network.full_nodes.iter().skip(1) { + service + .network() + .add_reserved_peer(address.clone()) + .expect("Error adding reserved peer"); + address = service.network().node_id().expect("No node address"); + } + network.run_until_all_full(|_index, service| { + service.network().peers().len() == NUM_NODES as usize - 1 + }); + } + temp.close().expect("Error removing temp dir"); + } } pub fn sync(spec: FactoryChainSpec, block_factory: B, extrinsic_factory: E) where - F: ServiceFactory, - B: Fn(&F::FullService) -> ImportBlock, - E: Fn(&F::FullService) -> FactoryExtrinsic, + F: ServiceFactory, + B: Fn(&F::FullService) -> ImportBlock, + E: Fn(&F::FullService) -> FactoryExtrinsic, { - const NUM_NODES: u32 = 10; - const NUM_BLOCKS: usize = 512; - let temp = TempDir::new("substrate-sync-test").expect("Error creating test dir"); - let mut network = TestNet::::new(&temp, spec.clone(), NUM_NODES, 0, vec![], 30500); - info!("Checking block sync"); - let first_address = { - let first_service = &network.full_nodes[0].1; - for i in 0 .. 
NUM_BLOCKS { - if i % 128 == 0 { - info!("Generating #{}", i); - } - let import_data = block_factory(&first_service); - first_service.client().import_block(import_data, HashMap::new()).expect("Error importing test block"); - } - first_service.network().node_id().unwrap() - }; - info!("Running sync"); - for (_, service) in network.full_nodes.iter().skip(1) { - service.network().add_reserved_peer(first_address.clone()).expect("Error adding reserved peer"); - } - network.run_until_all_full(|_index, service| - service.client().info().unwrap().chain.best_number == As::sa(NUM_BLOCKS as u64) - ); - info!("Checking extrinsic propagation"); - let first_service = network.full_nodes[0].1.clone(); - let best_block = BlockId::number(first_service.client().info().unwrap().chain.best_number); - first_service.transaction_pool().submit_one(&best_block, extrinsic_factory(&first_service)).unwrap(); - network.run_until_all_full(|_index, service| - service.transaction_pool().ready().count() == 1 - ); + const NUM_NODES: u32 = 10; + const NUM_BLOCKS: usize = 512; + let temp = TempDir::new("substrate-sync-test").expect("Error creating test dir"); + let mut network = TestNet::::new(&temp, spec.clone(), NUM_NODES, 0, vec![], 30500); + info!("Checking block sync"); + let first_address = { + let first_service = &network.full_nodes[0].1; + for i in 0..NUM_BLOCKS { + if i % 128 == 0 { + info!("Generating #{}", i); + } + let import_data = block_factory(&first_service); + first_service + .client() + .import_block(import_data, HashMap::new()) + .expect("Error importing test block"); + } + first_service.network().node_id().unwrap() + }; + info!("Running sync"); + for (_, service) in network.full_nodes.iter().skip(1) { + service + .network() + .add_reserved_peer(first_address.clone()) + .expect("Error adding reserved peer"); + } + network.run_until_all_full(|_index, service| { + service.client().info().unwrap().chain.best_number == As::sa(NUM_BLOCKS as u64) + }); + info!("Checking extrinsic propagation"); + let first_service = network.full_nodes[0].1.clone(); + let best_block = BlockId::number(first_service.client().info().unwrap().chain.best_number); + first_service + .transaction_pool() + .submit_one(&best_block, extrinsic_factory(&first_service)) + .unwrap(); + network.run_until_all_full(|_index, service| service.transaction_pool().ready().count() == 1); } pub fn consensus(spec: FactoryChainSpec, authorities: Vec) - where - F: ServiceFactory, +where + F: ServiceFactory, { - const NUM_NODES: u32 = 20; - const NUM_BLOCKS: u64 = 200; - let temp = TempDir::new("substrate-conensus-test").expect("Error creating test dir"); - let mut network = TestNet::::new(&temp, spec.clone(), NUM_NODES / 2, 0, authorities, 30600); - info!("Checking consensus"); - let first_address = network.authority_nodes[0].1.network().node_id().unwrap(); - for (_, service) in network.full_nodes.iter() { - service.network().add_reserved_peer(first_address.clone()).expect("Error adding reserved peer"); - } - for (_, service) in network.authority_nodes.iter().skip(1) { - service.network().add_reserved_peer(first_address.clone()).expect("Error adding reserved peer"); - } - network.run_until_all_full(|_index, service| { - service.client().info().unwrap().chain.finalized_number >= As::sa(NUM_BLOCKS / 2) - }); - info!("Adding more peers"); - network.insert_nodes(&temp, NUM_NODES / 2, 0, vec![]); - for (_, service) in network.full_nodes.iter() { - service.network().add_reserved_peer(first_address.clone()).expect("Error adding reserved peer"); - } - 
network.run_until_all_full(|_index, service|
-        service.client().info().unwrap().chain.finalized_number >= As::sa(NUM_BLOCKS)
-    );
+    const NUM_NODES: u32 = 20;
+    const NUM_BLOCKS: u64 = 200;
+    let temp = TempDir::new("substrate-consensus-test").expect("Error creating test dir");
+    let mut network = TestNet::::new(&temp, spec.clone(), NUM_NODES / 2, 0, authorities, 30600);
+    info!("Checking consensus");
+    let first_address = network.authority_nodes[0].1.network().node_id().unwrap();
+    for (_, service) in network.full_nodes.iter() {
+        service
+            .network()
+            .add_reserved_peer(first_address.clone())
+            .expect("Error adding reserved peer");
+    }
+    for (_, service) in network.authority_nodes.iter().skip(1) {
+        service
+            .network()
+            .add_reserved_peer(first_address.clone())
+            .expect("Error adding reserved peer");
+    }
+    network.run_until_all_full(|_index, service| {
+        service.client().info().unwrap().chain.finalized_number >= As::sa(NUM_BLOCKS / 2)
+    });
+    info!("Adding more peers");
+    network.insert_nodes(&temp, NUM_NODES / 2, 0, vec![]);
+    for (_, service) in network.full_nodes.iter() {
+        service
+            .network()
+            .add_reserved_peer(first_address.clone())
+            .expect("Error adding reserved peer");
+    }
+    network.run_until_all_full(|_index, service| {
+        service.client().info().unwrap().chain.finalized_number >= As::sa(NUM_BLOCKS)
+    });
 }
diff --git a/core/sr-api-macros/benches/bench.rs b/core/sr-api-macros/benches/bench.rs
index f467721789..d7ad937918 100644
--- a/core/sr-api-macros/benches/bench.rs
+++ b/core/sr-api-macros/benches/bench.rs
@@ -14,55 +14,69 @@
 // You should have received a copy of the GNU General Public License
 // along with Substrate. If not, see <http://www.gnu.org/licenses/>.

-use criterion::{Criterion, criterion_group, criterion_main};
-use test_client::runtime::TestAPI;
+use criterion::{criterion_group, criterion_main, Criterion};
 use runtime_primitives::{generic::BlockId, traits::ProvideRuntimeApi};
 use state_machine::ExecutionStrategy;
+use test_client::runtime::TestAPI;

 fn sr_api_benchmark(c: &mut Criterion) {
-    c.bench_function("add one with same runtime api", |b| {
-        let client = test_client::new();
-        let runtime_api = client.runtime_api();
-        let block_id = BlockId::Number(client.info().unwrap().chain.best_number);
+    c.bench_function("add one with same runtime api", |b| {
+        let client = test_client::new();
+        let runtime_api = client.runtime_api();
+        let block_id = BlockId::Number(client.info().unwrap().chain.best_number);

-        b.iter(|| runtime_api.benchmark_add_one(&block_id, &1))
-    });
+        b.iter(|| runtime_api.benchmark_add_one(&block_id, &1))
+    });

-    c.bench_function("add one with recreating runtime api", |b| {
-        let client = test_client::new();
-        let block_id = BlockId::Number(client.info().unwrap().chain.best_number);
+    c.bench_function("add one with recreating runtime api", |b| {
+        let client = test_client::new();
+        let block_id = BlockId::Number(client.info().unwrap().chain.best_number);

-        b.iter(|| client.runtime_api().benchmark_add_one(&block_id, &1))
-    });

-    c.bench_function("vector add one with same runtime api", |b| {
-        let client = test_client::new();
-        let runtime_api = client.runtime_api();
-        let block_id = BlockId::Number(client.info().unwrap().chain.best_number);
-        let data = vec![0; 1000];
+    c.bench_function("vector add one with same runtime api", |b| {
+        let client = test_client::new();
+        let runtime_api = client.runtime_api();
+        let block_id = BlockId::Number(client.info().unwrap().chain.best_number);
+        let data =
vec![0; 1000]; - b.iter_with_large_drop(|| runtime_api.benchmark_vector_add_one(&block_id, &data)) - }); + b.iter_with_large_drop(|| runtime_api.benchmark_vector_add_one(&block_id, &data)) + }); - c.bench_function("vector add one with recreating runtime api", |b| { - let client = test_client::new(); - let block_id = BlockId::Number(client.info().unwrap().chain.best_number); - let data = vec![0; 1000]; + c.bench_function("vector add one with recreating runtime api", |b| { + let client = test_client::new(); + let block_id = BlockId::Number(client.info().unwrap().chain.best_number); + let data = vec![0; 1000]; - b.iter_with_large_drop(|| client.runtime_api().benchmark_vector_add_one(&block_id, &data)) - }); + b.iter_with_large_drop(|| { + client + .runtime_api() + .benchmark_vector_add_one(&block_id, &data) + }) + }); - c.bench_function("calling function by function pointer in wasm", |b| { - let client = test_client::new_with_execution_strategy(ExecutionStrategy::AlwaysWasm); - let block_id = BlockId::Number(client.info().unwrap().chain.best_number); - b.iter(|| client.runtime_api().benchmark_indirect_call(&block_id).unwrap()) - }); + c.bench_function("calling function by function pointer in wasm", |b| { + let client = test_client::new_with_execution_strategy(ExecutionStrategy::AlwaysWasm); + let block_id = BlockId::Number(client.info().unwrap().chain.best_number); + b.iter(|| { + client + .runtime_api() + .benchmark_indirect_call(&block_id) + .unwrap() + }) + }); - c.bench_function("calling function in wasm", |b| { - let client = test_client::new_with_execution_strategy(ExecutionStrategy::AlwaysWasm); - let block_id = BlockId::Number(client.info().unwrap().chain.best_number); - b.iter(|| client.runtime_api().benchmark_direct_call(&block_id).unwrap()) - }); + c.bench_function("calling function in wasm", |b| { + let client = test_client::new_with_execution_strategy(ExecutionStrategy::AlwaysWasm); + let block_id = BlockId::Number(client.info().unwrap().chain.best_number); + b.iter(|| { + client + .runtime_api() + .benchmark_direct_call(&block_id) + .unwrap() + }) + }); } criterion_group!(benches, sr_api_benchmark); diff --git a/core/sr-api-macros/src/compile_fail_tests.rs b/core/sr-api-macros/src/compile_fail_tests.rs index e562f8b2fe..58ce6d932a 100644 --- a/core/sr-api-macros/src/compile_fail_tests.rs +++ b/core/sr-api-macros/src/compile_fail_tests.rs @@ -17,396 +17,396 @@ //! Compile fail tests. mod declaring_own_block { - /*! - ```compile_fail - #[macro_use] - extern crate client; - extern crate sr_primitives as runtime_primitives; - - use runtime_primitives::traits::Block as BlockT; - - decl_runtime_apis! { - pub trait Api { - fn test(); - } - } - - fn main() {} - ``` - */ + /*! + ```compile_fail + #[macro_use] + extern crate client; + extern crate sr_primitives as runtime_primitives; + + use runtime_primitives::traits::Block as BlockT; + + decl_runtime_apis! { + pub trait Api { + fn test(); + } + } + + fn main() {} + ``` + */ } mod declaring_own_block_with_different_name { - /*! - ```compile_fail - #[macro_use] - extern crate client; - extern crate sr_primitives as runtime_primitives; - - use runtime_primitives::traits::Block as BlockT; - - decl_runtime_apis! { - pub trait Api { - fn test(); - } - } - - fn main() {} - ``` - */ + /*! + ```compile_fail + #[macro_use] + extern crate client; + extern crate sr_primitives as runtime_primitives; + + use runtime_primitives::traits::Block as BlockT; + + decl_runtime_apis! 
{ + pub trait Api { + fn test(); + } + } + + fn main() {} + ``` + */ } mod adding_self_parameter { - /*! - ```compile_fail - #[macro_use] - extern crate client; - extern crate sr_primitives as runtime_primitives; - - decl_runtime_apis! { - pub trait Api { - fn test(&self); - } - } - - fn main() {} - ``` - */ + /*! + ```compile_fail + #[macro_use] + extern crate client; + extern crate sr_primitives as runtime_primitives; + + decl_runtime_apis! { + pub trait Api { + fn test(&self); + } + } + + fn main() {} + ``` + */ } mod adding_at_parameter { - /*! - ```compile_fail - #[macro_use] - extern crate client; - extern crate sr_primitives as runtime_primitives; - - decl_runtime_apis! { - pub trait Api { - fn test(at: u64); - } - } - - fn main() {} - ``` - */ + /*! + ```compile_fail + #[macro_use] + extern crate client; + extern crate sr_primitives as runtime_primitives; + + decl_runtime_apis! { + pub trait Api { + fn test(at: u64); + } + } + + fn main() {} + ``` + */ } mod invalid_api_version { - /*! - ```compile_fail - #[macro_use] - extern crate client; - extern crate sr_primitives as runtime_primitives; - - decl_runtime_apis! { - #[api_version] - pub trait Api { - fn test(data: u64); - } - } - - fn main() {} - ``` - */ + /*! + ```compile_fail + #[macro_use] + extern crate client; + extern crate sr_primitives as runtime_primitives; + + decl_runtime_apis! { + #[api_version] + pub trait Api { + fn test(data: u64); + } + } + + fn main() {} + ``` + */ } mod invalid_api_version_2 { - /*! - ```compile_fail - #[macro_use] - extern crate client; - extern crate sr_primitives as runtime_primitives; - - decl_runtime_apis! { - #[api_version("1")] - pub trait Api { - fn test(data: u64); - } - } - - fn main() {} - ``` - */ + /*! + ```compile_fail + #[macro_use] + extern crate client; + extern crate sr_primitives as runtime_primitives; + + decl_runtime_apis! { + #[api_version("1")] + pub trait Api { + fn test(data: u64); + } + } + + fn main() {} + ``` + */ } mod invalid_api_version_3 { - /*! - ```compile_fail - #[macro_use] - extern crate client; - extern crate sr_primitives as runtime_primitives; - - decl_runtime_apis! { - #[api_version()] - pub trait Api { - fn test(data: u64); - } - } - - fn main() {} - ``` - */ + /*! + ```compile_fail + #[macro_use] + extern crate client; + extern crate sr_primitives as runtime_primitives; + + decl_runtime_apis! { + #[api_version()] + pub trait Api { + fn test(data: u64); + } + } + + fn main() {} + ``` + */ } mod missing_block_generic_parameter { - /*! - ```compile_fail - #[macro_use] - extern crate client; - extern crate substrate_test_client as test_client; - extern crate sr_primitives as runtime_primitives; - extern crate substrate_primitives as primitives; - - use runtime_primitives::traits::GetNodeBlockType; - use test_client::runtime::Block; - - /// The declaration of the `Runtime` type and the implementation of the `GetNodeBlockType` - /// trait are done by the `construct_runtime!` macro in a real runtime. - struct Runtime {} - impl GetNodeBlockType for Runtime { - type NodeBlock = Block; - } - - decl_runtime_apis! { - pub trait Api { - fn test(data: u64); - } - } - - impl_runtime_apis! { - impl self::Api for Runtime { - fn test(data: u64) { - unimplemented!() - } - } - } - - fn main() {} - ``` - */ + /*! 
+ ```compile_fail + #[macro_use] + extern crate client; + extern crate substrate_test_client as test_client; + extern crate sr_primitives as runtime_primitives; + extern crate substrate_primitives as primitives; + + use runtime_primitives::traits::GetNodeBlockType; + use test_client::runtime::Block; + + /// The declaration of the `Runtime` type and the implementation of the `GetNodeBlockType` + /// trait are done by the `construct_runtime!` macro in a real runtime. + struct Runtime {} + impl GetNodeBlockType for Runtime { + type NodeBlock = Block; + } + + decl_runtime_apis! { + pub trait Api { + fn test(data: u64); + } + } + + impl_runtime_apis! { + impl self::Api for Runtime { + fn test(data: u64) { + unimplemented!() + } + } + } + + fn main() {} + ``` + */ } mod missing_path_for_trait { - /*! - ```compile_fail - #[macro_use] - extern crate client; - extern crate substrate_test_client as test_client; - extern crate sr_primitives as runtime_primitives; - extern crate substrate_primitives as primitives; - - use runtime_primitives::traits::GetNodeBlockType; - use test_client::runtime::Block; - - /// The declaration of the `Runtime` type and the implementation of the `GetNodeBlockType` - /// trait are done by the `construct_runtime!` macro in a real runtime. - struct Runtime {} - impl GetNodeBlockType for Runtime { - type NodeBlock = Block; - } - - decl_runtime_apis! { - pub trait Api { - fn test(data: u64); - } - } - - impl_runtime_apis! { - impl Api for Runtime { - fn test(data: u64) { - unimplemented!() - } - } - } - - fn main() {} - ``` - */ + /*! + ```compile_fail + #[macro_use] + extern crate client; + extern crate substrate_test_client as test_client; + extern crate sr_primitives as runtime_primitives; + extern crate substrate_primitives as primitives; + + use runtime_primitives::traits::GetNodeBlockType; + use test_client::runtime::Block; + + /// The declaration of the `Runtime` type and the implementation of the `GetNodeBlockType` + /// trait are done by the `construct_runtime!` macro in a real runtime. + struct Runtime {} + impl GetNodeBlockType for Runtime { + type NodeBlock = Block; + } + + decl_runtime_apis! { + pub trait Api { + fn test(data: u64); + } + } + + impl_runtime_apis! { + impl Api for Runtime { + fn test(data: u64) { + unimplemented!() + } + } + } + + fn main() {} + ``` + */ } mod empty_impl_runtime_apis_call { - /*! - ```compile_fail - #[macro_use] - extern crate client; - extern crate substrate_test_client as test_client; - extern crate sr_primitives as runtime_primitives; - extern crate substrate_primitives as primitives; - - use runtime_primitives::traits::GetNodeBlockType; - use test_client::runtime::Block; - - /// The declaration of the `Runtime` type and the implementation of the `GetNodeBlockType` - /// trait are done by the `construct_runtime!` macro in a real runtime. - struct Runtime {} - impl GetNodeBlockType for Runtime { - type NodeBlock = Block; - } - - decl_runtime_apis! { - pub trait Api { - fn test(data: u64); - } - } - - impl_runtime_apis! {} - - fn main() {} - ``` - */ + /*! + ```compile_fail + #[macro_use] + extern crate client; + extern crate substrate_test_client as test_client; + extern crate sr_primitives as runtime_primitives; + extern crate substrate_primitives as primitives; + + use runtime_primitives::traits::GetNodeBlockType; + use test_client::runtime::Block; + + /// The declaration of the `Runtime` type and the implementation of the `GetNodeBlockType` + /// trait are done by the `construct_runtime!` macro in a real runtime. 
+ struct Runtime {} + impl GetNodeBlockType for Runtime { + type NodeBlock = Block; + } + + decl_runtime_apis! { + pub trait Api { + fn test(data: u64); + } + } + + impl_runtime_apis! {} + + fn main() {} + ``` + */ } mod type_reference_in_impl_runtime_apis_call { - /*! - ```compile_fail - #[macro_use] - extern crate client; - extern crate substrate_test_client as test_client; - extern crate sr_primitives as runtime_primitives; - extern crate substrate_primitives as primitives; - - use runtime_primitives::traits::GetNodeBlockType; - use test_client::runtime::Block; - - /// The declaration of the `Runtime` type and the implementation of the `GetNodeBlockType` - /// trait are done by the `construct_runtime!` macro in a real runtime. - struct Runtime {} - impl GetNodeBlockType for Runtime { - type NodeBlock = Block; - } - - decl_runtime_apis! { - pub trait Api { - fn test(data: u64); - } - } - - impl_runtime_apis! { - impl self::Api for Runtime { - fn test(data: &u64) { - unimplemented!() - } - } - } - - fn main() {} - ``` - */ + /*! + ```compile_fail + #[macro_use] + extern crate client; + extern crate substrate_test_client as test_client; + extern crate sr_primitives as runtime_primitives; + extern crate substrate_primitives as primitives; + + use runtime_primitives::traits::GetNodeBlockType; + use test_client::runtime::Block; + + /// The declaration of the `Runtime` type and the implementation of the `GetNodeBlockType` + /// trait are done by the `construct_runtime!` macro in a real runtime. + struct Runtime {} + impl GetNodeBlockType for Runtime { + type NodeBlock = Block; + } + + decl_runtime_apis! { + pub trait Api { + fn test(data: u64); + } + } + + impl_runtime_apis! { + impl self::Api for Runtime { + fn test(data: &u64) { + unimplemented!() + } + } + } + + fn main() {} + ``` + */ } mod impl_incorrect_method_signature { - /*! - ```compile_fail - #[macro_use] - extern crate client; - extern crate substrate_test_client as test_client; - extern crate sr_primitives as runtime_primitives; - extern crate substrate_primitives as primitives; - - use runtime_primitives::traits::GetNodeBlockType; - use test_client::runtime::Block; - - /// The declaration of the `Runtime` type and the implementation of the `GetNodeBlockType` - /// trait are done by the `construct_runtime!` macro in a real runtime. - struct Runtime {} - impl GetNodeBlockType for Runtime { - type NodeBlock = Block; - } - - decl_runtime_apis! { - pub trait Api { - fn test(data: u64); - } - } - - impl_runtime_apis! { - impl self::Api for Runtime { - fn test(data: String) {} - } - } - - fn main() {} - ``` - */ + /*! + ```compile_fail + #[macro_use] + extern crate client; + extern crate substrate_test_client as test_client; + extern crate sr_primitives as runtime_primitives; + extern crate substrate_primitives as primitives; + + use runtime_primitives::traits::GetNodeBlockType; + use test_client::runtime::Block; + + /// The declaration of the `Runtime` type and the implementation of the `GetNodeBlockType` + /// trait are done by the `construct_runtime!` macro in a real runtime. + struct Runtime {} + impl GetNodeBlockType for Runtime { + type NodeBlock = Block; + } + + decl_runtime_apis! { + pub trait Api { + fn test(data: u64); + } + } + + impl_runtime_apis! { + impl self::Api for Runtime { + fn test(data: String) {} + } + } + + fn main() {} + ``` + */ } mod impl_two_traits_with_same_name { - /*! 
- ```compile_fail - #[macro_use] - extern crate client; - extern crate substrate_test_client as test_client; - extern crate sr_primitives as runtime_primitives; - extern crate substrate_primitives as primitives; - - use runtime_primitives::traits::GetNodeBlockType; - use test_client::runtime::Block; - - /// The declaration of the `Runtime` type and the implementation of the `GetNodeBlockType` - /// trait are done by the `construct_runtime!` macro in a real runtime. - struct Runtime {} - impl GetNodeBlockType for Runtime { - type NodeBlock = Block; - } - - decl_runtime_apis! { - pub trait Api { - fn test(data: u64); - } - } - - mod second { - decl_runtime_apis! { - pub trait Api { - fn test2(data: u64); - } - } - } - - impl_runtime_apis! { - impl self::Api for Runtime { - fn test(data: u64) {} - } - - impl second::Api for Runtime { - fn test2(data: u64) {} - } - } - - fn main() {} - ``` - */ + /*! + ```compile_fail + #[macro_use] + extern crate client; + extern crate substrate_test_client as test_client; + extern crate sr_primitives as runtime_primitives; + extern crate substrate_primitives as primitives; + + use runtime_primitives::traits::GetNodeBlockType; + use test_client::runtime::Block; + + /// The declaration of the `Runtime` type and the implementation of the `GetNodeBlockType` + /// trait are done by the `construct_runtime!` macro in a real runtime. + struct Runtime {} + impl GetNodeBlockType for Runtime { + type NodeBlock = Block; + } + + decl_runtime_apis! { + pub trait Api { + fn test(data: u64); + } + } + + mod second { + decl_runtime_apis! { + pub trait Api { + fn test2(data: u64); + } + } + } + + impl_runtime_apis! { + impl self::Api for Runtime { + fn test(data: u64) {} + } + + impl second::Api for Runtime { + fn test2(data: u64) {} + } + } + + fn main() {} + ``` + */ } mod changed_at_unknown_version { - /*! - ```compile_fail - #[macro_use] - extern crate client; - extern crate substrate_test_client as test_client; - extern crate sr_primitives as runtime_primitives; - extern crate substrate_primitives as primitives; - - use runtime_primitives::traits::GetNodeBlockType; - use test_client::runtime::Block; - - /// The declaration of the `Runtime` type and the implementation of the `GetNodeBlockType` - /// trait are done by the `construct_runtime!` macro in a real runtime. - struct Runtime {} - impl GetNodeBlockType for Runtime { - type NodeBlock = Block; - } - - decl_runtime_apis! { - pub trait Api { - #[changed_in(2)] - fn test(data: u64); - fn test(data: u64); - } - } - - fn main() {} - ``` - */ + /*! + ```compile_fail + #[macro_use] + extern crate client; + extern crate substrate_test_client as test_client; + extern crate sr_primitives as runtime_primitives; + extern crate substrate_primitives as primitives; + + use runtime_primitives::traits::GetNodeBlockType; + use test_client::runtime::Block; + + /// The declaration of the `Runtime` type and the implementation of the `GetNodeBlockType` + /// trait are done by the `construct_runtime!` macro in a real runtime. + struct Runtime {} + impl GetNodeBlockType for Runtime { + type NodeBlock = Block; + } + + decl_runtime_apis! { + pub trait Api { + #[changed_in(2)] + fn test(data: u64); + fn test(data: u64); + } + } + + fn main() {} + ``` + */ } diff --git a/core/sr-api-macros/src/decl_runtime_apis.rs b/core/sr-api-macros/src/decl_runtime_apis.rs index 9e4c38f087..39975f8642 100644 --- a/core/sr-api-macros/src/decl_runtime_apis.rs +++ b/core/sr-api-macros/src/decl_runtime_apis.rs @@ -15,21 +15,24 @@ // along with Substrate. 
If not, see <http://www.gnu.org/licenses/>. use crate::utils::{ - generate_crate_access, generate_hidden_includes, generate_runtime_mod_name_for_trait, - fold_fn_decl_for_client_side, unwrap_or_error, extract_parameter_names_types_and_borrows, - generate_native_call_generator_fn_name, return_type_extract_type, - generate_method_runtime_api_impl_name + extract_parameter_names_types_and_borrows, fold_fn_decl_for_client_side, generate_crate_access, + generate_hidden_includes, generate_method_runtime_api_impl_name, + generate_native_call_generator_fn_name, generate_runtime_mod_name_for_trait, + return_type_extract_type, unwrap_or_error, }; -use proc_macro2::{TokenStream, Span}; +use proc_macro2::{Span, TokenStream}; use quote::quote; use syn::{ - spanned::Spanned, parse_macro_input, parse::{Parse, ParseStream, Result, Error}, ReturnType, - fold::{self, Fold}, parse_quote, ItemTrait, Generics, GenericParam, Attribute, FnArg, - visit::{Visit, self}, Pat, TraitBound, Meta, NestedMeta, Lit, TraitItem, Ident, Type, - TraitItemMethod + fold::{self, Fold}, + parse::{Error, Parse, ParseStream, Result}, + parse_macro_input, parse_quote, + spanned::Spanned, + visit::{self, Visit}, + Attribute, FnArg, GenericParam, Generics, Ident, ItemTrait, Lit, Meta, NestedMeta, Pat, + ReturnType, TraitBound, TraitItem, TraitItemMethod, Type, }; use std::collections::HashMap; @@ -53,699 +56,725 @@ const API_VERSION_ATTRIBUTE: &str = "api_version"; const CHANGED_IN_ATTRIBUTE: &str = "changed_in"; /// All attributes that we support in the declaration of a runtime api trait. const SUPPORTED_ATTRIBUTE_NAMES: &[&str] = &[ - CORE_TRAIT_ATTRIBUTE, API_VERSION_ATTRIBUTE, CHANGED_IN_ATTRIBUTE + CORE_TRAIT_ATTRIBUTE, + API_VERSION_ATTRIBUTE, + CHANGED_IN_ATTRIBUTE, ]; /// The structure used for parsing the runtime api declarations. struct RuntimeApiDecls { - decls: Vec<ItemTrait>, + decls: Vec<ItemTrait>, } impl Parse for RuntimeApiDecls { - fn parse(input: ParseStream) -> Result<Self> { - let mut decls = Vec::new(); + fn parse(input: ParseStream) -> Result<Self> { + let mut decls = Vec::new(); - while !input.is_empty() { - decls.push(ItemTrait::parse(input)?); - } + while !input.is_empty() { + decls.push(ItemTrait::parse(input)?); + } - Ok(Self { decls }) - } + Ok(Self { decls }) + } } /// Extend the given generics with `Block: BlockT` as first generic parameter. fn extend_generics_with_block(generics: &mut Generics) { - let c = generate_crate_access(HIDDEN_INCLUDES_ID); + let c = generate_crate_access(HIDDEN_INCLUDES_ID); - generics.lt_token = Some(parse_quote!(<)); - generics.params.insert(0, parse_quote!( Block: #c::runtime_api::BlockT )); - generics.gt_token = Some(parse_quote!(>)); + generics.lt_token = Some(parse_quote!(<)); + generics + .params + .insert(0, parse_quote!( Block: #c::runtime_api::BlockT )); + generics.gt_token = Some(parse_quote!(>)); } /// Remove all attributes from the vector that are supported by us in the declaration of a runtime /// api trait. The returned hashmap contains all found attribute names as keys and the rest of the /// attribute body as `TokenStream`.
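As an aside, the retain-and-collect pattern used by this helper can be sketched without the old `syn` 0.15 types; in the sketch below, plain strings stand in for `syn::Attribute`, and the attribute names mirror the three constants above:

```rust
use std::collections::HashMap;

const SUPPORTED: &[&str] = &["core_trait", "api_version", "changed_in"];

// Keep unsupported attributes in place; move supported ones into a map,
// mirroring what `remove_supported_attributes` does with real attributes.
fn split_attributes(attrs: &mut Vec<String>) -> HashMap<&'static str, String> {
    let mut found = HashMap::new();
    attrs.retain(|attr| match SUPPORTED.iter().find(|s| attr.starts_with(*s)) {
        Some(name) => {
            found.insert(*name, attr.clone());
            false
        }
        None => true,
    });
    found
}

fn main() {
    let mut attrs = vec!["api_version(2)".to_string(), "doc".to_string()];
    let found = split_attributes(&mut attrs);
    assert_eq!(attrs, vec!["doc".to_string()]);
    assert!(found.contains_key("api_version"));
}
```

Using `retain` keeps unsupported attributes (such as doc comments) attached to the trait, while the supported ones are consumed and handled by the macro.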
fn remove_supported_attributes(attrs: &mut Vec) -> HashMap<&'static str, Attribute> { - let mut result = HashMap::new(); - attrs.retain(|v| { - match SUPPORTED_ATTRIBUTE_NAMES.iter().filter(|a| v.path.is_ident(a)).next() { - Some(attribute) => { - result.insert(*attribute, v.clone()); - false - }, - None => true, - } - }); - - result + let mut result = HashMap::new(); + attrs.retain(|v| { + match SUPPORTED_ATTRIBUTE_NAMES + .iter() + .filter(|a| v.path.is_ident(a)) + .next() + { + Some(attribute) => { + result.insert(*attribute, v.clone()); + false + } + None => true, + } + }); + + result } /// Visits the ast and checks if `Block` ident is used somewhere. struct IsUsingBlock { - result: bool, + result: bool, } impl<'ast> Visit<'ast> for IsUsingBlock { - fn visit_ident(&mut self, i: &'ast Ident) { - if i == BLOCK_GENERIC_IDENT { - self.result = true; - } - } + fn visit_ident(&mut self, i: &'ast Ident) { + if i == BLOCK_GENERIC_IDENT { + self.result = true; + } + } } /// Visits the ast and checks if `Block` ident is used somewhere. fn type_is_using_block(ty: &Type) -> bool { - let mut visitor = IsUsingBlock { result: false }; - visitor.visit_type(ty); - visitor.result + let mut visitor = IsUsingBlock { result: false }; + visitor.visit_type(ty); + visitor.result } /// Visits the ast and checks if `Block` ident is used somewhere. fn return_type_is_using_block(ty: &ReturnType) -> bool { - let mut visitor = IsUsingBlock { result: false }; - visitor.visit_return_type(ty); - visitor.result + let mut visitor = IsUsingBlock { result: false }; + visitor.visit_return_type(ty); + visitor.result } /// Replace all occurrences of `Block` with `NodeBlock` struct ReplaceBlockWithNodeBlock {} impl Fold for ReplaceBlockWithNodeBlock { - fn fold_ident(&mut self, input: Ident) -> Ident { - if input == BLOCK_GENERIC_IDENT { - Ident::new("NodeBlock", Span::call_site()) - } else { - input - } - } + fn fold_ident(&mut self, input: Ident) -> Ident { + if input == BLOCK_GENERIC_IDENT { + Ident::new("NodeBlock", Span::call_site()) + } else { + input + } + } } /// Replace all occurrences of `Block` with `NodeBlock` fn fn_arg_replace_block_with_node_block(fn_arg: FnArg) -> FnArg { - let mut replace = ReplaceBlockWithNodeBlock {}; - fold::fold_fn_arg(&mut replace, fn_arg) + let mut replace = ReplaceBlockWithNodeBlock {}; + fold::fold_fn_arg(&mut replace, fn_arg) } /// Replace all occurrences of `Block` with `NodeBlock` fn return_type_replace_block_with_node_block(return_type: ReturnType) -> ReturnType { - let mut replace = ReplaceBlockWithNodeBlock {}; - fold::fold_return_type(&mut replace, return_type) + let mut replace = ReplaceBlockWithNodeBlock {}; + fold::fold_return_type(&mut replace, return_type) } fn generate_native_call_generators(decl: &ItemTrait) -> Result { - let fns = decl.items.iter().filter_map(|i| match i { - TraitItem::Method(ref m) => Some(&m.sig), - _ => None, - }); - - let mut result = Vec::new(); - let trait_ = &decl.ident; - let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); - - // Auxiliary function that is used to convert between types that use different block types. - // The function expects that both a convertable by encoding the one and decoding the other. - result.push(quote!( - #[cfg(any(feature = "std", test))] - fn convert_between_block_types - ( - input: &I, error_desc: &'static str, - ) -> ::std::result::Result - { - ::decode( - &mut &#crate_::runtime_api::Encode::encode(input)[..] 
- ).ok_or_else(|| error_desc) - } - )); - - // Generate a native call generator for each function of the given trait. - for fn_ in fns { - let params = extract_parameter_names_types_and_borrows(&fn_.decl)?; - let trait_fn_name = &fn_.ident; - let fn_name = generate_native_call_generator_fn_name(&fn_.ident); - let output = return_type_replace_block_with_node_block(fn_.decl.output.clone()); - let output_ty = return_type_extract_type(&output); - let output = quote!( ::std::result::Result<#output_ty, &'static str> ); - - // Every type that is using the `Block` generic parameter, we need to encode/decode, - // to make it compatible between the runtime/node. - let conversions = params.iter().filter(|v| type_is_using_block(&v.1)).map(|(n, t, _)| { - let name_str = format!( - "Could not convert parameter `{}` between node and runtime!", quote!(#n) - ); - quote!( - let #n: #t = convert_between_block_types(&#n, #name_str)?; - ) - }); - // Same as for the input types, we need to check if we also need to convert the output, - // before returning it. - let output_conversion = if return_type_is_using_block(&fn_.decl.output) { - quote!( - convert_between_block_types( - &res, - "Could not convert return value from runtime to node!" - ) - ) - } else { - quote!( Ok(res) ) - }; - - let input_names = params.iter().map(|v| &v.0); - // If the type is using the block generic type, we will encode/decode it to make it - // compatible. To ensure that we forward it by ref/value, we use the value given by the - // the user. Otherwise if it is not using the block, we don't need to add anything. - let input_borrows = params - .iter() - .map(|v| if type_is_using_block(&v.1) { v.2.clone() } else { quote!() }); - - // Replace all `Block` with `NodeBlock`, add `'a` lifetime to references and collect - // all the function inputs. - let fn_inputs = fn_ - .decl - .inputs - .iter() - .map(|v| fn_arg_replace_block_with_node_block(v.clone())) - .map(|v| match v { - FnArg::Captured(ref arg) => { - let mut arg = arg.clone(); - match arg.ty { - Type::Reference(ref mut r) => { - r.lifetime = Some(parse_quote!( 'a )); - }, - _ => {} - } - FnArg::Captured(arg) - }, - r => r.clone(), - }); - - let (impl_generics, ty_generics, where_clause) = decl.generics.split_for_impl(); - // We need to parse them again, to get an easy access to the actual parameters. - let impl_generics: Generics = parse_quote!(#impl_generics); - let impl_generics_params = impl_generics.params.iter().map(|p| { - match p { - GenericParam::Type(ref ty) => { - let mut ty = ty.clone(); - ty.bounds.push(parse_quote!( 'a )); - GenericParam::Type(ty) - }, - // We should not see anything different than type params here. - r => r.clone(), - } - }); - - // Generate the generator function - result.push(quote!( - #[cfg(any(feature = "std", test))] - pub fn #fn_name< - 'a, ApiImpl: #trait_ #ty_generics, NodeBlock: #crate_::runtime_api::BlockT - #(, #impl_generics_params)* - >( - #( #fn_inputs ),* - ) -> impl FnOnce() -> #output + 'a #where_clause { - move || { - #( #conversions )* - let res = ApiImpl::#trait_fn_name(#( #input_borrows #input_names ),*); - #output_conversion - } - } - )); - } - - Ok(quote!( #( #result )* )) + let fns = decl.items.iter().filter_map(|i| match i { + TraitItem::Method(ref m) => Some(&m.sig), + _ => None, + }); + + let mut result = Vec::new(); + let trait_ = &decl.ident; + let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); + + // Auxiliary function that is used to convert between types that use different block types. 
+ // The function expects that both are convertible by encoding the one and decoding the other. + result.push(quote!( + #[cfg(any(feature = "std", test))] + fn convert_between_block_types<I: #crate_::runtime_api::Encode, R: #crate_::runtime_api::Decode> + ( + input: &I, error_desc: &'static str, + ) -> ::std::result::Result<R, &'static str> + { + <R as #crate_::runtime_api::Decode>::decode( + &mut &#crate_::runtime_api::Encode::encode(input)[..] + ).ok_or_else(|| error_desc) + } + )); + + // Generate a native call generator for each function of the given trait. + for fn_ in fns { + let params = extract_parameter_names_types_and_borrows(&fn_.decl)?; + let trait_fn_name = &fn_.ident; + let fn_name = generate_native_call_generator_fn_name(&fn_.ident); + let output = return_type_replace_block_with_node_block(fn_.decl.output.clone()); + let output_ty = return_type_extract_type(&output); + let output = quote!( ::std::result::Result<#output_ty, &'static str> ); + + // Every type that uses the `Block` generic parameter needs to be encoded/decoded + // to make it compatible between the runtime/node. + let conversions = params + .iter() + .filter(|v| type_is_using_block(&v.1)) + .map(|(n, t, _)| { + let name_str = format!( + "Could not convert parameter `{}` between node and runtime!", + quote!(#n) + ); + quote!( + let #n: #t = convert_between_block_types(&#n, #name_str)?; + ) + }); + // Same as for the input types, we need to check if we also need to convert the output, + // before returning it. + let output_conversion = if return_type_is_using_block(&fn_.decl.output) { + quote!(convert_between_block_types( + &res, + "Could not convert return value from runtime to node!" + )) + } else { + quote!(Ok(res)) + }; + + let input_names = params.iter().map(|v| &v.0); + // If the type is using the block generic type, we will encode/decode it to make it + // compatible. To ensure that we forward it by ref/value, we use the value given by + // the user. Otherwise if it is not using the block, we don't need to add anything. + let input_borrows = params.iter().map(|v| { + if type_is_using_block(&v.1) { + v.2.clone() + } else { + quote!() + } + }); + + // Replace all `Block` with `NodeBlock`, add `'a` lifetime to references and collect + // all the function inputs. + let fn_inputs = fn_ + .decl + .inputs + .iter() + .map(|v| fn_arg_replace_block_with_node_block(v.clone())) + .map(|v| match v { + FnArg::Captured(ref arg) => { + let mut arg = arg.clone(); + match arg.ty { + Type::Reference(ref mut r) => { + r.lifetime = Some(parse_quote!( 'a )); + } + _ => {} + } + FnArg::Captured(arg) + } + r => r.clone(), + }); + + let (impl_generics, ty_generics, where_clause) = decl.generics.split_for_impl(); + // We need to parse them again, to get easy access to the actual parameters. + let impl_generics: Generics = parse_quote!(#impl_generics); + let impl_generics_params = impl_generics.params.iter().map(|p| { + match p { + GenericParam::Type(ref ty) => { + let mut ty = ty.clone(); + ty.bounds.push(parse_quote!( 'a )); + GenericParam::Type(ty) + } + // We should not see anything other than type params here.
+ r => r.clone(), + } + }); + + // Generate the generator function + result.push(quote!( + #[cfg(any(feature = "std", test))] + pub fn #fn_name< + 'a, ApiImpl: #trait_ #ty_generics, NodeBlock: #crate_::runtime_api::BlockT + #(, #impl_generics_params)* + >( + #( #fn_inputs ),* + ) -> impl FnOnce() -> #output + 'a #where_clause { + move || { + #( #conversions )* + let res = ApiImpl::#trait_fn_name(#( #input_borrows #input_names ),*); + #output_conversion + } + } + )); + } + + Ok(quote!( #( #result )* )) } /// Generate the declaration of the trait for the runtime. fn generate_runtime_decls(decls: &[ItemTrait]) -> TokenStream { - let mut result = Vec::new(); - - for decl in decls { - let mut decl = decl.clone(); - extend_generics_with_block(&mut decl.generics); - let mod_name = generate_runtime_mod_name_for_trait(&decl.ident); - let found_attributes = remove_supported_attributes(&mut decl.attrs); - let api_version = unwrap_or_error(get_api_version(&found_attributes).map(|v| { - generate_runtime_api_version(v as u32) - })); - let id = generate_runtime_api_id(&decl.ident.to_string()); - - // Remove methods that have the `changed_in` attribute as they are not required for the - // runtime anymore. - decl.items = decl.items.iter_mut().filter_map(|i| match i { - TraitItem::Method(ref mut method) => { - if remove_supported_attributes(&mut method.attrs).contains_key(CHANGED_IN_ATTRIBUTE) { - None - } else { - Some(TraitItem::Method(method.clone())) - } - } - r => Some(r.clone()), - }).collect(); - - let native_call_generators = unwrap_or_error(generate_native_call_generators(&decl)); - - result.push(quote!( - #[doc(hidden)] - #[allow(dead_code)] - pub mod #mod_name { - use super::*; - - #decl - - pub #api_version - - pub #id - - #native_call_generators - } - )); - } - - quote!( #( #result )* ) + let mut result = Vec::new(); + + for decl in decls { + let mut decl = decl.clone(); + extend_generics_with_block(&mut decl.generics); + let mod_name = generate_runtime_mod_name_for_trait(&decl.ident); + let found_attributes = remove_supported_attributes(&mut decl.attrs); + let api_version = unwrap_or_error( + get_api_version(&found_attributes).map(|v| generate_runtime_api_version(v as u32)), + ); + let id = generate_runtime_api_id(&decl.ident.to_string()); + + // Remove methods that have the `changed_in` attribute as they are not required for the + // runtime anymore. + decl.items = decl + .items + .iter_mut() + .filter_map(|i| match i { + TraitItem::Method(ref mut method) => { + if remove_supported_attributes(&mut method.attrs) + .contains_key(CHANGED_IN_ATTRIBUTE) + { + None + } else { + Some(TraitItem::Method(method.clone())) + } + } + r => Some(r.clone()), + }) + .collect(); + + let native_call_generators = unwrap_or_error(generate_native_call_generators(&decl)); + + result.push(quote!( + #[doc(hidden)] + #[allow(dead_code)] + pub mod #mod_name { + use super::*; + + #decl + + pub #api_version + + pub #id + + #native_call_generators + } + )); + } + + quote!( #( #result )* ) } /// Modify the given runtime api declaration to be usable on the client side. struct ToClientSideDecl<'a> { - block_id: &'a TokenStream, - crate_: &'a TokenStream, - found_attributes: &'a mut HashMap<&'static str, Attribute>, - /// Any error that we found while converting this declaration. - errors: &'a mut Vec, + block_id: &'a TokenStream, + crate_: &'a TokenStream, + found_attributes: &'a mut HashMap<&'static str, Attribute>, + /// Any error that we found while converting this declaration. 
+ errors: &'a mut Vec, } impl<'a> ToClientSideDecl<'a> { - fn fold_item_trait_items(&mut self, items: Vec) -> Vec { - let mut result = Vec::new(); - - items.into_iter().for_each(|i| match i { - TraitItem::Method(method) => { - let (fn_decl, fn_impl, fn_decl_ctx) = self.fold_trait_item_method(method); - result.push(fn_decl.into()); - result.push(fn_decl_ctx.into()); - - if let Some(fn_impl) = fn_impl { - result.push(fn_impl.into()); - } - }, - r => result.push(r), - }); - - result - } - - fn fold_trait_item_method(&mut self, method: TraitItemMethod) - -> (TraitItemMethod, Option, TraitItemMethod) { - let crate_ = self.crate_; - let context_other = quote!( #crate_::runtime_api::ExecutionContext::Other ); - let fn_impl = self.create_method_runtime_api_impl(method.clone()); - let fn_decl = self.create_method_decl(method.clone(), context_other); - let fn_decl_ctx = self.create_method_decl_with_context(method); - - (fn_decl, fn_impl, fn_decl_ctx) - } - - fn create_method_decl_with_context(&mut self, method: TraitItemMethod) -> TraitItemMethod { - let crate_ = self.crate_; - let context_arg: syn::FnArg = parse_quote!( context: #crate_::runtime_api::ExecutionContext ); - let mut fn_decl_ctx = self.create_method_decl(method, quote!( context )); - fn_decl_ctx.sig.ident = Ident::new(&format!("{}_with_context", &fn_decl_ctx.sig.ident), Span::call_site()); - fn_decl_ctx.sig.decl.inputs.insert(2, context_arg); - - fn_decl_ctx - } - - /// Takes the given method and creates a `method_runtime_api_impl` method that will be - /// implemented in the runtime for the client side. - fn create_method_runtime_api_impl(&mut self, mut method: TraitItemMethod) -> Option { - if remove_supported_attributes(&mut method.attrs).contains_key(CHANGED_IN_ATTRIBUTE) { - return None; - } - - let fn_decl = &method.sig.decl; - let ret_type = return_type_extract_type(&fn_decl.output); - - // Get types and if the value is borrowed from all parameters. - // If there is an error, we push it as the block to the user. - let param_types = match extract_parameter_names_types_and_borrows(fn_decl) { - Ok(res) => res.into_iter().map(|v| { - let ty = v.1; - let borrow = v.2; - quote!( #borrow #ty ) - }).collect::>(), - Err(e) => { - self.errors.push(e.to_compile_error()); - Vec::new() - } - }; - let name = generate_method_runtime_api_impl_name(&method.sig.ident); - let block_id = self.block_id; - let crate_ = self.crate_; - - Some( - parse_quote!{ - #[doc(hidden)] - fn #name( - &self, - at: &#block_id, - context: #crate_::runtime_api::ExecutionContext, - params: Option<( #( #param_types ),* )>, - params_encoded: Vec, - ) -> #crate_::error::Result<#crate_::runtime_api::NativeOrEncoded<#ret_type>>; - } - ) - } - - /// Takes the method declared by the user and creates the declaration we require for the runtime - /// api client side. This method will call by default the `method_runtime_api_impl` for doing - /// the actual call into the runtime. 
- fn create_method_decl(&mut self, mut method: TraitItemMethod, context: TokenStream) -> TraitItemMethod { - let params = match extract_parameter_names_types_and_borrows(&method.sig.decl) { - Ok(res) => res.into_iter().map(|v| v.0).collect::>(), - Err(e) => { - self.errors.push(e.to_compile_error()); - Vec::new() - } - }; - let params2 = params.clone(); - let ret_type = return_type_extract_type(&method.sig.decl.output); - - method.sig.decl = fold_fn_decl_for_client_side( - method.sig.decl.clone(), - &self.block_id, - &self.crate_ - ); - let name_impl = generate_method_runtime_api_impl_name(&method.sig.ident); - let crate_ = self.crate_; - - let found_attributes = remove_supported_attributes(&mut method.attrs); - // If the method has a `changed_in` attribute, we need to alter the method name to - // `method_before_version_VERSION`. - let (native_handling, param_tuple) = match get_changed_in(&found_attributes) { - Ok(Some(version)) => { - // Make sure that the `changed_in` version is at least the current `api_version`. - if get_api_version(&self.found_attributes).ok() < Some(version) { - self.errors.push( - Error::new( - method.span(), - "`changed_in` version can not be greater than the `api_version`", - ).to_compile_error() - ); - } - - let ident = Ident::new( - &format!("{}_before_version_{}", method.sig.ident, version), - method.sig.ident.span() - ); - method.sig.ident = ident; - method.attrs.push(parse_quote!( #[deprecated] )); - - let panic = format!("Calling `{}` should not return a native value!", method.sig.ident); - (quote!( panic!(#panic) ), quote!( None )) - }, - Ok(None) => (quote!( Ok(n) ), quote!( Some(( #( #params2 ),* )) )), - Err(e) => { - self.errors.push(e.to_compile_error()); - (quote!( unimplemented!() ), quote!( None )) - } - }; - - let function_name = method.sig.ident.to_string(); - - // Generate the default implementation that calls the `method_runtime_api_impl` method. - method.default = Some( - parse_quote! 
{ - { - let runtime_api_impl_params_encoded = - #crate_::runtime_api::Encode::encode(&( #( &#params ),* )); - - self.#name_impl(at, #context, #param_tuple, runtime_api_impl_params_encoded) - .and_then(|r| - match r { - #crate_::runtime_api::NativeOrEncoded::Native(n) => { - #native_handling - }, - #crate_::runtime_api::NativeOrEncoded::Encoded(r) => { - <#ret_type as #crate_::runtime_api::Decode>::decode(&mut &r[..]) - .ok_or_else(|| - #crate_::error::ErrorKind::CallResultDecode( - #function_name - ).into() - ) - } - } - ) - } - } - ); - - method - } + fn fold_item_trait_items(&mut self, items: Vec<TraitItem>) -> Vec<TraitItem> { + let mut result = Vec::new(); + + items.into_iter().for_each(|i| match i { + TraitItem::Method(method) => { + let (fn_decl, fn_impl, fn_decl_ctx) = self.fold_trait_item_method(method); + result.push(fn_decl.into()); + result.push(fn_decl_ctx.into()); + + if let Some(fn_impl) = fn_impl { + result.push(fn_impl.into()); + } + } + r => result.push(r), + }); + + result + } + + fn fold_trait_item_method( + &mut self, + method: TraitItemMethod, + ) -> (TraitItemMethod, Option<TraitItemMethod>, TraitItemMethod) { + let crate_ = self.crate_; + let context_other = quote!( #crate_::runtime_api::ExecutionContext::Other ); + let fn_impl = self.create_method_runtime_api_impl(method.clone()); + let fn_decl = self.create_method_decl(method.clone(), context_other); + let fn_decl_ctx = self.create_method_decl_with_context(method); + + (fn_decl, fn_impl, fn_decl_ctx) + } + + fn create_method_decl_with_context(&mut self, method: TraitItemMethod) -> TraitItemMethod { + let crate_ = self.crate_; + let context_arg: syn::FnArg = + parse_quote!( context: #crate_::runtime_api::ExecutionContext ); + let mut fn_decl_ctx = self.create_method_decl(method, quote!(context)); + fn_decl_ctx.sig.ident = Ident::new( + &format!("{}_with_context", &fn_decl_ctx.sig.ident), + Span::call_site(), + ); + fn_decl_ctx.sig.decl.inputs.insert(2, context_arg); + + fn_decl_ctx + } + + /// Takes the given method and creates a `method_runtime_api_impl` method that will be + /// implemented in the runtime for the client side. + fn create_method_runtime_api_impl( + &mut self, + mut method: TraitItemMethod, + ) -> Option<TraitItemMethod> { + if remove_supported_attributes(&mut method.attrs).contains_key(CHANGED_IN_ATTRIBUTE) { + return None; + } + + let fn_decl = &method.sig.decl; + let ret_type = return_type_extract_type(&fn_decl.output); + + // For each parameter, get its type and whether the value is borrowed. + // If there is an error, we push it to the user as a compile error. + let param_types = match extract_parameter_names_types_and_borrows(fn_decl) { + Ok(res) => res + .into_iter() + .map(|v| { + let ty = v.1; + let borrow = v.2; + quote!( #borrow #ty ) + }) + .collect::<Vec<_>>(), + Err(e) => { + self.errors.push(e.to_compile_error()); + Vec::new() + } + }; + let name = generate_method_runtime_api_impl_name(&method.sig.ident); + let block_id = self.block_id; + let crate_ = self.crate_; + + Some(parse_quote! { + #[doc(hidden)] + fn #name( + &self, + at: &#block_id, + context: #crate_::runtime_api::ExecutionContext, + params: Option<( #( #param_types ),* )>, + params_encoded: Vec<u8>, + ) -> #crate_::error::Result<#crate_::runtime_api::NativeOrEncoded<#ret_type>>; + }) + } + + /// Takes the method declared by the user and creates the declaration we require for the runtime + /// api client side. By default, this method calls the `method_runtime_api_impl` to do + /// the actual call into the runtime.
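Before the definition that follows, here is a self-contained sketch of the wrapper shape `create_method_decl` produces. The names, the `String` error type, and the little-endian byte encoding are simplified stand-ins for the real SCALE and `NativeOrEncoded` machinery:

```rust
use std::convert::TryInto;

// Stand-in for #crate_::runtime_api::NativeOrEncoded.
enum NativeOrEncoded<R> {
    Native(R),
    Encoded(Vec<u8>),
}

trait Api {
    // Stand-in for the hidden, generated `*_runtime_api_impl` method.
    fn test_runtime_api_impl(
        &self,
        params_encoded: Vec<u8>,
    ) -> Result<NativeOrEncoded<u64>, String>;

    // The user-facing method: encode the params, delegate, then either
    // hand back the native value or decode the encoded one.
    fn test(&self, data: u64) -> Result<u64, String> {
        let params_encoded = data.to_le_bytes().to_vec();
        self.test_runtime_api_impl(params_encoded).and_then(|r| match r {
            NativeOrEncoded::Native(n) => Ok(n),
            NativeOrEncoded::Encoded(bytes) => bytes
                .try_into()
                .map(u64::from_le_bytes)
                .map_err(|_| "decode failed".to_string()),
        })
    }
}

fn main() {}
```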
+ fn create_method_decl( + &mut self, + mut method: TraitItemMethod, + context: TokenStream, + ) -> TraitItemMethod { + let params = match extract_parameter_names_types_and_borrows(&method.sig.decl) { + Ok(res) => res.into_iter().map(|v| v.0).collect::>(), + Err(e) => { + self.errors.push(e.to_compile_error()); + Vec::new() + } + }; + let params2 = params.clone(); + let ret_type = return_type_extract_type(&method.sig.decl.output); + + method.sig.decl = + fold_fn_decl_for_client_side(method.sig.decl.clone(), &self.block_id, &self.crate_); + let name_impl = generate_method_runtime_api_impl_name(&method.sig.ident); + let crate_ = self.crate_; + + let found_attributes = remove_supported_attributes(&mut method.attrs); + // If the method has a `changed_in` attribute, we need to alter the method name to + // `method_before_version_VERSION`. + let (native_handling, param_tuple) = match get_changed_in(&found_attributes) { + Ok(Some(version)) => { + // Make sure that the `changed_in` version is at least the current `api_version`. + if get_api_version(&self.found_attributes).ok() < Some(version) { + self.errors.push( + Error::new( + method.span(), + "`changed_in` version can not be greater than the `api_version`", + ) + .to_compile_error(), + ); + } + + let ident = Ident::new( + &format!("{}_before_version_{}", method.sig.ident, version), + method.sig.ident.span(), + ); + method.sig.ident = ident; + method.attrs.push(parse_quote!( #[deprecated] )); + + let panic = format!( + "Calling `{}` should not return a native value!", + method.sig.ident + ); + (quote!(panic!(#panic)), quote!(None)) + } + Ok(None) => (quote!(Ok(n)), quote!( Some(( #( #params2 ),* )) )), + Err(e) => { + self.errors.push(e.to_compile_error()); + (quote!(unimplemented!()), quote!(None)) + } + }; + + let function_name = method.sig.ident.to_string(); + + // Generate the default implementation that calls the `method_runtime_api_impl` method. + method.default = Some(parse_quote! { + { + let runtime_api_impl_params_encoded = + #crate_::runtime_api::Encode::encode(&( #( &#params ),* )); + + self.#name_impl(at, #context, #param_tuple, runtime_api_impl_params_encoded) + .and_then(|r| + match r { + #crate_::runtime_api::NativeOrEncoded::Native(n) => { + #native_handling + }, + #crate_::runtime_api::NativeOrEncoded::Encoded(r) => { + <#ret_type as #crate_::runtime_api::Decode>::decode(&mut &r[..]) + .ok_or_else(|| + #crate_::error::ErrorKind::CallResultDecode( + #function_name + ).into() + ) + } + } + ) + } + }); + + method + } } impl<'a> Fold for ToClientSideDecl<'a> { - fn fold_item_trait(&mut self, mut input: ItemTrait) -> ItemTrait { - extend_generics_with_block(&mut input.generics); - - *self.found_attributes = remove_supported_attributes(&mut input.attrs); - // Check if this is the `Core` runtime api trait. - let is_core_trait = self.found_attributes.contains_key(CORE_TRAIT_ATTRIBUTE); - let block_ident = Ident::new(BLOCK_GENERIC_IDENT, Span::call_site()); - - if is_core_trait { - // Add all the supertraits we want to have for `Core`. - let crate_ = &self.crate_; - input.supertraits = parse_quote!( - 'static - + Send - + Sync - + #crate_::runtime_api::ApiExt<#block_ident> - ); - } else { - // Add the `Core` runtime api as super trait. - let crate_ = &self.crate_; - input.supertraits.push(parse_quote!( #crate_::runtime_api::Core<#block_ident> )); - } - - // The client side trait is only required when compiling with the feature `std` or `test`. 
- input.attrs.push(parse_quote!( #[cfg(any(feature = "std", test))] )); - input.items = self.fold_item_trait_items(input.items); - - fold::fold_item_trait(self, input) - } + fn fold_item_trait(&mut self, mut input: ItemTrait) -> ItemTrait { + extend_generics_with_block(&mut input.generics); + + *self.found_attributes = remove_supported_attributes(&mut input.attrs); + // Check if this is the `Core` runtime api trait. + let is_core_trait = self.found_attributes.contains_key(CORE_TRAIT_ATTRIBUTE); + let block_ident = Ident::new(BLOCK_GENERIC_IDENT, Span::call_site()); + + if is_core_trait { + // Add all the supertraits we want to have for `Core`. + let crate_ = &self.crate_; + input.supertraits = parse_quote!( + 'static + + Send + + Sync + + #crate_::runtime_api::ApiExt<#block_ident> + ); + } else { + // Add the `Core` runtime api as super trait. + let crate_ = &self.crate_; + input + .supertraits + .push(parse_quote!( #crate_::runtime_api::Core<#block_ident> )); + } + + // The client side trait is only required when compiling with the feature `std` or `test`. + input + .attrs + .push(parse_quote!( #[cfg(any(feature = "std", test))] )); + input.items = self.fold_item_trait_items(input.items); + + fold::fold_item_trait(self, input) + } } /// Parse the given attribute as `API_VERSION_ATTRIBUTE`. fn parse_runtime_api_version(version: &Attribute) -> Result<u64> { - let meta = version.parse_meta()?; - - let err = Err(Error::new( - meta.span(), - &format!( - "Unexpected `{api_version}` attribute. The supported format is `{api_version}(1)`", - api_version = API_VERSION_ATTRIBUTE - ) - ) - ); - - match meta { - Meta::List(list) => { - if list.nested.len() > 1 && list.nested.is_empty() { - err - } else { - match list.nested.first().as_ref().map(|v| v.value()) { - Some(NestedMeta::Literal(Lit::Int(i))) => { - Ok(i.value()) - }, - _ => err, - } - } - }, - _ => err, - } + let meta = version.parse_meta()?; + + let err = Err(Error::new( + meta.span(), + &format!( + "Unexpected `{api_version}` attribute. The supported format is `{api_version}(1)`", + api_version = API_VERSION_ATTRIBUTE + ), + )); + + match meta { + Meta::List(list) => { + if list.nested.len() > 1 || list.nested.is_empty() { + err + } else { + match list.nested.first().as_ref().map(|v| v.value()) { + Some(NestedMeta::Literal(Lit::Int(i))) => Ok(i.value()), + _ => err, + } + } + } + _ => err, + } } /// Generates the identifier as const variable for the given `trait_name` /// by hashing the `trait_name`. fn generate_runtime_api_id(trait_name: &str) -> TokenStream { - let mut res = [0; 8]; - res.copy_from_slice(blake2_rfc::blake2b::blake2b(8, &[], trait_name.as_bytes()).as_bytes()); + let mut res = [0; 8]; + res.copy_from_slice(blake2_rfc::blake2b::blake2b(8, &[], trait_name.as_bytes()).as_bytes()); - quote!( const ID: [u8; 8] = [ #( #res ),* ]; ) + quote!( const ID: [u8; 8] = [ #( #res ),* ]; ) } /// Generates the const variable that holds the runtime api version. fn generate_runtime_api_version(version: u32) -> TokenStream { - quote!( const VERSION: u32 = #version; ) + quote!( const VERSION: u32 = #version; ) } /// Generates the implementation of `RuntimeApiInfo` for the given trait.
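For reference, the ID derivation emitted by `generate_runtime_api_id` above can be reproduced directly with the `blake2-rfc` crate that this macro crate already depends on; a minimal sketch:

```rust
// Sketch: compute the 8-byte api ID exactly as the generated
// `const ID: [u8; 8]` does, by blake2b-hashing the trait name with no key.
fn runtime_api_id(trait_name: &str) -> [u8; 8] {
    let mut id = [0u8; 8];
    id.copy_from_slice(blake2_rfc::blake2b::blake2b(8, &[], trait_name.as_bytes()).as_bytes());
    id
}

fn main() {
    // The ID that would be emitted for a trait named `Core`.
    println!("{:?}", runtime_api_id("Core"));
}
```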
fn generate_runtime_info_impl(trait_: &ItemTrait, version: u64) -> TokenStream { - let trait_name = &trait_.ident; - let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); - let id = generate_runtime_api_id(&trait_name.to_string()); - let version = generate_runtime_api_version(version as u32); - let (impl_generics, ty_generics, where_clause) = trait_.generics.split_for_impl(); - - quote!( - #[cfg(any(feature = "std", test))] - impl #impl_generics #crate_::runtime_api::RuntimeApiInfo - for #trait_name #ty_generics #where_clause - { - #id - #version - } - ) + let trait_name = &trait_.ident; + let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); + let id = generate_runtime_api_id(&trait_name.to_string()); + let version = generate_runtime_api_version(version as u32); + let (impl_generics, ty_generics, where_clause) = trait_.generics.split_for_impl(); + + quote!( + #[cfg(any(feature = "std", test))] + impl #impl_generics #crate_::runtime_api::RuntimeApiInfo + for #trait_name #ty_generics #where_clause + { + #id + #version + } + ) } /// Get changed in version from the user given attribute or `Ok(None)`, if no attribute was given. fn get_changed_in(found_attributes: &HashMap<&'static str, Attribute>) -> Result> { - found_attributes.get(&CHANGED_IN_ATTRIBUTE) - .map(|v| parse_runtime_api_version(v).map(Some)) - .unwrap_or(Ok(None)) + found_attributes + .get(&CHANGED_IN_ATTRIBUTE) + .map(|v| parse_runtime_api_version(v).map(Some)) + .unwrap_or(Ok(None)) } /// Get the api version from the user given attribute or `Ok(1)`, if no attribute was given. fn get_api_version(found_attributes: &HashMap<&'static str, Attribute>) -> Result { - found_attributes.get(&API_VERSION_ATTRIBUTE).map(parse_runtime_api_version).unwrap_or(Ok(1)) + found_attributes + .get(&API_VERSION_ATTRIBUTE) + .map(parse_runtime_api_version) + .unwrap_or(Ok(1)) } /// Generate the declaration of the trait for the client side. fn generate_client_side_decls(decls: &[ItemTrait]) -> TokenStream { - let mut result = Vec::new(); + let mut result = Vec::new(); - for decl in decls { - let decl = decl.clone(); + for decl in decls { + let decl = decl.clone(); - let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); - let block_id = quote!( #crate_::runtime_api::BlockId ); - let mut found_attributes = HashMap::new(); - let mut errors = Vec::new(); + let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); + let block_id = quote!( #crate_::runtime_api::BlockId ); + let mut found_attributes = HashMap::new(); + let mut errors = Vec::new(); - let decl = { - let mut to_client_side = ToClientSideDecl { - crate_: &crate_, - block_id: &block_id, - found_attributes: &mut found_attributes, - errors: &mut errors, - }; - to_client_side.fold_item_trait(decl) - }; + let decl = { + let mut to_client_side = ToClientSideDecl { + crate_: &crate_, + block_id: &block_id, + found_attributes: &mut found_attributes, + errors: &mut errors, + }; + to_client_side.fold_item_trait(decl) + }; - let api_version = get_api_version(&found_attributes); + let api_version = get_api_version(&found_attributes); - let runtime_info = unwrap_or_error( - api_version.map(|v| generate_runtime_info_impl(&decl, v)) - ); + let runtime_info = + unwrap_or_error(api_version.map(|v| generate_runtime_info_impl(&decl, v))); - result.push(quote!( #decl #runtime_info #( #errors )* )); - } + result.push(quote!( #decl #runtime_info #( #errors )* )); + } - quote!( #( #result )* ) + quote!( #( #result )* ) } /// Checks that a trait declaration is in the format we expect. 
struct CheckTraitDecl { - errors: Vec<Error>, + errors: Vec<Error>, } impl<'ast> Visit<'ast> for CheckTraitDecl { - fn visit_fn_arg(&mut self, input: &'ast FnArg) { - match input { - FnArg::Captured(ref arg) => { - match arg.pat { - Pat::Ident(ref pat) if pat.ident == "at" => { - self.errors.push( - Error::new( - pat.span(), - "`decl_runtime_apis!` adds automatically a parameter \ - `at: &BlockId`. Please rename/remove your parameter." - ) - ) - }, - _ => {} - } - }, - FnArg::SelfRef(_) | FnArg::SelfValue(_) => { - self.errors.push(Error::new(input.span(), "Self values are not supported.")) - } - _ => { - self.errors.push( - Error::new( - input.span(), - "Only function arguments in the form `pat: type` are supported." - ) - ) - } - } - - visit::visit_fn_arg(self, input); - } - - fn visit_generic_param(&mut self, input: &'ast GenericParam) { - match input { - GenericParam::Type(ty) if &ty.ident == BLOCK_GENERIC_IDENT => { - self.errors.push( - Error::new( - input.span(), - "`Block: BlockT` generic parameter will be added automatically by the \ - `decl_runtime_apis!` macro!" - ) - ) - }, - _ => {} - } - - visit::visit_generic_param(self, input); - } - - fn visit_trait_bound(&mut self, input: &'ast TraitBound) { - if let Some(last_ident) = input.path.segments.last().map(|v| &v.value().ident) { - if last_ident == "BlockT" || last_ident == BLOCK_GENERIC_IDENT { - self.errors.push( - Error::new( - input.span(), - "`Block: BlockT` generic parameter will be added automatically by the \ - `decl_runtime_apis!` macro! If you try to use a different trait than the \ - substrate `Block` trait, please rename it locally." - ) - ) - } - } - - visit::visit_trait_bound(self, input) - } + fn visit_fn_arg(&mut self, input: &'ast FnArg) { + match input { + FnArg::Captured(ref arg) => match arg.pat { + Pat::Ident(ref pat) if pat.ident == "at" => self.errors.push(Error::new( + pat.span(), + "`decl_runtime_apis!` automatically adds a parameter \ + `at: &BlockId`. Please rename/remove your parameter.", + )), + _ => {} + }, + FnArg::SelfRef(_) | FnArg::SelfValue(_) => self + .errors + .push(Error::new(input.span(), "Self values are not supported.")), + _ => self.errors.push(Error::new( + input.span(), + "Only function arguments in the form `pat: type` are supported.", + )), + } + + visit::visit_fn_arg(self, input); + } + + fn visit_generic_param(&mut self, input: &'ast GenericParam) { + match input { + GenericParam::Type(ty) if &ty.ident == BLOCK_GENERIC_IDENT => { + self.errors.push(Error::new( + input.span(), + "`Block: BlockT` generic parameter will be added automatically by the \ + `decl_runtime_apis!` macro!", + )) + } + _ => {} + } + + visit::visit_generic_param(self, input); + } + + fn visit_trait_bound(&mut self, input: &'ast TraitBound) { + if let Some(last_ident) = input.path.segments.last().map(|v| &v.value().ident) { + if last_ident == "BlockT" || last_ident == BLOCK_GENERIC_IDENT { + self.errors.push(Error::new( + input.span(), + "`Block: BlockT` generic parameter will be added automatically by the \ + `decl_runtime_apis!` macro! If you want to use a trait other than the \ + substrate `Block` trait, please rename it locally.", + )) + } + } + + visit::visit_trait_bound(self, input) + } } /// Check that the trait declarations are in the format we expect.
fn check_trait_decls(decls: &[ItemTrait]) -> Option<TokenStream> { - let mut checker = CheckTraitDecl { errors: Vec::new() }; - decls.iter().for_each(|decl| visit::visit_item_trait(&mut checker, &decl)); - - if checker.errors.is_empty() { - None - } else { - let errors = checker.errors.into_iter().map(|e| e.to_compile_error()); - Some(quote!( #( #errors )* )) - } + let mut checker = CheckTraitDecl { errors: Vec::new() }; + decls + .iter() + .for_each(|decl| visit::visit_item_trait(&mut checker, &decl)); + + if checker.errors.is_empty() { + None + } else { + let errors = checker.errors.into_iter().map(|e| e.to_compile_error()); + Some(quote!( #( #errors )* )) + } } /// The implementation of the `decl_runtime_apis!` macro. pub fn decl_runtime_apis_impl(input: proc_macro::TokenStream) -> proc_macro::TokenStream { - // Parse all trait declarations - let RuntimeApiDecls { decls: api_decls } = parse_macro_input!(input as RuntimeApiDecls); + // Parse all trait declarations + let RuntimeApiDecls { decls: api_decls } = parse_macro_input!(input as RuntimeApiDecls); - if let Some(errors) = check_trait_decls(&api_decls) { - return errors.into(); - } + if let Some(errors) = check_trait_decls(&api_decls) { + return errors.into(); + } - let hidden_includes = generate_hidden_includes(HIDDEN_INCLUDES_ID); - let runtime_decls = generate_runtime_decls(&api_decls); - let client_side_decls = generate_client_side_decls(&api_decls); + let hidden_includes = generate_hidden_includes(HIDDEN_INCLUDES_ID); + let runtime_decls = generate_runtime_decls(&api_decls); + let client_side_decls = generate_client_side_decls(&api_decls); - quote!( - #hidden_includes + quote!( + #hidden_includes - #runtime_decls + #runtime_decls - #client_side_decls - ).into() + #client_side_decls + ) + .into() } diff --git a/core/sr-api-macros/src/impl_runtime_apis.rs b/core/sr-api-macros/src/impl_runtime_apis.rs index b5dd3f21c5..5ad1d4bfe7 100644 --- a/core/sr-api-macros/src/impl_runtime_apis.rs +++ b/core/sr-api-macros/src/impl_runtime_apis.rs @@ -15,10 +15,9 @@ // along with Substrate. If not, see <http://www.gnu.org/licenses/>. use crate::utils::{ - unwrap_or_error, generate_crate_access, generate_hidden_includes, - generate_runtime_mod_name_for_trait, generate_method_runtime_api_impl_name, - extract_parameter_names_types_and_borrows, generate_native_call_generator_fn_name, - return_type_extract_type + extract_parameter_names_types_and_borrows, generate_crate_access, generate_hidden_includes, + generate_method_runtime_api_impl_name, generate_native_call_generator_fn_name, + generate_runtime_mod_name_for_trait, return_type_extract_type, unwrap_or_error, }; use proc_macro2::{Span, TokenStream}; @@ -26,9 +25,11 @@ use proc_macro2::{Span, TokenStream}; use quote::quote; use syn::{ - spanned::Spanned, parse_macro_input, Ident, Type, ItemImpl, MethodSig, Path, - ImplItem, parse::{Parse, ParseStream, Result, Error}, PathArguments, GenericArgument, TypePath, - fold::{self, Fold}, parse_quote + fold::{self, Fold}, + parse::{Error, Parse, ParseStream, Result}, + parse_macro_input, parse_quote, + spanned::Spanned, + GenericArgument, Ident, ImplItem, ItemImpl, MethodSig, Path, PathArguments, Type, TypePath, }; use std::{collections::HashSet, iter}; @@ -38,609 +39,623 @@ const HIDDEN_INCLUDES_ID: &str = "IMPL_RUNTIME_APIS"; /// The structure used for parsing the runtime api implementations.
struct RuntimeApiImpls { - impls: Vec<ItemImpl>, + impls: Vec<ItemImpl>, } impl Parse for RuntimeApiImpls { - fn parse(input: ParseStream) -> Result<Self> { - let mut impls = Vec::new(); + fn parse(input: ParseStream) -> Result<Self> { + let mut impls = Vec::new(); - while !input.is_empty() { - impls.push(ItemImpl::parse(input)?); - } + while !input.is_empty() { + impls.push(ItemImpl::parse(input)?); + } - Ok(Self { impls }) - } + Ok(Self { impls }) + } } /// Generates the call to the implementation of the requested function. /// The generated code includes decoding of the input arguments and encoding of the output. fn generate_impl_call( - signature: &MethodSig, - runtime: &Type, - input: &Ident, - impl_trait: &Path + signature: &MethodSig, + runtime: &Type, + input: &Ident, + impl_trait: &Path, ) -> Result<TokenStream> { - let params = extract_parameter_names_types_and_borrows(&signature.decl)?; - - let c = generate_crate_access(HIDDEN_INCLUDES_ID); - let c_iter = iter::repeat(&c); - let fn_name = &signature.ident; - let fn_name_str = iter::repeat(fn_name.to_string()); - let input = iter::repeat(input); - let pnames = params.iter().map(|v| &v.0); - let pnames2 = params.iter().map(|v| &v.0); - let ptypes = params.iter().map(|v| &v.1); - let pborrow = params.iter().map(|v| &v.2); - - Ok( - quote!( - #( - let #pnames : #ptypes = match #c_iter::runtime_api::Decode::decode(&mut #input) { - Some(input) => input, - None => panic!("Bad input data provided to {}", #fn_name_str), - }; - )* - - let output = <#runtime as #impl_trait>::#fn_name(#( #pborrow #pnames2 ),*); - #c::runtime_api::Encode::encode(&output) - ).into() - ) + let params = extract_parameter_names_types_and_borrows(&signature.decl)?; + + let c = generate_crate_access(HIDDEN_INCLUDES_ID); + let c_iter = iter::repeat(&c); + let fn_name = &signature.ident; + let fn_name_str = iter::repeat(fn_name.to_string()); + let input = iter::repeat(input); + let pnames = params.iter().map(|v| &v.0); + let pnames2 = params.iter().map(|v| &v.0); + let ptypes = params.iter().map(|v| &v.1); + let pborrow = params.iter().map(|v| &v.2); + + Ok(quote!( + #( + let #pnames : #ptypes = match #c_iter::runtime_api::Decode::decode(&mut #input) { + Some(input) => input, + None => panic!("Bad input data provided to {}", #fn_name_str), + }; + )* + + let output = <#runtime as #impl_trait>::#fn_name(#( #pborrow #pnames2 ),*); + #c::runtime_api::Encode::encode(&output) + ) + .into()) } /// Extract the trait that is implemented in the given `ItemImpl`. fn extract_impl_trait<'a>(impl_: &'a ItemImpl) -> Result<&'a Path> { - impl_.trait_.as_ref().map(|v| &v.1).ok_or_else( - || Error::new(impl_.span(), "Only implementation of traits are supported!") - ).and_then(|p| { - if p.segments.len() > 1 { - Ok(p) - } else { - Err( - Error::new( - p.span(), - "The implemented trait has to be referenced with a path, \ - e.g. `impl client::Core for Runtime`." - ) - ) - } - }) + impl_ + .trait_ + .as_ref() + .map(|v| &v.1) + .ok_or_else(|| Error::new(impl_.span(), "Only implementations of traits are supported!")) + .and_then(|p| { + if p.segments.len() > 1 { + Ok(p) + } else { + Err(Error::new( + p.span(), + "The implemented trait has to be referenced with a path, \ + e.g. `impl client::Core for Runtime`.", + )) + } + }) } /// Extracts the runtime block identifier.
fn extract_runtime_block_ident(trait_: &Path) -> Result<&TypePath> { - let span = trait_.span(); - let segment = trait_ - .segments - .last() - .ok_or_else( - || Error::new(span, "Empty path not supported") - )?; - let generics = segment.value(); - - match &generics.arguments { - PathArguments::AngleBracketed(ref args) => { - args.args.first().and_then(|v| match v.value() { - GenericArgument::Type(Type::Path(block)) => Some(block), - _ => None - }).ok_or_else(|| Error::new(args.span(), "Missing `Block` generic parameter.")) - }, - PathArguments::None => { - let span = trait_.segments.last().as_ref().unwrap().value().span(); - Err(Error::new(span, "Missing `Block` generic parameter.")) - }, - PathArguments::Parenthesized(_) => { - Err(Error::new(generics.arguments.span(), "Unexpected parentheses in path!")) - } - } + let span = trait_.span(); + let segment = trait_ + .segments + .last() + .ok_or_else(|| Error::new(span, "Empty path not supported"))?; + let generics = segment.value(); + + match &generics.arguments { + PathArguments::AngleBracketed(ref args) => args + .args + .first() + .and_then(|v| match v.value() { + GenericArgument::Type(Type::Path(block)) => Some(block), + _ => None, + }) + .ok_or_else(|| Error::new(args.span(), "Missing `Block` generic parameter.")), + PathArguments::None => { + let span = trait_.segments.last().as_ref().unwrap().value().span(); + Err(Error::new(span, "Missing `Block` generic parameter.")) + } + PathArguments::Parenthesized(_) => Err(Error::new( + generics.arguments.span(), + "Unexpected parentheses in path!", + )), + } } /// Generate all the implementation calls for the given functions. fn generate_impl_calls( - impls: &[ItemImpl], - input: &Ident + impls: &[ItemImpl], + input: &Ident, ) -> Result> { - let mut impl_calls = Vec::new(); - - for impl_ in impls { - let impl_trait_path = extract_impl_trait(impl_)?; - let impl_trait = extend_with_runtime_decl_path(impl_trait_path.clone()); - let impl_trait_ident = &impl_trait_path - .segments - .last() - .ok_or_else(|| Error::new(impl_trait_path.span(), "Empty trait path not possible!"))? - .value() - .ident; - - for item in &impl_.items { - match item { - ImplItem::Method(method) => { - let impl_call = generate_impl_call( - &method.sig, - &impl_.self_ty, - input, - &impl_trait - )?; - - impl_calls.push( - (impl_trait_ident.clone(), method.sig.ident.clone(), impl_call) - ); - }, - _ => {}, - } - } - } - - Ok(impl_calls) + let mut impl_calls = Vec::new(); + + for impl_ in impls { + let impl_trait_path = extract_impl_trait(impl_)?; + let impl_trait = extend_with_runtime_decl_path(impl_trait_path.clone()); + let impl_trait_ident = &impl_trait_path + .segments + .last() + .ok_or_else(|| Error::new(impl_trait_path.span(), "Empty trait path not possible!"))? + .value() + .ident; + + for item in &impl_.items { + match item { + ImplItem::Method(method) => { + let impl_call = + generate_impl_call(&method.sig, &impl_.self_ty, input, &impl_trait)?; + + impl_calls.push(( + impl_trait_ident.clone(), + method.sig.ident.clone(), + impl_call, + )); + } + _ => {} + } + } + } + + Ok(impl_calls) } fn prefix_function_with_trait(trait_: &Ident, function: &Ident) -> String { - format!("{}_{}", trait_.to_string(), function.to_string()) + format!("{}_{}", trait_.to_string(), function.to_string()) } /// Generate the dispatch function that is used in native to call into the runtime. 
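To make the naming scheme concrete before the generator itself: every implemented method is exposed to native dispatch under the `Trait_method` name built by `prefix_function_with_trait`, and the generated `dispatch` is a plain string match. A minimal stand-in, with a dummy body where the generated impl call would go and `Core_version` as a hypothetical entry:

```rust
// Simplified: the real function takes `&Ident`s, not `&str`s.
fn prefix_function_with_trait(trait_: &str, function: &str) -> String {
    format!("{}_{}", trait_, function)
}

// A miniature version of the generated `dispatch`: the real one returns
// the SCALE-encoded result of the matched impl call.
fn dispatch(method: &str, _data: &[u8]) -> Option<Vec<u8>> {
    match method {
        "Core_version" => Some(vec![]), // placeholder for `{ #impl_ }`
        _ => None,
    }
}

fn main() {
    assert_eq!(prefix_function_with_trait("Core", "version"), "Core_version");
    assert!(dispatch("Core_version", &[]).is_some());
    assert!(dispatch("unknown", &[]).is_none());
}
```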
fn generate_dispatch_function(impls: &[ItemImpl]) -> Result { - let data = Ident::new("data", Span::call_site()); - let impl_calls = generate_impl_calls(impls, &data)? - .into_iter() - .map(|(trait_, fn_name, impl_)| { - let name = prefix_function_with_trait(&trait_, &fn_name); - quote!( #name => Some({ #impl_ }), ) - }); - - Ok(quote!( - #[cfg(feature = "std")] - pub fn dispatch(method: &str, mut #data: &[u8]) -> Option> { - match method { - #( #impl_calls )* - _ => None, - } - } - ).into()) + let data = Ident::new("data", Span::call_site()); + let impl_calls = + generate_impl_calls(impls, &data)? + .into_iter() + .map(|(trait_, fn_name, impl_)| { + let name = prefix_function_with_trait(&trait_, &fn_name); + quote!( #name => Some({ #impl_ }), ) + }); + + Ok(quote!( + #[cfg(feature = "std")] + pub fn dispatch(method: &str, mut #data: &[u8]) -> Option> { + match method { + #( #impl_calls )* + _ => None, + } + } + ) + .into()) } /// Generate the interface functions that are used to call into the runtime in wasm. fn generate_wasm_interface(impls: &[ItemImpl]) -> Result { - let input = Ident::new("input", Span::call_site()); - let c = generate_crate_access(HIDDEN_INCLUDES_ID); - let impl_calls = generate_impl_calls(impls, &input)? - .into_iter() - .map(|(trait_, fn_name, impl_)| { - let fn_name = Ident::new( - &prefix_function_with_trait(&trait_, &fn_name), - Span::call_site() - ); - - quote!( - #[cfg(not(feature = "std"))] - #[no_mangle] - pub fn #fn_name(input_data: *mut u8, input_len: usize) -> u64 { - let mut #input = if input_len == 0 { - &[0u8; 0] - } else { - unsafe { - #c::runtime_api::slice::from_raw_parts(input_data, input_len) - } - }; - - let output = { #impl_ }; - let res = output.as_ptr() as u64 + ((output.len() as u64) << 32); - - // Leak the output vector to avoid it being freed. - // This is fine in a WASM context since the heap - // will be discarded after the call. - #c::runtime_api::mem::forget(output); - res - } - ) - }); - - Ok(quote!( #( #impl_calls )* )) + let input = Ident::new("input", Span::call_site()); + let c = generate_crate_access(HIDDEN_INCLUDES_ID); + let impl_calls = + generate_impl_calls(impls, &input)? + .into_iter() + .map(|(trait_, fn_name, impl_)| { + let fn_name = Ident::new( + &prefix_function_with_trait(&trait_, &fn_name), + Span::call_site(), + ); + + quote!( + #[cfg(not(feature = "std"))] + #[no_mangle] + pub fn #fn_name(input_data: *mut u8, input_len: usize) -> u64 { + let mut #input = if input_len == 0 { + &[0u8; 0] + } else { + unsafe { + #c::runtime_api::slice::from_raw_parts(input_data, input_len) + } + }; + + let output = { #impl_ }; + let res = output.as_ptr() as u64 + ((output.len() as u64) << 32); + + // Leak the output vector to avoid it being freed. + // This is fine in a WASM context since the heap + // will be discarded after the call. 
+ #c::runtime_api::mem::forget(output); + res + } + ) + }); + + Ok(quote!( #( #impl_calls )* )) } fn generate_block_and_block_id_ty( - runtime: &Type, - trait_: &'static str, - assoc_type: &'static str, + runtime: &Type, + trait_: &'static str, + assoc_type: &'static str, ) -> (TokenStream, TokenStream) { - let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); - let trait_ = Ident::new(trait_, Span::call_site()); - let assoc_type = Ident::new(assoc_type, Span::call_site()); + let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); + let trait_ = Ident::new(trait_, Span::call_site()); + let assoc_type = Ident::new(assoc_type, Span::call_site()); - let block = quote!( <#runtime as #crate_::runtime_api::#trait_>::#assoc_type ); - let block_id = quote!( #crate_::runtime_api::BlockId<#block> ); + let block = quote!( <#runtime as #crate_::runtime_api::#trait_>::#assoc_type ); + let block_id = quote!( #crate_::runtime_api::BlockId<#block> ); - (block, block_id) + (block, block_id) } fn generate_node_block_and_block_id_ty(runtime: &Type) -> (TokenStream, TokenStream) { - generate_block_and_block_id_ty(runtime, "GetNodeBlockType", "NodeBlock") + generate_block_and_block_id_ty(runtime, "GetNodeBlockType", "NodeBlock") } fn generate_runtime_api_base_structures(impls: &[ItemImpl]) -> Result { - let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); - let runtime = &impls.get(0).ok_or_else(|| - Error::new(Span::call_site(), "No api implementation given!") - )?.self_ty; - let (block, block_id) = generate_node_block_and_block_id_ty(runtime); - - Ok(quote!( - pub struct RuntimeApi {} - /// Implements all runtime apis for the client side. - #[cfg(any(feature = "std", test))] - pub struct RuntimeApiImpl + 'static> { - call: &'static C, - commit_on_success: ::std::cell::RefCell, - initialized_block: ::std::cell::RefCell>, - changes: ::std::cell::RefCell<#crate_::runtime_api::OverlayedChanges>, - } - - // `RuntimeApi` itself is not threadsafe. However, an instance is only available in a - // `ApiRef` object and `ApiRef` also has an associated lifetime. This lifetimes makes it - // impossible to move `RuntimeApi` into another thread. 
 fn generate_block_and_block_id_ty(
-	runtime: &Type,
-	trait_: &'static str,
-	assoc_type: &'static str,
+    runtime: &Type,
+    trait_: &'static str,
+    assoc_type: &'static str,
 ) -> (TokenStream, TokenStream) {
-	let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID);
-	let trait_ = Ident::new(trait_, Span::call_site());
-	let assoc_type = Ident::new(assoc_type, Span::call_site());
+    let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID);
+    let trait_ = Ident::new(trait_, Span::call_site());
+    let assoc_type = Ident::new(assoc_type, Span::call_site());
 
-	let block = quote!( <#runtime as #crate_::runtime_api::#trait_>::#assoc_type );
-	let block_id = quote!( #crate_::runtime_api::BlockId<#block> );
+    let block = quote!( <#runtime as #crate_::runtime_api::#trait_>::#assoc_type );
+    let block_id = quote!( #crate_::runtime_api::BlockId<#block> );
 
-	(block, block_id)
+    (block, block_id)
 }
 
 fn generate_node_block_and_block_id_ty(runtime: &Type) -> (TokenStream, TokenStream) {
-	generate_block_and_block_id_ty(runtime, "GetNodeBlockType", "NodeBlock")
+    generate_block_and_block_id_ty(runtime, "GetNodeBlockType", "NodeBlock")
 }
 
 fn generate_runtime_api_base_structures(impls: &[ItemImpl]) -> Result<TokenStream> {
-	let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID);
-	let runtime = &impls.get(0).ok_or_else(||
-		Error::new(Span::call_site(), "No api implementation given!")
-	)?.self_ty;
-	let (block, block_id) = generate_node_block_and_block_id_ty(runtime);
-
-	Ok(quote!(
-		pub struct RuntimeApi {}
-		/// Implements all runtime apis for the client side.
-		#[cfg(any(feature = "std", test))]
-		pub struct RuntimeApiImpl<C: #crate_::runtime_api::CallRuntimeAt<#block> + 'static> {
-			call: &'static C,
-			commit_on_success: ::std::cell::RefCell<bool>,
-			initialized_block: ::std::cell::RefCell<Option<#block_id>>,
-			changes: ::std::cell::RefCell<#crate_::runtime_api::OverlayedChanges>,
-		}
-
-		// `RuntimeApi` itself is not threadsafe. However, an instance is only available in a
-		// `ApiRef` object and `ApiRef` also has an associated lifetime. This lifetimes makes it
-		// impossible to move `RuntimeApi` into another thread.
-		#[cfg(any(feature = "std", test))]
-		unsafe impl<C: #crate_::runtime_api::CallRuntimeAt<#block>> Send for RuntimeApiImpl<C> {}
-		#[cfg(any(feature = "std", test))]
-		unsafe impl<C: #crate_::runtime_api::CallRuntimeAt<#block>> Sync for RuntimeApiImpl<C> {}
-
-		#[cfg(any(feature = "std", test))]
-		impl<C: #crate_::runtime_api::CallRuntimeAt<#block>> #crate_::runtime_api::ApiExt<#block>
-			for RuntimeApiImpl<C>
-		{
-			fn map_api_result<F: FnOnce(&Self) -> ::std::result::Result<R, E>, R, E>(
-				&self,
-				map_call: F
-			) -> ::std::result::Result<R, E> where Self: Sized {
-				*self.commit_on_success.borrow_mut() = false;
-				let res = map_call(self);
-				*self.commit_on_success.borrow_mut() = true;
-
-				self.commit_on_ok(&res);
-
-				res
-			}
-
-			fn runtime_version_at(
-				&self,
-				at: &#block_id
-			) -> #crate_::error::Result<#crate_::runtime_api::RuntimeVersion> {
-				self.call.runtime_version_at(at)
-			}
-		}
-
-		#[cfg(any(feature = "std", test))]
-		impl<C: #crate_::runtime_api::CallRuntimeAt<#block> + 'static>
-			#crate_::runtime_api::ConstructRuntimeApi<#block, C> for RuntimeApi
-		{
-			type RuntimeApi = RuntimeApiImpl<C>;
-
-			fn construct_runtime_api<'a>(
-				call: &'a C,
-			) -> #crate_::runtime_api::ApiRef<'a, Self::RuntimeApi> {
-				RuntimeApiImpl {
-					call: unsafe { ::std::mem::transmute(call) },
-					commit_on_success: true.into(),
-					initialized_block: None.into(),
-					changes: Default::default(),
-				}.into()
-			}
-		}
-
-		#[cfg(any(feature = "std", test))]
-		impl<C: #crate_::runtime_api::CallRuntimeAt<#block>> RuntimeApiImpl<C> {
-			fn call_api_at<
-				R: #crate_::runtime_api::Encode + #crate_::runtime_api::Decode + PartialEq,
-				NC: FnOnce() -> ::std::result::Result<R, &'static str> + ::std::panic::UnwindSafe,
-			>(
-				&self,
-				at: &#block_id,
-				function: &'static str,
-				args: Vec<u8>,
-				native_call: Option<NC>,
-				context: #crate_::runtime_api::ExecutionContext
-			) -> #crate_::error::Result<#crate_::runtime_api::NativeOrEncoded<R>> {
-				let res = unsafe {
-					self.call.call_api_at(
-						at,
-						function,
-						args,
-						&mut *self.changes.borrow_mut(),
-						&mut *self.initialized_block.borrow_mut(),
-						native_call,
-						context
-					)
-				};
-
-				self.commit_on_ok(&res);
-				res
-			}
-
-			fn commit_on_ok<R, E>(&self, res: &::std::result::Result<R, E>) {
-				if *self.commit_on_success.borrow() {
-					if res.is_err() {
-						self.changes.borrow_mut().discard_prospective();
-					} else {
-						self.changes.borrow_mut().commit_prospective();
-					}
-				}
-			}
-		}
-	))
+    let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID);
+    let runtime = &impls
+        .get(0)
+        .ok_or_else(|| Error::new(Span::call_site(), "No api implementation given!"))?
+        .self_ty;
+    let (block, block_id) = generate_node_block_and_block_id_ty(runtime);
+
+    Ok(quote!(
+        pub struct RuntimeApi {}
+        /// Implements all runtime apis for the client side.
+        #[cfg(any(feature = "std", test))]
+        pub struct RuntimeApiImpl<C: #crate_::runtime_api::CallRuntimeAt<#block> + 'static> {
+            call: &'static C,
+            commit_on_success: ::std::cell::RefCell<bool>,
+            initialized_block: ::std::cell::RefCell<Option<#block_id>>,
+            changes: ::std::cell::RefCell<#crate_::runtime_api::OverlayedChanges>,
+        }
+
+        // `RuntimeApi` itself is not threadsafe. However, an instance is only available in a
+        // `ApiRef` object and `ApiRef` also has an associated lifetime. This lifetimes makes it
+        // impossible to move `RuntimeApi` into another thread.
+        #[cfg(any(feature = "std", test))]
+        unsafe impl<C: #crate_::runtime_api::CallRuntimeAt<#block>> Send for RuntimeApiImpl<C> {}
+        #[cfg(any(feature = "std", test))]
+        unsafe impl<C: #crate_::runtime_api::CallRuntimeAt<#block>> Sync for RuntimeApiImpl<C> {}
+
+        #[cfg(any(feature = "std", test))]
+        impl<C: #crate_::runtime_api::CallRuntimeAt<#block>> #crate_::runtime_api::ApiExt<#block>
+            for RuntimeApiImpl<C>
+        {
+            fn map_api_result<F: FnOnce(&Self) -> ::std::result::Result<R, E>, R, E>(
+                &self,
+                map_call: F
+            ) -> ::std::result::Result<R, E> where Self: Sized {
+                *self.commit_on_success.borrow_mut() = false;
+                let res = map_call(self);
+                *self.commit_on_success.borrow_mut() = true;
+
+                self.commit_on_ok(&res);
+
+                res
+            }
+
+            fn runtime_version_at(
+                &self,
+                at: &#block_id
+            ) -> #crate_::error::Result<#crate_::runtime_api::RuntimeVersion> {
+                self.call.runtime_version_at(at)
+            }
+        }
+
+        #[cfg(any(feature = "std", test))]
+        impl<C: #crate_::runtime_api::CallRuntimeAt<#block> + 'static>
+            #crate_::runtime_api::ConstructRuntimeApi<#block, C> for RuntimeApi
+        {
+            type RuntimeApi = RuntimeApiImpl<C>;
+
+            fn construct_runtime_api<'a>(
+                call: &'a C,
+            ) -> #crate_::runtime_api::ApiRef<'a, Self::RuntimeApi> {
+                RuntimeApiImpl {
+                    call: unsafe { ::std::mem::transmute(call) },
+                    commit_on_success: true.into(),
+                    initialized_block: None.into(),
+                    changes: Default::default(),
+                }.into()
+            }
+        }
+
+        #[cfg(any(feature = "std", test))]
+        impl<C: #crate_::runtime_api::CallRuntimeAt<#block>> RuntimeApiImpl<C> {
+            fn call_api_at<
+                R: #crate_::runtime_api::Encode + #crate_::runtime_api::Decode + PartialEq,
+                NC: FnOnce() -> ::std::result::Result<R, &'static str> + ::std::panic::UnwindSafe,
+            >(
+                &self,
+                at: &#block_id,
+                function: &'static str,
+                args: Vec<u8>,
+                native_call: Option<NC>,
+                context: #crate_::runtime_api::ExecutionContext
+            ) -> #crate_::error::Result<#crate_::runtime_api::NativeOrEncoded<R>> {
+                let res = unsafe {
+                    self.call.call_api_at(
+                        at,
+                        function,
+                        args,
+                        &mut *self.changes.borrow_mut(),
+                        &mut *self.initialized_block.borrow_mut(),
+                        native_call,
+                        context
+                    )
+                };
+
+                self.commit_on_ok(&res);
+                res
+            }
+
+            fn commit_on_ok<R, E>(&self, res: &::std::result::Result<R, E>) {
+                if *self.commit_on_success.borrow() {
+                    if res.is_err() {
+                        self.changes.borrow_mut().discard_prospective();
+                    } else {
+                        self.changes.borrow_mut().commit_prospective();
+                    }
+                }
+            }
+        }
+    ))
 }
 
 /// Extend the given trait path with module that contains the declaration of the trait for the
 /// runtime.
 fn extend_with_runtime_decl_path(mut trait_: Path) -> Path {
-	let runtime = {
-		let trait_name = &trait_
-			.segments
-			.last()
-			.as_ref()
-			.expect("Trait path should always contain at least one item; qed")
-			.value()
-			.ident;
-
-		generate_runtime_mod_name_for_trait(trait_name)
-	};
-
-	let pos = trait_.segments.len() - 1;
-	trait_.segments.insert(pos, runtime.clone().into());
-	trait_
+    let runtime = {
+        let trait_name = &trait_
+            .segments
+            .last()
+            .as_ref()
+            .expect("Trait path should always contain at least one item; qed")
+            .value()
+            .ident;
+
+        generate_runtime_mod_name_for_trait(trait_name)
+    };
+
+    let pos = trait_.segments.len() - 1;
+    trait_.segments.insert(pos, runtime.clone().into());
+    trait_
 }
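For intuition: `extend_with_runtime_decl_path` turns a trait path such as `self::Api` into `self::runtime_decl_for_Api::Api`, splicing in the module that `decl_runtime_apis!` generates for the runtime-side trait. A string-level sketch of the same rewrite, with a hypothetical helper name (the real code manipulates `syn::Path` segments):

    // Sketch only: the path rewrite, modelled on plain strings.
    fn extend_path_sketch(path: &str) -> String {
        let (prefix, trait_name) = path.rsplit_once("::").unwrap_or(("", path));
        let module = format!("runtime_decl_for_{}", trait_name);
        if prefix.is_empty() {
            format!("{}::{}", module, trait_name)
        } else {
            format!("{}::{}::{}", prefix, module, trait_name)
        }
    }

    fn main() {
        assert_eq!(extend_path_sketch("self::Api"), "self::runtime_decl_for_Api::Api");
    }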
 /// Generates the implementations of the apis for the runtime.
 fn generate_api_impl_for_runtime(impls: &[ItemImpl]) -> Result<TokenStream> {
-	let mut impls_prepared = Vec::new();
+    let mut impls_prepared = Vec::new();
 
-	// We put `runtime` before each trait to get the trait that is intended for the runtime and
-	// we put the `RuntimeBlock` as first argument for the trait generics.
-	for impl_ in impls.iter() {
-		let mut impl_ = impl_.clone();
-		let trait_ = extract_impl_trait(&impl_)?.clone();
-		let trait_ = extend_with_runtime_decl_path(trait_);
+    // We put `runtime` before each trait to get the trait that is intended for the runtime and
+    // we put the `RuntimeBlock` as first argument for the trait generics.
+    for impl_ in impls.iter() {
+        let mut impl_ = impl_.clone();
+        let trait_ = extract_impl_trait(&impl_)?.clone();
+        let trait_ = extend_with_runtime_decl_path(trait_);
 
-		impl_.trait_.as_mut().unwrap().1 = trait_;
-		impls_prepared.push(impl_);
-	}
+        impl_.trait_.as_mut().unwrap().1 = trait_;
+        impls_prepared.push(impl_);
+    }
 
-	Ok(quote!( #( #impls_prepared )* ))
+    Ok(quote!( #( #impls_prepared )* ))
 }
 
-
 /// Auxiliary data structure that is used to convert `impl Api for Runtime` to
 /// `impl Api for RuntimeApi`.
 /// This requires us to replace the runtime `Block` with the node `Block`,
 /// `impl Api for Runtime` with `impl Api for RuntimeApi` and replace the method implementations
 /// with code that calls into the runtime.
 struct ApiRuntimeImplToApiRuntimeApiImpl<'a> {
-	node_block: &'a TokenStream,
-	runtime_block: &'a TypePath,
-	node_block_id: &'a TokenStream,
-	impl_trait_ident: &'a Ident,
-	runtime_mod_path: &'a Path,
-	runtime_type: &'a Type,
-	trait_generic_arguments: &'a [GenericArgument]
+    node_block: &'a TokenStream,
+    runtime_block: &'a TypePath,
+    node_block_id: &'a TokenStream,
+    impl_trait_ident: &'a Ident,
+    runtime_mod_path: &'a Path,
+    runtime_type: &'a Type,
+    trait_generic_arguments: &'a [GenericArgument],
 }
 
 impl<'a> Fold for ApiRuntimeImplToApiRuntimeApiImpl<'a> {
-	fn fold_type_path(&mut self, input: TypePath) -> TypePath {
-		let new_ty_path = if input == *self.runtime_block {
-			let node_block = self.node_block;
-			parse_quote!( #node_block )
-		} else {
-			input
-		};
-
-		fold::fold_type_path(self, new_ty_path)
-	}
-
-	fn fold_impl_item_method(&mut self, mut input: syn::ImplItemMethod) -> syn::ImplItemMethod {
-		let block = {
-			let runtime_mod_path = self.runtime_mod_path;
-			let runtime = self.runtime_type;
-			let fn_name = prefix_function_with_trait(self.impl_trait_ident, &input.sig.ident);
-			let native_call_generator_ident =
-				generate_native_call_generator_fn_name(&input.sig.ident);
-			let trait_generic_arguments = self.trait_generic_arguments;
-			let node_block = self.node_block;
-			let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID);
-			let block_id = self.node_block_id;
-
-			// Generate the access to the native parameters
-			let param_tuple_access = if input.sig.decl.inputs.len() == 1 {
-				vec![ quote!( p ) ]
-			} else {
-				input.sig.decl.inputs.iter().enumerate().map(|(i, _)| {
-					let i = syn::Index::from(i);
-					quote!( p.#i )
-				}).collect::<Vec<_>>()
-			};
-
-			let (param_types, error) = match extract_parameter_names_types_and_borrows(&input.sig.decl) {
-				Ok(res) => (
-					res.into_iter().map(|v| {
-						let ty = v.1;
-						let borrow = v.2;
-						quote!( #borrow #ty )
-					}).collect::<Vec<_>>(),
-					None
-				),
-				Err(e) => (Vec::new(), Some(e.to_compile_error())),
-			};
-
-			let context_arg: syn::FnArg = parse_quote!( context: #crate_::runtime_api::ExecutionContext );
-
-			// Rewrite the input parameters.
-			input.sig.decl.inputs = parse_quote! {
-				&self, at: &#block_id, #context_arg, params: Option<( #( #param_types ),* )>, params_encoded: Vec<u8>
-			};
-
-			input.sig.ident = generate_method_runtime_api_impl_name(&input.sig.ident);
-			let ret_type = return_type_extract_type(&input.sig.decl.output);
-
-			// Generate the correct return type.
-			input.sig.decl.output = parse_quote!(
-				-> #crate_::error::Result<#crate_::runtime_api::NativeOrEncoded<#ret_type>>
-			);
-
-			// Generate the new method implementation that calls into the runtime.
-			parse_quote!(
-				{
-					// Get the error to the user (if we have one).
-					#( #error )*
-
-					self.call_api_at(
-						at,
-						#fn_name,
-						params_encoded,
-						params.map(|p| {
-							#runtime_mod_path #native_call_generator_ident ::
-								<#runtime, #node_block #(, #trait_generic_arguments )*> (
-								#( #param_tuple_access ),*
-							)
-						}),
-						context,
-					)
-				}
-			)
-		};
-
-		let mut input = fold::fold_impl_item_method(self, input);
-		// We need to set the block, after we modified the rest of the ast, otherwise we would
-		// modify our generated block as well.
-		input.block = block;
-		input
-	}
-
-	fn fold_item_impl(&mut self, mut input: ItemImpl) -> ItemImpl {
-		// Implement the trait for the `RuntimeApiImpl`
-		input.self_ty = Box::new(parse_quote!( RuntimeApiImpl<RuntimeApiImplCall> ));
-
-		let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID);
-		let block = self.node_block;
-		input.generics.params.push(
+    fn fold_type_path(&mut self, input: TypePath) -> TypePath {
+        let new_ty_path = if input == *self.runtime_block {
+            let node_block = self.node_block;
+            parse_quote!( #node_block )
+        } else {
+            input
+        };
+
+        fold::fold_type_path(self, new_ty_path)
+    }
+
+    fn fold_impl_item_method(&mut self, mut input: syn::ImplItemMethod) -> syn::ImplItemMethod {
+        let block = {
+            let runtime_mod_path = self.runtime_mod_path;
+            let runtime = self.runtime_type;
+            let fn_name = prefix_function_with_trait(self.impl_trait_ident, &input.sig.ident);
+            let native_call_generator_ident =
+                generate_native_call_generator_fn_name(&input.sig.ident);
+            let trait_generic_arguments = self.trait_generic_arguments;
+            let node_block = self.node_block;
+            let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID);
+            let block_id = self.node_block_id;
+
+            // Generate the access to the native parameters
+            let param_tuple_access = if input.sig.decl.inputs.len() == 1 {
+                vec![quote!(p)]
+            } else {
+                input
+                    .sig
+                    .decl
+                    .inputs
+                    .iter()
+                    .enumerate()
+                    .map(|(i, _)| {
+                        let i = syn::Index::from(i);
+                        quote!( p.#i )
+                    })
+                    .collect::<Vec<_>>()
+            };
+
+            let (param_types, error) =
+                match extract_parameter_names_types_and_borrows(&input.sig.decl) {
+                    Ok(res) => (
+                        res.into_iter()
+                            .map(|v| {
+                                let ty = v.1;
+                                let borrow = v.2;
+                                quote!( #borrow #ty )
+                            })
+                            .collect::<Vec<_>>(),
+                        None,
+                    ),
+                    Err(e) => (Vec::new(), Some(e.to_compile_error())),
+                };
+
+            let context_arg: syn::FnArg =
+                parse_quote!( context: #crate_::runtime_api::ExecutionContext );
+
+            // Rewrite the input parameters.
+            input.sig.decl.inputs = parse_quote! {
+                &self, at: &#block_id, #context_arg, params: Option<( #( #param_types ),* )>, params_encoded: Vec<u8>
+            };
+
+            input.sig.ident = generate_method_runtime_api_impl_name(&input.sig.ident);
+            let ret_type = return_type_extract_type(&input.sig.decl.output);
+
+            // Generate the correct return type.
+            input.sig.decl.output = parse_quote!(
+                -> #crate_::error::Result<#crate_::runtime_api::NativeOrEncoded<#ret_type>>
+            );
+
+            // Generate the new method implementation that calls into the runtime.
+            parse_quote!(
+                {
+                    // Get the error to the user (if we have one).
+                    #( #error )*
+
+                    self.call_api_at(
+                        at,
+                        #fn_name,
+                        params_encoded,
+                        params.map(|p| {
+                            #runtime_mod_path #native_call_generator_ident ::
+                                <#runtime, #node_block #(, #trait_generic_arguments )*> (
+                                #( #param_tuple_access ),*
+                            )
+                        }),
+                        context,
+                    )
+                }
+            )
+        };
+
+        let mut input = fold::fold_impl_item_method(self, input);
+        // We need to set the block, after we modified the rest of the ast, otherwise we would
+        // modify our generated block as well.
+        input.block = block;
+        input
+    }
+
+    fn fold_item_impl(&mut self, mut input: ItemImpl) -> ItemImpl {
+        // Implement the trait for the `RuntimeApiImpl`
+        input.self_ty = Box::new(parse_quote!(RuntimeApiImpl<RuntimeApiImplCall>));
+
+        let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID);
+        let block = self.node_block;
+        input.generics.params.push(
 			parse_quote!( RuntimeApiImplCall: #crate_::runtime_api::CallRuntimeAt<#block> + 'static )
 		);
-		// The implementation for the `RuntimeApiImpl` is only required when compiling with
-		// the feature `std` or `test`.
-		input.attrs.push(parse_quote!( #[cfg(any(feature = "std", test))] ));
+        // The implementation for the `RuntimeApiImpl` is only required when compiling with
+        // the feature `std` or `test`.
+        input
+            .attrs
+            .push(parse_quote!( #[cfg(any(feature = "std", test))] ));
 
-		fold::fold_item_impl(self, input)
-	}
+        fold::fold_item_impl(self, input)
+    }
 }
 
 /// Generate the implementations of the runtime apis for the `RuntimeApi` type.
 fn generate_api_impl_for_runtime_api(impls: &[ItemImpl]) -> Result<TokenStream> {
-	let mut result = Vec::with_capacity(impls.len());
-
-	for impl_ in impls {
-		let impl_trait_path = extract_impl_trait(&impl_)?;
-		let impl_trait = &impl_trait_path
-			.segments
-			.last()
-			.ok_or_else(|| Error::new(impl_trait_path.span(), "Empty trait path not possible!"))?
-			.into_value();
-		let impl_trait_ident = &impl_trait.ident;
-		let runtime_block = extract_runtime_block_ident(impl_trait_path)?;
-		let (node_block, node_block_id) = generate_node_block_and_block_id_ty(&impl_.self_ty);
-		let runtime_type = &impl_.self_ty;
-		let mut runtime_mod_path = extend_with_runtime_decl_path(impl_trait_path.clone());
-		// remove the trait to get just the module path
-		runtime_mod_path.segments.pop();
-
-		let trait_generic_arguments = match impl_trait.arguments {
-			PathArguments::Parenthesized(_) | PathArguments::None => vec![],
-			PathArguments::AngleBracketed(ref b) => b.args.iter().cloned().collect(),
-		};
-
-		let mut visitor = ApiRuntimeImplToApiRuntimeApiImpl {
-			runtime_block,
-			node_block: &node_block,
-			node_block_id: &node_block_id,
-			impl_trait_ident: &impl_trait_ident,
-			runtime_mod_path: &runtime_mod_path,
-			runtime_type: &*runtime_type,
-			trait_generic_arguments: &trait_generic_arguments,
-		};
-
-		result.push(visitor.fold_item_impl(impl_.clone()));
-	}
-	Ok(quote!( #( #result )* ))
+    let mut result = Vec::with_capacity(impls.len());
+
+    for impl_ in impls {
+        let impl_trait_path = extract_impl_trait(&impl_)?;
+        let impl_trait = &impl_trait_path
+            .segments
+            .last()
+            .ok_or_else(|| Error::new(impl_trait_path.span(), "Empty trait path not possible!"))?
+            .into_value();
+        let impl_trait_ident = &impl_trait.ident;
+        let runtime_block = extract_runtime_block_ident(impl_trait_path)?;
+        let (node_block, node_block_id) = generate_node_block_and_block_id_ty(&impl_.self_ty);
+        let runtime_type = &impl_.self_ty;
+        let mut runtime_mod_path = extend_with_runtime_decl_path(impl_trait_path.clone());
+        // remove the trait to get just the module path
+        runtime_mod_path.segments.pop();
+
+        let trait_generic_arguments = match impl_trait.arguments {
+            PathArguments::Parenthesized(_) | PathArguments::None => vec![],
+            PathArguments::AngleBracketed(ref b) => b.args.iter().cloned().collect(),
+        };
+
+        let mut visitor = ApiRuntimeImplToApiRuntimeApiImpl {
+            runtime_block,
+            node_block: &node_block,
+            node_block_id: &node_block_id,
+            impl_trait_ident: &impl_trait_ident,
+            runtime_mod_path: &runtime_mod_path,
+            runtime_type: &*runtime_type,
+            trait_generic_arguments: &trait_generic_arguments,
+        };
+
+        result.push(visitor.fold_item_impl(impl_.clone()));
+    }
+    Ok(quote!( #( #result )* ))
 }
 
 /// Generates `RUNTIME_API_VERSIONS` that holds all version information about the implemented
 /// runtime apis.
 fn generate_runtime_api_versions(impls: &[ItemImpl]) -> Result<TokenStream> {
-	let mut result = Vec::with_capacity(impls.len());
-	let mut processed_traits = HashSet::new();
-
-	for impl_ in impls {
-		let mut path = extend_with_runtime_decl_path(extract_impl_trait(&impl_)?.clone());
-		// Remove the trait
-		let trait_ = path
-			.segments
-			.pop()
-			.expect("extract_impl_trait already checks that this is valid; qed")
-			.into_value()
-			.ident;
-
-		let span = trait_.span();
-		if !processed_traits.insert(trait_) {
-			return Err(
-				Error::new(
-					span,
-					"Two traits with the same name detected! \
-					The trait name is used to generate its ID. \
-					Please rename one trait at the declaration!"
-				)
-			)
-		}
-
-		let id: Path = parse_quote!( #path ID );
-		let version: Path = parse_quote!( #path VERSION );
-
-		result.push(quote!( (#id, #version) ));
-	}
-
-	let c = generate_crate_access(HIDDEN_INCLUDES_ID);
-
-	Ok(quote!(
-		const RUNTIME_API_VERSIONS: #c::runtime_api::ApisVec =
-			#c::runtime_api::create_apis_vec!([ #( #result ),* ]);
-	))
+    let mut result = Vec::with_capacity(impls.len());
+    let mut processed_traits = HashSet::new();
+
+    for impl_ in impls {
+        let mut path = extend_with_runtime_decl_path(extract_impl_trait(&impl_)?.clone());
+        // Remove the trait
+        let trait_ = path
+            .segments
+            .pop()
+            .expect("extract_impl_trait already checks that this is valid; qed")
+            .into_value()
+            .ident;
+
+        let span = trait_.span();
+        if !processed_traits.insert(trait_) {
+            return Err(Error::new(
+                span,
+                "Two traits with the same name detected! \
+                 The trait name is used to generate its ID. \
+                 Please rename one trait at the declaration!",
+            ));
+        }
+
+        let id: Path = parse_quote!( #path ID );
+        let version: Path = parse_quote!( #path VERSION );
+
+        result.push(quote!( (#id, #version) ));
+    }
+
+    let c = generate_crate_access(HIDDEN_INCLUDES_ID);
+
+    Ok(quote!(
+        const RUNTIME_API_VERSIONS: #c::runtime_api::ApisVec =
+            #c::runtime_api::create_apis_vec!([ #( #result ),* ]);
+    ))
 }
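The constant generated here is a list of `(ID, VERSION)` pairs, one per implemented api trait; the duplicate-name check above exists because an ID is derived from the trait name alone. A hand-written sketch of the table's shape (the IDs below are made up; the real ones are 8-byte identifiers produced by the macro, and `ApisVec` wraps the slice in a `Cow`):

    // Sketch only: the shape of the generated `RUNTIME_API_VERSIONS` table.
    const RUNTIME_API_VERSIONS_SKETCH: &[([u8; 8], u32)] = &[
        (*b"Api_____", 1), // (ID, VERSION) of a first api trait
        (*b"Custom__", 2), // (ID, VERSION) of a second, versioned trait
    ];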
 /// The implementation of the `impl_runtime_apis!` macro.
 pub fn impl_runtime_apis_impl(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
-	// Parse all impl blocks
-	let RuntimeApiImpls { impls: api_impls } = parse_macro_input!(input as RuntimeApiImpls);
+    // Parse all impl blocks
+    let RuntimeApiImpls { impls: api_impls } = parse_macro_input!(input as RuntimeApiImpls);
 
-	let dispatch_impl = unwrap_or_error(generate_dispatch_function(&api_impls));
-	let api_impls_for_runtime = unwrap_or_error(generate_api_impl_for_runtime(&api_impls));
-	let base_runtime_api = unwrap_or_error(generate_runtime_api_base_structures(&api_impls));
-	let hidden_includes = generate_hidden_includes(HIDDEN_INCLUDES_ID);
-	let runtime_api_versions = unwrap_or_error(generate_runtime_api_versions(&api_impls));
-	let wasm_interface = unwrap_or_error(generate_wasm_interface(&api_impls));
-	let api_impls_for_runtime_api = unwrap_or_error(generate_api_impl_for_runtime_api(&api_impls));
+    let dispatch_impl = unwrap_or_error(generate_dispatch_function(&api_impls));
+    let api_impls_for_runtime = unwrap_or_error(generate_api_impl_for_runtime(&api_impls));
+    let base_runtime_api = unwrap_or_error(generate_runtime_api_base_structures(&api_impls));
+    let hidden_includes = generate_hidden_includes(HIDDEN_INCLUDES_ID);
+    let runtime_api_versions = unwrap_or_error(generate_runtime_api_versions(&api_impls));
+    let wasm_interface = unwrap_or_error(generate_wasm_interface(&api_impls));
+    let api_impls_for_runtime_api = unwrap_or_error(generate_api_impl_for_runtime_api(&api_impls));
 
-	quote!(
-		#hidden_includes
+    quote!(
+        #hidden_includes
 
-		#base_runtime_api
+        #base_runtime_api
 
-		#api_impls_for_runtime
+        #api_impls_for_runtime
 
-		#api_impls_for_runtime_api
+        #api_impls_for_runtime_api
 
-		#runtime_api_versions
+        #runtime_api_versions
 
-		pub mod api {
-			use super::*;
+        pub mod api {
+            use super::*;
 
-			#dispatch_impl
+            #dispatch_impl
 
-			#wasm_interface
-		}
-	).into()
+            #wasm_interface
+        }
+    )
+    .into()
 }
diff --git a/core/sr-api-macros/src/lib.rs b/core/sr-api-macros/src/lib.rs
index 72e143eb1a..27d87c8a97 100644
--- a/core/sr-api-macros/src/lib.rs
+++ b/core/sr-api-macros/src/lib.rs
@@ -21,10 +21,10 @@
 extern crate proc_macro;
 
 use proc_macro::TokenStream;
 
-mod impl_runtime_apis;
+mod compile_fail_tests;
 mod decl_runtime_apis;
+mod impl_runtime_apis;
 mod utils;
-mod compile_fail_tests;
 
 /// Tags given trait implementations as runtime apis.
 ///
@@ -110,7 +110,7 @@ mod compile_fail_tests;
 /// ```
 #[proc_macro]
 pub fn impl_runtime_apis(input: TokenStream) -> TokenStream {
-	impl_runtime_apis::impl_runtime_apis_impl(input)
+    impl_runtime_apis::impl_runtime_apis_impl(input)
 }
 
 /// Declares given traits as runtime apis.
@@ -191,5 +191,5 @@ pub fn impl_runtime_apis(input: TokenStream) -> TokenStream {
 /// check if the runtime at the given block id implements the requested runtime api trait.
 #[proc_macro]
 pub fn decl_runtime_apis(input: TokenStream) -> TokenStream {
-	decl_runtime_apis::decl_runtime_apis_impl(input)
+    decl_runtime_apis::decl_runtime_apis_impl(input)
 }
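Taken together, `impl_runtime_apis!` expands to an `api` module whose native-side `dispatch` routes `"Trait_method"` strings (built by `prefix_function_with_trait`) to the generated implementation calls. A hand-written sketch of that shape, using the `Api::test` trait from the tests further down in this patch; the real generated arm decodes the parameters and calls into the runtime instead of `unimplemented!`:

    // Sketch only: the dispatch shape emitted by `generate_dispatch_function`.
    pub fn dispatch(method: &str, mut data: &[u8]) -> Option<Vec<u8>> {
        match method {
            // One arm per trait method.
            "Api_test" => Some({
                let _ = &mut data; // the generated body decodes the `u64` parameter from `data`
                unimplemented!()
            }),
            _ => None,
        }
    }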
diff --git a/core/sr-api-macros/src/utils.rs b/core/sr-api-macros/src/utils.rs
index e593e41ebe..04506fcec9 100644
--- a/core/sr-api-macros/src/utils.rs
+++ b/core/sr-api-macros/src/utils.rs
@@ -14,147 +14,157 @@
 // You should have received a copy of the GNU General Public License
 // along with Substrate. If not, see <http://www.gnu.org/licenses/>.
 
-use proc_macro2::{TokenStream, Span};
-use syn::{Result, Ident, FnDecl, parse_quote, Type, Pat, spanned::Spanned, FnArg, Error};
+use proc_macro2::{Span, TokenStream};
+use proc_macro_crate::crate_name;
 use quote::quote;
 use std::env;
-use proc_macro_crate::crate_name;
+use syn::{parse_quote, spanned::Spanned, Error, FnArg, FnDecl, Ident, Pat, Result, Type};
 
 /// Unwrap the given result, if it is an error, `compile_error!` will be generated.
 pub fn unwrap_or_error(res: Result<TokenStream>) -> TokenStream {
-	res.unwrap_or_else(|e| e.to_compile_error())
+    res.unwrap_or_else(|e| e.to_compile_error())
 }
 
 fn generate_hidden_includes_mod_name(unique_id: &'static str) -> Ident {
-	Ident::new(&format!("sr_api_hidden_includes_{}", unique_id), Span::call_site())
+    Ident::new(
+        &format!("sr_api_hidden_includes_{}", unique_id),
+        Span::call_site(),
+    )
 }
 
 /// Generates the hidden includes that are required to make the macro independent from its scope.
 pub fn generate_hidden_includes(unique_id: &'static str) -> TokenStream {
-	if env::var("CARGO_PKG_NAME").unwrap() == "substrate-client" {
-		TokenStream::new()
-	} else {
-		let mod_name = generate_hidden_includes_mod_name(unique_id);
-		match crate_name("substrate-client") {
-			Ok(client_name) => {
-				let client_name = Ident::new(&client_name, Span::call_site());
-				quote!(
-					#[doc(hidden)]
-					mod #mod_name {
-						pub extern crate #client_name as sr_api_client;
-					}
-				)
-			},
-			Err(e) => {
-				let err = Error::new(Span::call_site(), &e).to_compile_error();
-				quote!( #err )
-			}
-		}
-
-	}.into()
+    if env::var("CARGO_PKG_NAME").unwrap() == "substrate-client" {
+        TokenStream::new()
+    } else {
+        let mod_name = generate_hidden_includes_mod_name(unique_id);
+        match crate_name("substrate-client") {
+            Ok(client_name) => {
+                let client_name = Ident::new(&client_name, Span::call_site());
+                quote!(
+                    #[doc(hidden)]
+                    mod #mod_name {
+                        pub extern crate #client_name as sr_api_client;
+                    }
+                )
+            }
+            Err(e) => {
+                let err = Error::new(Span::call_site(), &e).to_compile_error();
+                quote!( #err )
+            }
+        }
+    }
+    .into()
 }
 
 /// Generates the access to the `substrate_client` crate.
 pub fn generate_crate_access(unique_id: &'static str) -> TokenStream {
-	if env::var("CARGO_PKG_NAME").unwrap() == "substrate-client" {
-		quote!( crate )
-	} else {
-		let mod_name = generate_hidden_includes_mod_name(unique_id);
-		quote!( self::#mod_name::sr_api_client )
-	}.into()
+    if env::var("CARGO_PKG_NAME").unwrap() == "substrate-client" {
+        quote!(crate)
+    } else {
+        let mod_name = generate_hidden_includes_mod_name(unique_id);
+        quote!( self::#mod_name::sr_api_client )
+    }
+    .into()
 }
 
 /// Generates the name of the module that contains the trait declaration for the runtime.
 pub fn generate_runtime_mod_name_for_trait(trait_: &Ident) -> Ident {
-	Ident::new(&format!("runtime_decl_for_{}", trait_.to_string()), Span::call_site())
+    Ident::new(
+        &format!("runtime_decl_for_{}", trait_.to_string()),
+        Span::call_site(),
+    )
 }
 
 /// Generates a name for a method that needs to be implemented in the runtime for the client side.
 pub fn generate_method_runtime_api_impl_name(method: &Ident) -> Ident {
-	Ident::new(&format!("{}_runtime_api_impl", method.to_string()), Span::call_site())
+    Ident::new(
+        &format!("{}_runtime_api_impl", method.to_string()),
+        Span::call_site(),
+    )
 }
 
 /// Get the type of a `syn::ReturnType`.
 pub fn return_type_extract_type(rt: &syn::ReturnType) -> Type {
-	match rt {
-		syn::ReturnType::Default => parse_quote!( () ),
-		syn::ReturnType::Type(_, ref ty) => *ty.clone(),
-	}
+    match rt {
+        syn::ReturnType::Default => parse_quote!(()),
+        syn::ReturnType::Type(_, ref ty) => *ty.clone(),
+    }
 }
 
 /// Fold the given `FnDecl` to make it usable on the client side.
 pub fn fold_fn_decl_for_client_side(
-	mut input: FnDecl,
-	block_id: &TokenStream,
-	crate_: &TokenStream
+    mut input: FnDecl,
+    block_id: &TokenStream,
+    crate_: &TokenStream,
 ) -> FnDecl {
-	// Add `&self, at:& BlockId` as parameters to each function at the beginning.
-	input.inputs.insert(0, parse_quote!( at: &#block_id ));
-	input.inputs.insert(0, parse_quote!( &self ));
+    // Add `&self, at:& BlockId` as parameters to each function at the beginning.
+    input.inputs.insert(0, parse_quote!( at: &#block_id ));
+    input.inputs.insert(0, parse_quote!(&self));
 
-	// Wrap the output in a `Result`
-	input.output = {
-		let ty = return_type_extract_type(&input.output);
-		parse_quote!( -> ::std::result::Result<#ty, #crate_::error::Error> )
-	};
+    // Wrap the output in a `Result`
+    input.output = {
+        let ty = return_type_extract_type(&input.output);
+        parse_quote!( -> ::std::result::Result<#ty, #crate_::error::Error> )
+    };
 
-	input
+    input
 }
 
 /// Generate an unique pattern based on the given counter, if the given pattern is a `_`.
 pub fn generate_unique_pattern(pat: Pat, counter: &mut u32) -> Pat {
-	match pat {
-		Pat::Wild(_) => {
-			let generated_name = Ident::new(
-				&format!("runtime_api_generated_name_{}", counter),
-				pat.span()
-			);
-			*counter += 1;
-
-			parse_quote!( #generated_name )
-		},
-		_ => pat,
-	}
+    match pat {
+        Pat::Wild(_) => {
+            let generated_name = Ident::new(
+                &format!("runtime_api_generated_name_{}", counter),
+                pat.span(),
+            );
+            *counter += 1;
+
+            parse_quote!( #generated_name )
+        }
+        _ => pat,
+    }
 }
 
 /// Extracts the name, the type and `&` or ``(if it is a reference or not)
 /// for each parameter in the given function declaration.
-pub fn extract_parameter_names_types_and_borrows(fn_decl: &FnDecl)
-	-> Result<Vec<(Pat, Type, TokenStream)>>
-{
-	let mut result = Vec::new();
-	let mut generated_pattern_counter = 0;
-	for input in fn_decl.inputs.iter() {
-		match input {
-			FnArg::Captured(arg) => {
-				let (ty, borrow) = match &arg.ty {
-					Type::Reference(t) => {
-						let ty = &t.elem;
-						(parse_quote!( #ty ), quote!( & ))
-					},
-					t => { (t.clone(), quote!()) },
-				};
-
-				let name =
-					generate_unique_pattern(arg.pat.clone(), &mut generated_pattern_counter);
-				result.push((name, ty, borrow));
-			},
-			_ => {
-				return Err(
-					Error::new(
-						input.span(),
-						"Only function arguments with the following \
-						pattern are accepted: `name: type`!"
-					)
-				)
-			}
-		}
-	}
-
-	Ok(result)
+pub fn extract_parameter_names_types_and_borrows(
+    fn_decl: &FnDecl,
+) -> Result<Vec<(Pat, Type, TokenStream)>> {
+    let mut result = Vec::new();
+    let mut generated_pattern_counter = 0;
+    for input in fn_decl.inputs.iter() {
+        match input {
+            FnArg::Captured(arg) => {
+                let (ty, borrow) = match &arg.ty {
+                    Type::Reference(t) => {
+                        let ty = &t.elem;
+                        (parse_quote!( #ty ), quote!( & ))
+                    }
+                    t => (t.clone(), quote!()),
+                };
+
+                let name = generate_unique_pattern(arg.pat.clone(), &mut generated_pattern_counter);
+                result.push((name, ty, borrow));
+            }
+            _ => {
+                return Err(Error::new(
+                    input.span(),
+                    "Only function arguments with the following \
+                     pattern are accepted: `name: type`!",
+                ));
+            }
+        }
+    }
+
+    Ok(result)
 }
 
 /// Generates the name for the native call generator function.
 pub fn generate_native_call_generator_fn_name(fn_name: &Ident) -> Ident {
-	Ident::new(&format!("{}_native_call_generator", fn_name.to_string()), Span::call_site())
+    Ident::new(
+        &format!("{}_native_call_generator", fn_name.to_string()),
+        Span::call_site(),
+    )
 }
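The wildcard handling in `generate_unique_pattern` above matters because the macros must forward every argument by name. A sketch of the renaming scheme on plain strings, with a hypothetical helper name (the real function operates on `syn::Pat`):

    // Sketch only: `_` placeholders get fresh names, real bindings pass through.
    fn unique_pattern(pat: &str, counter: &mut u32) -> String {
        if pat == "_" {
            let name = format!("runtime_api_generated_name_{}", counter);
            *counter += 1;
            name
        } else {
            pat.to_string()
        }
    }

    fn main() {
        let mut counter = 0;
        assert_eq!(unique_pattern("_", &mut counter), "runtime_api_generated_name_0");
        assert_eq!(unique_pattern("data", &mut counter), "data");
        assert_eq!(unique_pattern("_", &mut counter), "runtime_api_generated_name_1");
    }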
diff --git a/core/sr-api-macros/tests/decl_and_impl.rs b/core/sr-api-macros/tests/decl_and_impl.rs
index a8b3156123..11365ae5f6 100644
--- a/core/sr-api-macros/tests/decl_and_impl.rs
+++ b/core/sr-api-macros/tests/decl_and_impl.rs
@@ -14,112 +14,121 @@
 // You should have received a copy of the GNU General Public License
 // along with Substrate. If not, see <http://www.gnu.org/licenses/>.
 
-use runtime_primitives::traits::{GetNodeBlockType, Block as BlockT};
-use runtime_primitives::generic::BlockId;
 use client::runtime_api::{self, RuntimeApiInfo};
-use client::{error::Result, decl_runtime_apis, impl_runtime_apis};
+use client::{decl_runtime_apis, error::Result, impl_runtime_apis};
+use runtime_primitives::generic::BlockId;
+use runtime_primitives::traits::{Block as BlockT, GetNodeBlockType};
 use test_client::runtime::Block;
 
 /// The declaration of the `Runtime` type and the implementation of the `GetNodeBlockType`
 /// trait are done by the `construct_runtime!` macro in a real runtime.
 pub struct Runtime {}
 impl GetNodeBlockType for Runtime {
-	type NodeBlock = Block;
+    type NodeBlock = Block;
 }
 
 decl_runtime_apis! {
-	pub trait Api {
-		fn test(data: u64);
-		fn something_with_block(block: Block) -> Block;
-		fn function_with_two_args(data: u64, block: Block);
-		fn same_name();
-	}
-
-	#[api_version(2)]
-	pub trait ApiWithCustomVersion {
-		fn same_name();
-		#[changed_in(2)]
-		fn same_name() -> String;
-	}
+    pub trait Api {
+        fn test(data: u64);
+        fn something_with_block(block: Block) -> Block;
+        fn function_with_two_args(data: u64, block: Block);
+        fn same_name();
+    }
+
+    #[api_version(2)]
+    pub trait ApiWithCustomVersion {
+        fn same_name();
+        #[changed_in(2)]
+        fn same_name() -> String;
+    }
 }
 
 impl_runtime_apis! {
-	impl self::Api<Block> for Runtime {
-		fn test(_: u64) {
-			unimplemented!()
-		}
-
-		fn something_with_block(_: Block) -> Block {
-			unimplemented!()
-		}
-
-		fn function_with_two_args(_: u64, _: Block) {
-			unimplemented!()
-		}
-
-		fn same_name() {}
-	}
-
-	impl self::ApiWithCustomVersion<Block> for Runtime {
-		fn same_name() {}
-	}
-
-	impl runtime_api::Core<Block> for Runtime {
-		fn version() -> runtime_api::RuntimeVersion {
-			unimplemented!()
-		}
-		fn execute_block(_: Block) {
-			unimplemented!()
-		}
-		fn initialize_block(_: &<Block as BlockT>::Header) {
-			unimplemented!()
-		}
-	}
+    impl self::Api<Block> for Runtime {
+        fn test(_: u64) {
+            unimplemented!()
+        }
+
+        fn something_with_block(_: Block) -> Block {
+            unimplemented!()
+        }
+
+        fn function_with_two_args(_: u64, _: Block) {
+            unimplemented!()
+        }
+
+        fn same_name() {}
+    }
+
+    impl self::ApiWithCustomVersion<Block> for Runtime {
+        fn same_name() {}
+    }
+
+    impl runtime_api::Core<Block> for Runtime {
+        fn version() -> runtime_api::RuntimeVersion {
+            unimplemented!()
+        }
+        fn execute_block(_: Block) {
+            unimplemented!()
+        }
+        fn initialize_block(_: &<Block as BlockT>::Header) {
+            unimplemented!()
+        }
+    }
 }
 
 type TestClient = client::Client<test_client::Backend, test_client::Executor, Block, RuntimeApi>;
 
 #[test]
 fn test_client_side_function_signature() {
-	let _test: fn(&RuntimeApiImpl<TestClient>, &BlockId<Block>, u64) -> Result<()> =
-		RuntimeApiImpl::<TestClient>::test;
-	let _something_with_block:
-		fn(&RuntimeApiImpl<TestClient>, &BlockId<Block>, Block) -> Result<Block> =
-			RuntimeApiImpl::<TestClient>::something_with_block;
-
-	#[allow(deprecated)]
-	let _same_name_before_version_2:
-		fn(&RuntimeApiImpl<TestClient>, &BlockId<Block>) -> Result<String> =
-			RuntimeApiImpl::<TestClient>::same_name_before_version_2;
+    let _test: fn(&RuntimeApiImpl<TestClient>, &BlockId<Block>, u64) -> Result<()> =
+        RuntimeApiImpl::<TestClient>::test;
+    let _something_with_block: fn(
+        &RuntimeApiImpl<TestClient>,
+        &BlockId<Block>,
+        Block,
+    ) -> Result<Block> = RuntimeApiImpl::<TestClient>::something_with_block;
+
+    #[allow(deprecated)]
+    let _same_name_before_version_2: fn(
+        &RuntimeApiImpl<TestClient>,
+        &BlockId<Block>,
+    ) -> Result<String> = RuntimeApiImpl::<TestClient>::same_name_before_version_2;
 }
 
 #[test]
 fn test_runtime_side_function_signature() {
-	let _api_same_name: fn(input_data: *mut u8, input_len: usize) -> u64 = api::Api_same_name;
-	let _api_with_version_same_name: fn(input_data: *mut u8, input_len: usize) -> u64 =
-		api::ApiWithCustomVersion_same_name;
+    let _api_same_name: fn(input_data: *mut u8, input_len: usize) -> u64 = api::Api_same_name;
+    let _api_with_version_same_name: fn(input_data: *mut u8, input_len: usize) -> u64 =
+        api::ApiWithCustomVersion_same_name;
 }
 
 #[test]
 fn check_runtime_api_info() {
-	assert_eq!(&Api::<Runtime>::ID, &runtime_decl_for_Api::ID);
-	assert_eq!(Api::<Runtime>::VERSION, runtime_decl_for_Api::VERSION);
-	assert_eq!(Api::<Runtime>::VERSION, 1);
-
-	assert_eq!(
-		ApiWithCustomVersion::<Runtime>::VERSION, runtime_decl_for_ApiWithCustomVersion::VERSION
-	);
-	assert_eq!(&ApiWithCustomVersion::<Runtime>::ID, &runtime_decl_for_ApiWithCustomVersion::ID);
-	assert_eq!(ApiWithCustomVersion::<Runtime>::VERSION, 2);
+    assert_eq!(&Api::<Runtime>::ID, &runtime_decl_for_Api::ID);
+    assert_eq!(Api::<Runtime>::VERSION, runtime_decl_for_Api::VERSION);
+    assert_eq!(Api::<Runtime>::VERSION, 1);
+
+    assert_eq!(
+        ApiWithCustomVersion::<Runtime>::VERSION,
+        runtime_decl_for_ApiWithCustomVersion::VERSION
+    );
+    assert_eq!(
+        &ApiWithCustomVersion::<Runtime>::ID,
+        &runtime_decl_for_ApiWithCustomVersion::ID
+    );
+    assert_eq!(ApiWithCustomVersion::<Runtime>::VERSION, 2);
 }
 
 fn check_runtime_api_versions_contains<T: RuntimeApiInfo + ?Sized>() {
-	assert!(RUNTIME_API_VERSIONS.iter().any(|v| v == &(T::ID, T::VERSION)));
+    assert!(RUNTIME_API_VERSIONS
+        .iter()
+        .any(|v| v == &(T::ID, T::VERSION)));
 }
 
 #[test]
 fn check_runtime_api_versions() {
-	check_runtime_api_versions_contains::<runtime_decl_for_Api::Api<Block>>();
-	check_runtime_api_versions_contains::<runtime_decl_for_ApiWithCustomVersion::ApiWithCustomVersion<Block>>();
-	check_runtime_api_versions_contains::<runtime_decl_for_Core::Core<Block>>();
+    check_runtime_api_versions_contains::<runtime_decl_for_Api::Api<Block>>();
+    check_runtime_api_versions_contains::<runtime_decl_for_ApiWithCustomVersion::ApiWithCustomVersion<Block>>();
+    check_runtime_api_versions_contains::<runtime_decl_for_Core::Core<Block>>();
 }
diff --git a/core/sr-api-macros/tests/runtime_calls.rs b/core/sr-api-macros/tests/runtime_calls.rs
index 92e7a38924..5b6e5cf8bd 100644
--- a/core/sr-api-macros/tests/runtime_calls.rs
+++ b/core/sr-api-macros/tests/runtime_calls.rs
@@ -14,95 +14,101 @@
 // You should have received a copy of the GNU General Public License
 // along with Substrate. If not, see <http://www.gnu.org/licenses/>.
 
-use test_client::runtime::{TestAPI, DecodeFails};
 use runtime_primitives::{generic::BlockId, traits::ProvideRuntimeApi};
 use state_machine::ExecutionStrategy;
+use test_client::runtime::{DecodeFails, TestAPI};
 
 fn calling_function_with_strat(strat: ExecutionStrategy) {
-	let client = test_client::new_with_execution_strategy(strat);
-	let runtime_api = client.runtime_api();
-	let block_id = BlockId::Number(client.info().unwrap().chain.best_number);
+    let client = test_client::new_with_execution_strategy(strat);
+    let runtime_api = client.runtime_api();
+    let block_id = BlockId::Number(client.info().unwrap().chain.best_number);
 
-	assert_eq!(runtime_api.benchmark_add_one(&block_id, &1).unwrap(), 2);
+    assert_eq!(runtime_api.benchmark_add_one(&block_id, &1).unwrap(), 2);
 }
 
 #[test]
 fn calling_native_runtime_function() {
-	calling_function_with_strat(ExecutionStrategy::NativeWhenPossible);
+    calling_function_with_strat(ExecutionStrategy::NativeWhenPossible);
 }
 
 #[test]
 fn calling_wasm_runtime_function() {
-	calling_function_with_strat(ExecutionStrategy::AlwaysWasm);
+    calling_function_with_strat(ExecutionStrategy::AlwaysWasm);
 }
 
 #[test]
 #[should_panic(expected = "Could not convert parameter `param` between node and runtime!")]
fn calling_native_runtime_function_with_non_decodable_parameter() {
-	let client = test_client::new_with_execution_strategy(ExecutionStrategy::NativeWhenPossible);
-	let runtime_api = client.runtime_api();
-	let block_id = BlockId::Number(client.info().unwrap().chain.best_number);
-	runtime_api.fail_convert_parameter(&block_id, DecodeFails::new()).unwrap();
+    let client = test_client::new_with_execution_strategy(ExecutionStrategy::NativeWhenPossible);
+    let runtime_api = client.runtime_api();
+    let block_id = BlockId::Number(client.info().unwrap().chain.best_number);
+    runtime_api
+        .fail_convert_parameter(&block_id, DecodeFails::new())
+        .unwrap();
 }
 
 #[test]
 #[should_panic(expected = "Could not convert return value from runtime to node!")]
 fn calling_native_runtime_function_with_non_decodable_return_value() {
-	let client = test_client::new_with_execution_strategy(ExecutionStrategy::NativeWhenPossible);
-	let runtime_api = client.runtime_api();
-	let block_id = BlockId::Number(client.info().unwrap().chain.best_number);
-	runtime_api.fail_convert_return_value(&block_id).unwrap();
+    let client = test_client::new_with_execution_strategy(ExecutionStrategy::NativeWhenPossible);
+    let runtime_api = client.runtime_api();
+    let block_id = BlockId::Number(client.info().unwrap().chain.best_number);
+    runtime_api.fail_convert_return_value(&block_id).unwrap();
 }
 
 #[test]
 fn calling_native_runtime_signature_changed_function() {
-	let client = test_client::new_with_execution_strategy(ExecutionStrategy::NativeWhenPossible);
-	let runtime_api = client.runtime_api();
-	let block_id = BlockId::Number(client.info().unwrap().chain.best_number);
-
-	assert_eq!(runtime_api.function_signature_changed(&block_id).unwrap(), 1);
+    let client = test_client::new_with_execution_strategy(ExecutionStrategy::NativeWhenPossible);
+    let runtime_api = client.runtime_api();
+    let block_id = BlockId::Number(client.info().unwrap().chain.best_number);
+
+    assert_eq!(
+        runtime_api.function_signature_changed(&block_id).unwrap(),
+        1
+    );
 }
 
 #[test]
 fn calling_wasm_runtime_signature_changed_old_function() {
-	let client = test_client::new_with_execution_strategy(ExecutionStrategy::AlwaysWasm);
-	let runtime_api = client.runtime_api();
-	let block_id = BlockId::Number(client.info().unwrap().chain.best_number);
-
-	#[allow(deprecated)]
-	let res = runtime_api.function_signature_changed_before_version_2(&block_id).unwrap();
-	assert_eq!(&res, &[1, 2]);
+    let client = test_client::new_with_execution_strategy(ExecutionStrategy::AlwaysWasm);
+    let runtime_api = client.runtime_api();
+    let block_id = BlockId::Number(client.info().unwrap().chain.best_number);
+
+    #[allow(deprecated)]
+    let res = runtime_api
+        .function_signature_changed_before_version_2(&block_id)
+        .unwrap();
+    assert_eq!(&res, &[1, 2]);
 }
 
 #[test]
 fn calling_with_both_strategy_and_fail_on_wasm_should_return_error() {
-	let client = test_client::new_with_execution_strategy(ExecutionStrategy::Both);
-	let runtime_api = client.runtime_api();
-	let block_id = BlockId::Number(client.info().unwrap().chain.best_number);
-	assert!(runtime_api.fail_on_wasm(&block_id).is_err());
+    let client = test_client::new_with_execution_strategy(ExecutionStrategy::Both);
+    let runtime_api = client.runtime_api();
+    let block_id = BlockId::Number(client.info().unwrap().chain.best_number);
+    assert!(runtime_api.fail_on_wasm(&block_id).is_err());
 }
 
 #[test]
 fn calling_with_both_strategy_and_fail_on_native_should_work() {
-	let client = test_client::new_with_execution_strategy(ExecutionStrategy::Both);
-	let runtime_api = client.runtime_api();
-	let block_id = BlockId::Number(client.info().unwrap().chain.best_number);
-	assert_eq!(runtime_api.fail_on_native(&block_id).unwrap(), 1);
+    let client = test_client::new_with_execution_strategy(ExecutionStrategy::Both);
+    let runtime_api = client.runtime_api();
+    let block_id = BlockId::Number(client.info().unwrap().chain.best_number);
+    assert_eq!(runtime_api.fail_on_native(&block_id).unwrap(), 1);
 }
 
-
 #[test]
 fn calling_with_native_else_wasm_and_faild_on_wasm_should_work() {
-	let client = test_client::new_with_execution_strategy(ExecutionStrategy::NativeElseWasm);
-	let runtime_api = client.runtime_api();
-	let block_id = BlockId::Number(client.info().unwrap().chain.best_number);
-	assert_eq!(runtime_api.fail_on_wasm(&block_id).unwrap(), 1);
+    let client = test_client::new_with_execution_strategy(ExecutionStrategy::NativeElseWasm);
+    let runtime_api = client.runtime_api();
+    let block_id = BlockId::Number(client.info().unwrap().chain.best_number);
+    assert_eq!(runtime_api.fail_on_wasm(&block_id).unwrap(), 1);
 }
 
 #[test]
 fn calling_with_native_else_wasm_and_fail_on_native_should_work() {
-	let client = test_client::new_with_execution_strategy(ExecutionStrategy::NativeElseWasm);
-	let runtime_api = client.runtime_api();
-	let block_id = BlockId::Number(client.info().unwrap().chain.best_number);
-	assert_eq!(runtime_api.fail_on_native(&block_id).unwrap(), 1);
+    let client = test_client::new_with_execution_strategy(ExecutionStrategy::NativeElseWasm);
+    let runtime_api = client.runtime_api();
+    let block_id = BlockId::Number(client.info().unwrap().chain.best_number);
+    assert_eq!(runtime_api.fail_on_native(&block_id).unwrap(), 1);
 }
a/core/sr-io/src/lib.rs b/core/sr-io/src/lib.rs index 6a00e6ca27..f84f8798ad 100644 --- a/core/sr-io/src/lib.rs +++ b/core/sr-io/src/lib.rs @@ -21,14 +21,19 @@ #![cfg_attr(not(feature = "std"), feature(alloc_error_handler))] #![cfg_attr(not(feature = "std"), feature(core_intrinsics))] #![cfg_attr(not(feature = "std"), feature(alloc))] - -#![cfg_attr(feature = "std", doc = "Substrate runtime standard library as compiled when linked with Rust's standard library.")] -#![cfg_attr(not(feature = "std"), doc = "Substrate's runtime standard library as compiled without Rust's standard library.")] +#![cfg_attr( + feature = "std", + doc = "Substrate runtime standard library as compiled when linked with Rust's standard library." +)] +#![cfg_attr( + not(feature = "std"), + doc = "Substrate's runtime standard library as compiled without Rust's standard library." +)] pub enum EcdsaVerifyError { - BadRS, - BadV, - BadSignature, + BadRS, + BadV, + BadSignature, } #[cfg(feature = "std")] diff --git a/core/sr-primitives/src/generic/block.rs b/core/sr-primitives/src/generic/block.rs index 5fb83a2a4f..50a52d9a16 100644 --- a/core/sr-primitives/src/generic/block.rs +++ b/core/sr-primitives/src/generic/block.rs @@ -22,10 +22,10 @@ use std::fmt; #[cfg(feature = "std")] use serde_derive::Serialize; -use rstd::prelude::*; -use crate::codec::{Codec, Encode, Decode}; -use crate::traits::{self, Member, Block as BlockT, Header as HeaderT, MaybeSerialize}; +use crate::codec::{Codec, Decode, Encode}; +use crate::traits::{self, Block as BlockT, Header as HeaderT, MaybeSerialize, Member}; use crate::Justification; +use rstd::prelude::*; /// Something to identify a block. #[derive(PartialEq, Eq, Clone)] @@ -33,31 +33,31 @@ use crate::Justification; #[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] #[cfg_attr(feature = "std", serde(deny_unknown_fields))] pub enum BlockId { - /// Identify by block header hash. - Hash(<::Header as HeaderT>::Hash), - /// Identify by block number. - Number(<::Header as HeaderT>::Number), + /// Identify by block header hash. + Hash(<::Header as HeaderT>::Hash), + /// Identify by block number. + Number(<::Header as HeaderT>::Number), } impl BlockId { - /// Create a block ID from a hash. - pub fn hash(hash: Block::Hash) -> Self { - BlockId::Hash(hash) - } - - /// Create a block ID from a number. - pub fn number(number: ::Number) -> Self { - BlockId::Number(number) - } + /// Create a block ID from a hash. + pub fn hash(hash: Block::Hash) -> Self { + BlockId::Hash(hash) + } + + /// Create a block ID from a number. + pub fn number(number: ::Number) -> Self { + BlockId::Number(number) + } } impl Copy for BlockId {} #[cfg(feature = "std")] impl fmt::Display for BlockId { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{:?}", self) - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{:?}", self) + } } /// Abstraction over a substrate block. @@ -66,33 +66,33 @@ impl fmt::Display for BlockId { #[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] #[cfg_attr(feature = "std", serde(deny_unknown_fields))] pub struct Block { - /// The block header. - pub header: Header, - /// The accompanying extrinsics. - pub extrinsics: Vec, + /// The block header. + pub header: Header, + /// The accompanying extrinsics. 
+ pub extrinsics: Vec, } impl traits::Block for Block where - Header: HeaderT, - Extrinsic: Member + Codec + traits::Extrinsic, + Header: HeaderT, + Extrinsic: Member + Codec + traits::Extrinsic, { - type Extrinsic = Extrinsic; - type Header = Header; - type Hash = ::Hash; - - fn header(&self) -> &Self::Header { - &self.header - } - fn extrinsics(&self) -> &[Self::Extrinsic] { - &self.extrinsics[..] - } - fn deconstruct(self) -> (Self::Header, Vec) { - (self.header, self.extrinsics) - } - fn new(header: Self::Header, extrinsics: Vec) -> Self { - Block { header, extrinsics } - } + type Extrinsic = Extrinsic; + type Header = Header; + type Hash = ::Hash; + + fn header(&self) -> &Self::Header { + &self.header + } + fn extrinsics(&self) -> &[Self::Extrinsic] { + &self.extrinsics[..] + } + fn deconstruct(self) -> (Self::Header, Vec) { + (self.header, self.extrinsics) + } + fn new(header: Self::Header, extrinsics: Vec) -> Self { + Block { header, extrinsics } + } } /// Abstraction over a substrate block and justification. @@ -101,8 +101,8 @@ where #[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] #[cfg_attr(feature = "std", serde(deny_unknown_fields))] pub struct SignedBlock { - /// Full block. - pub block: Block, - /// Block justification. - pub justification: Option, + /// Full block. + pub block: Block, + /// Block justification. + pub justification: Option, } diff --git a/core/sr-primitives/src/generic/checked_extrinsic.rs b/core/sr-primitives/src/generic/checked_extrinsic.rs index c0548c26e5..e43fda2612 100644 --- a/core/sr-primitives/src/generic/checked_extrinsic.rs +++ b/core/sr-primitives/src/generic/checked_extrinsic.rs @@ -17,7 +17,7 @@ //! Generic implementation of an extrinsic that has passed the verification //! stage. -use crate::traits::{self, Member, SimpleArithmetic, MaybeDisplay}; +use crate::traits::{self, MaybeDisplay, Member, SimpleArithmetic}; /// Definition of something that the external world might want to say; its /// existence implies that it has been checked and is good, particularly with @@ -25,33 +25,32 @@ use crate::traits::{self, Member, SimpleArithmetic, MaybeDisplay}; #[derive(PartialEq, Eq, Clone)] #[cfg_attr(feature = "std", derive(Debug))] pub struct CheckedExtrinsic { - /// Who this purports to be from and the number of extrinsics have come before - /// from the same signer, if anyone (note this is not a signature). - pub signed: Option<(AccountId, Index)>, - /// The function that should be called. - pub function: Call, + /// Who this purports to be from and the number of extrinsics have come before + /// from the same signer, if anyone (note this is not a signature). + pub signed: Option<(AccountId, Index)>, + /// The function that should be called. 
+ pub function: Call, } -impl traits::Applyable - for CheckedExtrinsic +impl traits::Applyable for CheckedExtrinsic where - AccountId: Member + MaybeDisplay, - Index: Member + MaybeDisplay + SimpleArithmetic, - Call: Member, + AccountId: Member + MaybeDisplay, + Index: Member + MaybeDisplay + SimpleArithmetic, + Call: Member, { - type Index = Index; - type AccountId = AccountId; - type Call = Call; + type Index = Index; + type AccountId = AccountId; + type Call = Call; - fn index(&self) -> Option<&Self::Index> { - self.signed.as_ref().map(|x| &x.1) - } + fn index(&self) -> Option<&Self::Index> { + self.signed.as_ref().map(|x| &x.1) + } - fn sender(&self) -> Option<&Self::AccountId> { - self.signed.as_ref().map(|x| &x.0) - } + fn sender(&self) -> Option<&Self::AccountId> { + self.signed.as_ref().map(|x| &x.0) + } - fn deconstruct(self) -> (Self::Call, Option) { - (self.function, self.signed.map(|x| x.0)) - } + fn deconstruct(self) -> (Self::Call, Option) { + (self.function, self.signed.map(|x| x.0)) + } } diff --git a/core/sr-primitives/src/generic/digest.rs b/core/sr-primitives/src/generic/digest.rs index 8eed63900f..17ef3acc77 100644 --- a/core/sr-primitives/src/generic/digest.rs +++ b/core/sr-primitives/src/generic/digest.rs @@ -21,41 +21,42 @@ use serde_derive::Serialize; use rstd::prelude::*; +use crate::codec::{Codec, Decode, Encode, Input}; +use crate::traits::{self, DigestItem as DigestItemT, MaybeHash, Member}; use crate::ConsensusEngineId; -use crate::codec::{Decode, Encode, Codec, Input}; -use crate::traits::{self, Member, DigestItem as DigestItemT, MaybeHash}; /// Generic header digest. #[derive(PartialEq, Eq, Clone, Encode, Decode)] #[cfg_attr(feature = "std", derive(Debug, Serialize))] pub struct Digest { - /// A list of logs in the digest. - pub logs: Vec, + /// A list of logs in the digest. + pub logs: Vec, } impl Default for Digest { - fn default() -> Self { - Digest { logs: Vec::new(), } - } + fn default() -> Self { + Digest { logs: Vec::new() } + } } -impl traits::Digest for Digest where - Item: DigestItemT + Codec +impl traits::Digest for Digest +where + Item: DigestItemT + Codec, { - type Hash = Item::Hash; - type Item = Item; + type Hash = Item::Hash; + type Item = Item; - fn logs(&self) -> &[Self::Item] { - &self.logs - } + fn logs(&self) -> &[Self::Item] { + &self.logs + } - fn push(&mut self, item: Self::Item) { - self.logs.push(item); - } + fn push(&mut self, item: Self::Item) { + self.logs.push(item); + } - fn pop(&mut self) -> Option { - self.logs.pop() - } + fn pop(&mut self) -> Option { + self.logs.pop() + } } /// Digest item that is able to encode/decode 'system' digest items and @@ -64,50 +65,52 @@ impl traits::Digest for Digest where #[cfg_attr(feature = "std", derive(Debug))] #[allow(deprecated)] pub enum DigestItem { - /// System digest item announcing that authorities set has been changed - /// in the block. Contains the new set of authorities. - AuthoritiesChange(Vec), - /// System digest item that contains the root of changes trie at given - /// block. It is created for every block iff runtime supports changes - /// trie creation. - ChangesTrieRoot(Hash), - /// The old way to put a Seal on it. Deprecated. - #[deprecated] - Seal(u64, SealSignature), - /// Put a Seal on it - Consensus(ConsensusEngineId, Vec), - /// Any 'non-system' digest item, opaque to the native code. - Other(Vec), + /// System digest item announcing that authorities set has been changed + /// in the block. Contains the new set of authorities. 
+ AuthoritiesChange(Vec), + /// System digest item that contains the root of changes trie at given + /// block. It is created for every block iff runtime supports changes + /// trie creation. + ChangesTrieRoot(Hash), + /// The old way to put a Seal on it. Deprecated. + #[deprecated] + Seal(u64, SealSignature), + /// Put a Seal on it + Consensus(ConsensusEngineId, Vec), + /// Any 'non-system' digest item, opaque to the native code. + Other(Vec), } #[cfg(feature = "std")] -impl ::serde::Serialize for DigestItem { - fn serialize(&self, seq: S) -> Result where S: ::serde::Serializer { - self.using_encoded(|bytes| { - ::substrate_primitives::bytes::serialize(bytes, seq) - }) - } +impl ::serde::Serialize + for DigestItem +{ + fn serialize(&self, seq: S) -> Result + where + S: ::serde::Serializer, + { + self.using_encoded(|bytes| ::substrate_primitives::bytes::serialize(bytes, seq)) + } } - /// A 'referencing view' for digest item. Does not own its contents. Used by /// final runtime implementations for encoding/decoding its log items. #[derive(PartialEq, Eq, Clone)] #[cfg_attr(feature = "std", derive(Debug))] #[allow(deprecated)] pub enum DigestItemRef<'a, Hash: 'a, AuthorityId: 'a, SealSignature: 'a> { - /// Reference to `DigestItem::AuthoritiesChange`. - AuthoritiesChange(&'a [AuthorityId]), - /// Reference to `DigestItem::ChangesTrieRoot`. - ChangesTrieRoot(&'a Hash), - /// A deprecated sealed signature for testing - #[deprecated] - Seal(&'a u64, &'a SealSignature), - /// A sealed signature for testing - Consensus(&'a ConsensusEngineId, &'a [u8]), - /// Any 'non-system' digest item, opaque to the native code. - /// Reference to `DigestItem::Other`. - Other(&'a [u8]), + /// Reference to `DigestItem::AuthoritiesChange`. + AuthoritiesChange(&'a [AuthorityId]), + /// Reference to `DigestItem::ChangesTrieRoot`. + ChangesTrieRoot(&'a Hash), + /// A deprecated sealed signature for testing + #[deprecated] + Seal(&'a u64, &'a SealSignature), + /// A sealed signature for testing + Consensus(&'a ConsensusEngineId, &'a [u8]), + /// Any 'non-system' digest item, opaque to the native code. + /// Reference to `DigestItem::Other`. + Other(&'a [u8]), } /// Type of the digest item. Used to gain explicit control over `DigestItem` encoding @@ -117,154 +120,161 @@ pub enum DigestItemRef<'a, Hash: 'a, AuthorityId: 'a, SealSignature: 'a> { #[repr(u32)] #[derive(Encode, Decode)] enum DigestItemType { - Other = 0, - AuthoritiesChange = 1, - ChangesTrieRoot = 2, - Seal = 3, - Consensus = 4, + Other = 0, + AuthoritiesChange = 1, + ChangesTrieRoot = 2, + Seal = 3, + Consensus = 4, } impl DigestItem { - /// Returns Some if `self` is a `DigestItem::Other`. - pub fn as_other(&self) -> Option<&Vec> { - match *self { - DigestItem::Other(ref v) => Some(v), - _ => None, - } - } - - /// Returns a 'referencing view' for this digest item. - #[allow(deprecated)] - fn dref<'a>(&'a self) -> DigestItemRef<'a, Hash, AuthorityId, SealSignature> { - match *self { - DigestItem::AuthoritiesChange(ref v) => DigestItemRef::AuthoritiesChange(v), - DigestItem::ChangesTrieRoot(ref v) => DigestItemRef::ChangesTrieRoot(v), - DigestItem::Seal(ref v, ref s) => DigestItemRef::Seal(v, s), - DigestItem::Consensus(ref v, ref s) => DigestItemRef::Consensus(v, s), - DigestItem::Other(ref v) => DigestItemRef::Other(v), - } - } + /// Returns Some if `self` is a `DigestItem::Other`. 
+ pub fn as_other(&self) -> Option<&Vec> { + match *self { + DigestItem::Other(ref v) => Some(v), + _ => None, + } + } + + /// Returns a 'referencing view' for this digest item. + #[allow(deprecated)] + fn dref<'a>(&'a self) -> DigestItemRef<'a, Hash, AuthorityId, SealSignature> { + match *self { + DigestItem::AuthoritiesChange(ref v) => DigestItemRef::AuthoritiesChange(v), + DigestItem::ChangesTrieRoot(ref v) => DigestItemRef::ChangesTrieRoot(v), + DigestItem::Seal(ref v, ref s) => DigestItemRef::Seal(v, s), + DigestItem::Consensus(ref v, ref s) => DigestItemRef::Consensus(v, s), + DigestItem::Other(ref v) => DigestItemRef::Other(v), + } + } } impl< - Hash: Codec + Member, - AuthorityId: Codec + Member + MaybeHash, - SealSignature: Codec + Member, -> traits::DigestItem for DigestItem { - type Hash = Hash; - type AuthorityId = AuthorityId; - - fn as_authorities_change(&self) -> Option<&[Self::AuthorityId]> { - self.dref().as_authorities_change() - } - - fn as_changes_trie_root(&self) -> Option<&Self::Hash> { - self.dref().as_changes_trie_root() - } + Hash: Codec + Member, + AuthorityId: Codec + Member + MaybeHash, + SealSignature: Codec + Member, + > traits::DigestItem for DigestItem +{ + type Hash = Hash; + type AuthorityId = AuthorityId; + + fn as_authorities_change(&self) -> Option<&[Self::AuthorityId]> { + self.dref().as_authorities_change() + } + + fn as_changes_trie_root(&self) -> Option<&Self::Hash> { + self.dref().as_changes_trie_root() + } } -impl Encode for DigestItem { - fn encode(&self) -> Vec { - self.dref().encode() - } +impl Encode + for DigestItem +{ + fn encode(&self) -> Vec { + self.dref().encode() + } } -impl Decode for DigestItem { - #[allow(deprecated)] - fn decode(input: &mut I) -> Option { - let item_type: DigestItemType = Decode::decode(input)?; - match item_type { - DigestItemType::AuthoritiesChange => Some(DigestItem::AuthoritiesChange( - Decode::decode(input)?, - )), - DigestItemType::ChangesTrieRoot => Some(DigestItem::ChangesTrieRoot( - Decode::decode(input)?, - )), - DigestItemType::Seal => { - let vals: (u64, SealSignature) = Decode::decode(input)?; - Some(DigestItem::Seal(vals.0, vals.1)) - }, - DigestItemType::Consensus => { - let vals: (ConsensusEngineId, Vec) = Decode::decode(input)?; - Some(DigestItem::Consensus(vals.0, vals.1)) - } - DigestItemType::Other => Some(DigestItem::Other( - Decode::decode(input)?, - )), - } - } +impl Decode + for DigestItem +{ + #[allow(deprecated)] + fn decode(input: &mut I) -> Option { + let item_type: DigestItemType = Decode::decode(input)?; + match item_type { + DigestItemType::AuthoritiesChange => { + Some(DigestItem::AuthoritiesChange(Decode::decode(input)?)) + } + DigestItemType::ChangesTrieRoot => { + Some(DigestItem::ChangesTrieRoot(Decode::decode(input)?)) + } + DigestItemType::Seal => { + let vals: (u64, SealSignature) = Decode::decode(input)?; + Some(DigestItem::Seal(vals.0, vals.1)) + } + DigestItemType::Consensus => { + let vals: (ConsensusEngineId, Vec) = Decode::decode(input)?; + Some(DigestItem::Consensus(vals.0, vals.1)) + } + DigestItemType::Other => Some(DigestItem::Other(Decode::decode(input)?)), + } + } } -impl<'a, Hash: Codec + Member, AuthorityId: Codec + Member, SealSignature: Codec + Member> DigestItemRef<'a, Hash, AuthorityId, SealSignature> { - /// Cast this digest item into `AuthoritiesChange`. 
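// Illustrative sketch (not part of this patch) of the wire format implied by
// `DigestItemType` above, assuming parity-codec's compact length prefix
// (0x0c encodes the length 3); the concrete bytes mirror the encoding tests
// touched later in this patch:
//
//   DigestItem::Other(vec![10, 20, 30]).encode()
//     => [0x00,        // DigestItemType::Other discriminant
//         0x0c,        // compact-encoded length 3
//         10, 20, 30]  // the raw payload
//
// Every variant encodes as its `DigestItemType` discriminant followed by the
// codec encoding of its payload, which is what the `Encode` impl for
// `DigestItemRef` below implements.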
- pub fn as_authorities_change(&self) -> Option<&'a [AuthorityId]> { - match *self { - DigestItemRef::AuthoritiesChange(ref authorities) => Some(authorities), - _ => None, - } - } - - /// Cast this digest item into `ChangesTrieRoot`. - pub fn as_changes_trie_root(&self) -> Option<&'a Hash> { - match *self { - DigestItemRef::ChangesTrieRoot(ref changes_trie_root) => Some(changes_trie_root), - _ => None, - } - } +impl<'a, Hash: Codec + Member, AuthorityId: Codec + Member, SealSignature: Codec + Member> + DigestItemRef<'a, Hash, AuthorityId, SealSignature> +{ + /// Cast this digest item into `AuthoritiesChange`. + pub fn as_authorities_change(&self) -> Option<&'a [AuthorityId]> { + match *self { + DigestItemRef::AuthoritiesChange(ref authorities) => Some(authorities), + _ => None, + } + } + + /// Cast this digest item into `ChangesTrieRoot`. + pub fn as_changes_trie_root(&self) -> Option<&'a Hash> { + match *self { + DigestItemRef::ChangesTrieRoot(ref changes_trie_root) => Some(changes_trie_root), + _ => None, + } + } } #[allow(deprecated)] -impl<'a, Hash: Encode, AuthorityId: Encode, SealSignature: Encode> Encode for DigestItemRef<'a, Hash, AuthorityId, SealSignature> { - fn encode(&self) -> Vec { - let mut v = Vec::new(); - - match *self { - DigestItemRef::AuthoritiesChange(authorities) => { - DigestItemType::AuthoritiesChange.encode_to(&mut v); - authorities.encode_to(&mut v); - }, - DigestItemRef::ChangesTrieRoot(changes_trie_root) => { - DigestItemType::ChangesTrieRoot.encode_to(&mut v); - changes_trie_root.encode_to(&mut v); - }, - DigestItemRef::Seal(val, sig) => { - DigestItemType::Seal.encode_to(&mut v); - (val, sig).encode_to(&mut v); - }, - DigestItemRef::Consensus(val, sig) => { - DigestItemType::Consensus.encode_to(&mut v); - (val, sig).encode_to(&mut v); - }, - DigestItemRef::Other(val) => { - DigestItemType::Other.encode_to(&mut v); - val.encode_to(&mut v); - }, - } - - v - } +impl<'a, Hash: Encode, AuthorityId: Encode, SealSignature: Encode> Encode + for DigestItemRef<'a, Hash, AuthorityId, SealSignature> +{ + fn encode(&self) -> Vec { + let mut v = Vec::new(); + + match *self { + DigestItemRef::AuthoritiesChange(authorities) => { + DigestItemType::AuthoritiesChange.encode_to(&mut v); + authorities.encode_to(&mut v); + } + DigestItemRef::ChangesTrieRoot(changes_trie_root) => { + DigestItemType::ChangesTrieRoot.encode_to(&mut v); + changes_trie_root.encode_to(&mut v); + } + DigestItemRef::Seal(val, sig) => { + DigestItemType::Seal.encode_to(&mut v); + (val, sig).encode_to(&mut v); + } + DigestItemRef::Consensus(val, sig) => { + DigestItemType::Consensus.encode_to(&mut v); + (val, sig).encode_to(&mut v); + } + DigestItemRef::Other(val) => { + DigestItemType::Other.encode_to(&mut v); + val.encode_to(&mut v); + } + } + + v + } } #[cfg(test)] mod tests { - use super::*; - use substrate_primitives::hash::H512 as Signature; - - #[test] - #[allow(deprecated)] - fn should_serialize_digest() { - let digest = Digest { - logs: vec![ - DigestItem::AuthoritiesChange(vec![1]), - DigestItem::ChangesTrieRoot(4), - DigestItem::Seal(1, Signature::from_low_u64_be(15)), - DigestItem::Other(vec![1, 2, 3]), - ], - }; - - assert_eq!( + use super::*; + use substrate_primitives::hash::H512 as Signature; + + #[test] + #[allow(deprecated)] + fn should_serialize_digest() { + let digest = Digest { + logs: vec![ + DigestItem::AuthoritiesChange(vec![1]), + DigestItem::ChangesTrieRoot(4), + DigestItem::Seal(1, Signature::from_low_u64_be(15)), + DigestItem::Other(vec![1, 2, 3]), + ], + }; + + assert_eq!( 
::serde_json::to_string(&digest).unwrap(), r#"{"logs":["0x010401000000","0x0204000000","0x0301000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f","0x000c010203"]}"# ); - } + } } diff --git a/core/sr-primitives/src/generic/era.rs b/core/sr-primitives/src/generic/era.rs index e5a7b24f0c..b5e106229d 100644 --- a/core/sr-primitives/src/generic/era.rs +++ b/core/sr-primitives/src/generic/era.rs @@ -17,7 +17,7 @@ //! Generic implementation of an unchecked (pre-verification) extrinsic. #[cfg(feature = "std")] -use serde_derive::{Serialize, Deserialize}; +use serde_derive::{Deserialize, Serialize}; use crate::codec::{Decode, Encode, Input, Output}; @@ -28,16 +28,16 @@ pub type Phase = u64; #[derive(PartialEq, Eq, Clone, Copy)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize, Debug))] pub enum Era { - /// The transaction is valid forever. The genesis hash must be present in the signed content. - Immortal, - - /// Period and phase are encoded: - /// - The period of validity from the block hash found in the signing material. - /// - The phase in the period that this transaction's lifetime begins (and, importantly, - /// implies which block hash is included in the signature material). If the `period` is - /// greater than 1 << 12, then it will be a factor of the times greater than 1<<12 that - /// `period` is. - Mortal(Period, Phase), + /// The transaction is valid forever. The genesis hash must be present in the signed content. + Immortal, + + /// Period and phase are encoded: + /// - The period of validity from the block hash found in the signing material. + /// - The phase in the period that this transaction's lifetime begins (and, importantly, + /// implies which block hash is included in the signature material). If the `period` is + /// greater than 1 << 12, then it will be a factor of the times greater than 1<<12 that + /// `period` is. + Mortal(Period, Phase), } /* @@ -50,152 +50,157 @@ phase = 1 n = Q(current - phase, period) + phase */ impl Era { - /// Create a new era based on a period (which should be a power of two between 4 and 65536 inclusive) - /// and a block number on which it should start (or, for long periods, be shortly after the start). - pub fn mortal(period: u64, current: u64) -> Self { - let period = period.checked_next_power_of_two() - .unwrap_or(1 << 16) - .max(4) - .min(1 << 16); - let phase = current % period; - let quantize_factor = (period >> 12).max(1); - let quantized_phase = phase / quantize_factor * quantize_factor; - - Era::Mortal(period, quantized_phase) - } - - /// Create an "immortal" transaction. - pub fn immortal() -> Self { - Era::Immortal - } - - /// `true` if this is an immortal transaction. - pub fn is_immortal(&self) -> bool { - match self { - Era::Immortal => true, - _ => false, - } - } - - /// Get the block number of the start of the era whose properties this object - /// describes that `current` belongs to. - pub fn birth(self, current: u64) -> u64 { - match self { - Era::Immortal => 0, - Era::Mortal(period, phase) => (current.max(phase) - phase) / period * period + phase, - } - } - - /// Get the block number of the first block at which the era has ended. 
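// Worked example of the mortality arithmetic in this impl (sketch only; the
// numbers follow directly from the definitions of `mortal`, `birth` and
// `death`, and agree with `era_initialization_works` in the tests below):
//
//   Era::mortal(200, 513)
//     period = 200.checked_next_power_of_two() = 256  (clamped to [4, 65536])
//     phase  = 513 % 256 = 1
//     quantize_factor = (256 >> 12).max(1) = 1
//     => Era::Mortal(256, 1)
//
//   For Era::Mortal(256, 1) at current = 513:
//     birth(513) = (513 - 1) / 256 * 256 + 1 = 513
//     death(513) = 513 + 256 = 769
//
// i.e. a transaction created at block 513 with this era is valid for blocks
// in the half-open range [513, 769).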
- pub fn death(self, current: u64) -> u64 { - match self { - Era::Immortal => u64::max_value(), - Era::Mortal(period, _) => self.birth(current) + period, - } - } + /// Create a new era based on a period (which should be a power of two between 4 and 65536 inclusive) + /// and a block number on which it should start (or, for long periods, be shortly after the start). + pub fn mortal(period: u64, current: u64) -> Self { + let period = period + .checked_next_power_of_two() + .unwrap_or(1 << 16) + .max(4) + .min(1 << 16); + let phase = current % period; + let quantize_factor = (period >> 12).max(1); + let quantized_phase = phase / quantize_factor * quantize_factor; + + Era::Mortal(period, quantized_phase) + } + + /// Create an "immortal" transaction. + pub fn immortal() -> Self { + Era::Immortal + } + + /// `true` if this is an immortal transaction. + pub fn is_immortal(&self) -> bool { + match self { + Era::Immortal => true, + _ => false, + } + } + + /// Get the block number of the start of the era whose properties this object + /// describes that `current` belongs to. + pub fn birth(self, current: u64) -> u64 { + match self { + Era::Immortal => 0, + Era::Mortal(period, phase) => (current.max(phase) - phase) / period * period + phase, + } + } + + /// Get the block number of the first block at which the era has ended. + pub fn death(self, current: u64) -> u64 { + match self { + Era::Immortal => u64::max_value(), + Era::Mortal(period, _) => self.birth(current) + period, + } + } } impl Encode for Era { - fn encode_to(&self, output: &mut T) { - match self { - Era::Immortal => output.push_byte(0), - Era::Mortal(period, phase) => { - let quantize_factor = (*period as u64 >> 12).max(1); - let encoded = (period.trailing_zeros() - 1).max(1).min(15) as u16 | ((phase / quantize_factor) << 4) as u16; - output.push(&encoded); - } - } - } + fn encode_to(&self, output: &mut T) { + match self { + Era::Immortal => output.push_byte(0), + Era::Mortal(period, phase) => { + let quantize_factor = (*period as u64 >> 12).max(1); + let encoded = (period.trailing_zeros() - 1).max(1).min(15) as u16 + | ((phase / quantize_factor) << 4) as u16; + output.push(&encoded); + } + } + } } impl Decode for Era { - fn decode(input: &mut I) -> Option { - let first = input.read_byte()?; - if first == 0 { - Some(Era::Immortal) - } else { - let encoded = first as u64 + ((input.read_byte()? as u64) << 8); - let period = 2 << (encoded % (1 << 4)); - let quantize_factor = (period >> 12).max(1); - let phase = (encoded >> 4) * quantize_factor; - if period >= 4 && phase < period { - Some(Era::Mortal(period, phase)) - } else { - None - } - } - } + fn decode(input: &mut I) -> Option { + let first = input.read_byte()?; + if first == 0 { + Some(Era::Immortal) + } else { + let encoded = first as u64 + ((input.read_byte()? 
as u64) << 8); + let period = 2 << (encoded % (1 << 4)); + let quantize_factor = (period >> 12).max(1); + let phase = (encoded >> 4) * quantize_factor; + if period >= 4 && phase < period { + Some(Era::Mortal(period, phase)) + } else { + None + } + } + } } #[cfg(test)] mod tests { - use super::*; - - #[test] - fn immortal_works() { - let e = Era::immortal(); - assert_eq!(e.birth(0), 0); - assert_eq!(e.death(0), u64::max_value()); - assert_eq!(e.birth(1), 0); - assert_eq!(e.death(1), u64::max_value()); - assert_eq!(e.birth(u64::max_value()), 0); - assert_eq!(e.death(u64::max_value()), u64::max_value()); - assert!(e.is_immortal()); - - assert_eq!(e.encode(), vec![0u8]); - assert_eq!(e, Era::decode(&mut&[0u8][..]).unwrap()); - } - - #[test] - fn mortal_codec_works() { - let e = Era::mortal(64, 42); - assert!(!e.is_immortal()); - - let expected = vec![5 + 42 % 16 * 16, 42 / 16]; - assert_eq!(e.encode(), expected); - assert_eq!(e, Era::decode(&mut&expected[..]).unwrap()); - } - - #[test] - fn long_period_mortal_codec_works() { - let e = Era::mortal(32768, 20000); - - let expected = vec![(14 + 2500 % 16 * 16) as u8, (2500 / 16) as u8]; - assert_eq!(e.encode(), expected); - assert_eq!(e, Era::decode(&mut&expected[..]).unwrap()); - } - - #[test] - fn era_initialization_works() { - assert_eq!(Era::mortal(64, 42), Era::Mortal(64, 42)); - assert_eq!(Era::mortal(32768, 20000), Era::Mortal(32768, 20000)); - assert_eq!(Era::mortal(200, 513), Era::Mortal(256, 1)); - assert_eq!(Era::mortal(2, 1), Era::Mortal(4, 1)); - assert_eq!(Era::mortal(4, 5), Era::Mortal(4, 1)); - } - - #[test] - fn quantized_clamped_era_initialization_works() { - // clamp 1000000 to 65536, quantize 1000001 % 65536 to the nearest 4 - assert_eq!(Era::mortal(1000000, 1000001), Era::Mortal(65536, 1000001 % 65536 / 4 * 4)); - } - - #[test] - fn mortal_birth_death_works() { - let e = Era::mortal(4, 6); - for i in 6..10 { - assert_eq!(e.birth(i), 6); - assert_eq!(e.death(i), 10); - } - - // wrong because it's outside of the (current...current + period) range - assert_ne!(e.birth(10), 6); - assert_ne!(e.birth(5), 6); - } - - #[test] - fn current_less_than_phase() { - // should not panic - Era::mortal(4, 3).birth(1); - } + use super::*; + + #[test] + fn immortal_works() { + let e = Era::immortal(); + assert_eq!(e.birth(0), 0); + assert_eq!(e.death(0), u64::max_value()); + assert_eq!(e.birth(1), 0); + assert_eq!(e.death(1), u64::max_value()); + assert_eq!(e.birth(u64::max_value()), 0); + assert_eq!(e.death(u64::max_value()), u64::max_value()); + assert!(e.is_immortal()); + + assert_eq!(e.encode(), vec![0u8]); + assert_eq!(e, Era::decode(&mut &[0u8][..]).unwrap()); + } + + #[test] + fn mortal_codec_works() { + let e = Era::mortal(64, 42); + assert!(!e.is_immortal()); + + let expected = vec![5 + 42 % 16 * 16, 42 / 16]; + assert_eq!(e.encode(), expected); + assert_eq!(e, Era::decode(&mut &expected[..]).unwrap()); + } + + #[test] + fn long_period_mortal_codec_works() { + let e = Era::mortal(32768, 20000); + + let expected = vec![(14 + 2500 % 16 * 16) as u8, (2500 / 16) as u8]; + assert_eq!(e.encode(), expected); + assert_eq!(e, Era::decode(&mut &expected[..]).unwrap()); + } + + #[test] + fn era_initialization_works() { + assert_eq!(Era::mortal(64, 42), Era::Mortal(64, 42)); + assert_eq!(Era::mortal(32768, 20000), Era::Mortal(32768, 20000)); + assert_eq!(Era::mortal(200, 513), Era::Mortal(256, 1)); + assert_eq!(Era::mortal(2, 1), Era::Mortal(4, 1)); + assert_eq!(Era::mortal(4, 5), Era::Mortal(4, 1)); + } + + #[test] + fn 
quantized_clamped_era_initialization_works() { + // clamp 1000000 to 65536, quantize 1000001 % 65536 to the nearest 4 + assert_eq!( + Era::mortal(1000000, 1000001), + Era::Mortal(65536, 1000001 % 65536 / 4 * 4) + ); + } + + #[test] + fn mortal_birth_death_works() { + let e = Era::mortal(4, 6); + for i in 6..10 { + assert_eq!(e.birth(i), 6); + assert_eq!(e.death(i), 10); + } + + // wrong because it's outside of the (current...current + period) range + assert_ne!(e.birth(10), 6); + assert_ne!(e.birth(5), 6); + } + + #[test] + fn current_less_than_phase() { + // should not panic + Era::mortal(4, 3).birth(1); + } } diff --git a/core/sr-primitives/src/generic/header.rs b/core/sr-primitives/src/generic/header.rs index 60ccd93b3d..9ce7f4fdad 100644 --- a/core/sr-primitives/src/generic/header.rs +++ b/core/sr-primitives/src/generic/header.rs @@ -16,12 +16,14 @@ //! Generic implementation of a block header. +use crate::codec::{Codec, Decode, Encode, EncodeAsRef, HasCompact, Input, Output}; +use crate::generic::Digest; +use crate::traits::{ + self, DigestItem as DigestItemT, Hash as HashT, MaybeDisplay, MaybeSerializeDebug, + MaybeSerializeDebugButNotDeserialize, Member, SimpleArithmetic, SimpleBitOps, +}; #[cfg(feature = "std")] use serde_derive::Serialize; -use crate::codec::{Decode, Encode, Codec, Input, Output, HasCompact, EncodeAsRef}; -use crate::traits::{self, Member, SimpleArithmetic, SimpleBitOps, MaybeDisplay, - Hash as HashT, DigestItem as DigestItemT, MaybeSerializeDebug, MaybeSerializeDebugButNotDeserialize}; -use crate::generic::Digest; /// Abstraction over a block header for a substrate chain. #[derive(PartialEq, Eq, Clone)] @@ -29,136 +31,187 @@ use crate::generic::Digest; #[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] #[cfg_attr(feature = "std", serde(deny_unknown_fields))] pub struct Header, Hash: HashT, DigestItem> { - /// The parent hash. - pub parent_hash: ::Output, - /// The block number. - #[cfg_attr(feature = "std", serde(serialize_with = "serialize_number"))] - pub number: Number, - /// The state trie merkle root - pub state_root: ::Output, - /// The merkle root of the extrinsics. - pub extrinsics_root: ::Output, - /// A chain-specific digest of data useful for light clients or referencing auxiliary data. - pub digest: Digest, + /// The parent hash. + pub parent_hash: ::Output, + /// The block number. + #[cfg_attr(feature = "std", serde(serialize_with = "serialize_number"))] + pub number: Number, + /// The state trie merkle root + pub state_root: ::Output, + /// The merkle root of the extrinsics. + pub extrinsics_root: ::Output, + /// A chain-specific digest of data useful for light clients or referencing auxiliary data. 
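// (For reference, a sketch of what the `serialize_number` helper a few lines
// below produces: the block number is widened to a U256 and serialized as a
// quoted hex JSON string, so 0 becomes "0x0", 1 becomes "0x1" and
// u64::max_value() becomes "0xffffffffffffffff", as exercised by
// `should_serialize_numbers` in this file's tests.)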
+ pub digest: Digest, } #[cfg(feature = "std")] -pub fn serialize_number>(val: &T, s: S) -> Result where S: ::serde::Serializer { - use substrate_primitives::uint::U256; - let v: u128 = (*val).into(); - let lower = U256::from(v as u64); - let upper = U256::from(v.rotate_left(64) as u64) << 64; - ::serde::Serialize::serialize(&(upper + lower), s) +pub fn serialize_number>(val: &T, s: S) -> Result +where + S: ::serde::Serializer, +{ + use substrate_primitives::uint::U256; + let v: u128 = (*val).into(); + let lower = U256::from(v as u64); + let upper = U256::from(v.rotate_left(64) as u64) << 64; + ::serde::Serialize::serialize(&(upper + lower), s) } -impl Decode for Header where - Number: HasCompact + Copy + Into, - Hash: HashT, - Hash::Output: Decode, - DigestItem: DigestItemT + Decode, +impl Decode for Header +where + Number: HasCompact + Copy + Into, + Hash: HashT, + Hash::Output: Decode, + DigestItem: DigestItemT + Decode, { - fn decode(input: &mut I) -> Option { - Some(Header { - parent_hash: Decode::decode(input)?, - number: <::Type>::decode(input)?.into(), - state_root: Decode::decode(input)?, - extrinsics_root: Decode::decode(input)?, - digest: Decode::decode(input)?, - }) - } + fn decode(input: &mut I) -> Option { + Some(Header { + parent_hash: Decode::decode(input)?, + number: <::Type>::decode(input)?.into(), + state_root: Decode::decode(input)?, + extrinsics_root: Decode::decode(input)?, + digest: Decode::decode(input)?, + }) + } } -impl Encode for Header where - Number: HasCompact + Copy + Into, - Hash: HashT, - Hash::Output: Encode, - DigestItem: DigestItemT + Encode, +impl Encode for Header +where + Number: HasCompact + Copy + Into, + Hash: HashT, + Hash::Output: Encode, + DigestItem: DigestItemT + Encode, { - fn encode_to(&self, dest: &mut T) { - dest.push(&self.parent_hash); - dest.push(&<<::Type as EncodeAsRef<_>>::RefType>::from(&self.number)); - dest.push(&self.state_root); - dest.push(&self.extrinsics_root); - dest.push(&self.digest); - } + fn encode_to(&self, dest: &mut T) { + dest.push(&self.parent_hash); + dest.push(&<<::Type as EncodeAsRef<_>>::RefType>::from(&self.number)); + dest.push(&self.state_root); + dest.push(&self.extrinsics_root); + dest.push(&self.digest); + } } -impl traits::Header for Header where - Number: Member + MaybeSerializeDebug + ::rstd::hash::Hash + MaybeDisplay + SimpleArithmetic + Codec + Copy + Into, - Hash: HashT, - DigestItem: DigestItemT + Codec, - Hash::Output: Default + ::rstd::hash::Hash + Copy + Member + MaybeSerializeDebugButNotDeserialize + MaybeDisplay + SimpleBitOps + Codec, +impl traits::Header for Header +where + Number: Member + + MaybeSerializeDebug + + ::rstd::hash::Hash + + MaybeDisplay + + SimpleArithmetic + + Codec + + Copy + + Into, + Hash: HashT, + DigestItem: DigestItemT + Codec, + Hash::Output: Default + + ::rstd::hash::Hash + + Copy + + Member + + MaybeSerializeDebugButNotDeserialize + + MaybeDisplay + + SimpleBitOps + + Codec, { - type Number = Number; - type Hash = ::Output; - type Hashing = Hash; - type Digest = Digest; - - fn number(&self) -> &Self::Number { &self.number } - fn set_number(&mut self, num: Self::Number) { self.number = num } - - fn extrinsics_root(&self) -> &Self::Hash { &self.extrinsics_root } - fn set_extrinsics_root(&mut self, root: Self::Hash) { self.extrinsics_root = root } - - fn state_root(&self) -> &Self::Hash { &self.state_root } - fn set_state_root(&mut self, root: Self::Hash) { self.state_root = root } - - fn parent_hash(&self) -> &Self::Hash { &self.parent_hash } - fn set_parent_hash(&mut 
self, hash: Self::Hash) { self.parent_hash = hash } - - fn digest(&self) -> &Self::Digest { &self.digest } - fn digest_mut(&mut self) -> &mut Self::Digest { &mut self.digest } - fn set_digest(&mut self, digest: Self::Digest) { self.digest = digest } - - fn new( - number: Self::Number, - extrinsics_root: Self::Hash, - state_root: Self::Hash, - parent_hash: Self::Hash, - digest: Self::Digest - ) -> Self { - Header { - number, - extrinsics_root, - state_root, - parent_hash, - digest - } - } + type Number = Number; + type Hash = ::Output; + type Hashing = Hash; + type Digest = Digest; + + fn number(&self) -> &Self::Number { + &self.number + } + fn set_number(&mut self, num: Self::Number) { + self.number = num + } + + fn extrinsics_root(&self) -> &Self::Hash { + &self.extrinsics_root + } + fn set_extrinsics_root(&mut self, root: Self::Hash) { + self.extrinsics_root = root + } + + fn state_root(&self) -> &Self::Hash { + &self.state_root + } + fn set_state_root(&mut self, root: Self::Hash) { + self.state_root = root + } + + fn parent_hash(&self) -> &Self::Hash { + &self.parent_hash + } + fn set_parent_hash(&mut self, hash: Self::Hash) { + self.parent_hash = hash + } + + fn digest(&self) -> &Self::Digest { + &self.digest + } + fn digest_mut(&mut self) -> &mut Self::Digest { + &mut self.digest + } + fn set_digest(&mut self, digest: Self::Digest) { + self.digest = digest + } + + fn new( + number: Self::Number, + extrinsics_root: Self::Hash, + state_root: Self::Hash, + parent_hash: Self::Hash, + digest: Self::Digest, + ) -> Self { + Header { + number, + extrinsics_root, + state_root, + parent_hash, + digest, + } + } } -impl Header where - Number: Member + ::rstd::hash::Hash + Copy + MaybeDisplay + SimpleArithmetic + Codec + Into, - Hash: HashT, - DigestItem: DigestItemT + Codec, - Hash::Output: Default + ::rstd::hash::Hash + Copy + Member + MaybeDisplay + SimpleBitOps + Codec, - { - /// Convenience helper for computing the hash of the header without having - /// to import the trait. - pub fn hash(&self) -> Hash::Output { - Hash::hash_of(self) - } +impl Header +where + Number: + Member + ::rstd::hash::Hash + Copy + MaybeDisplay + SimpleArithmetic + Codec + Into, + Hash: HashT, + DigestItem: DigestItemT + Codec, + Hash::Output: + Default + ::rstd::hash::Hash + Copy + Member + MaybeDisplay + SimpleBitOps + Codec, +{ + /// Convenience helper for computing the hash of the header without having + /// to import the trait. 
+ pub fn hash(&self) -> Hash::Output { + Hash::hash_of(self) + } } #[cfg(all(test, feature = "std"))] mod tests { - use super::*; - - #[test] - fn should_serialize_numbers() { - fn serialize(num: u128) -> String { - let mut v = vec![]; - { - let mut ser = ::serde_json::Serializer::new(::std::io::Cursor::new(&mut v)); - serialize_number(&num, &mut ser).unwrap(); - } - String::from_utf8(v).unwrap() - } - - assert_eq!(serialize(0), "\"0x0\"".to_owned()); - assert_eq!(serialize(1), "\"0x1\"".to_owned()); - assert_eq!(serialize(u64::max_value() as u128), "\"0xffffffffffffffff\"".to_owned()); - assert_eq!(serialize(u64::max_value() as u128 + 1), "\"0x10000000000000000\"".to_owned()); - } + use super::*; + + #[test] + fn should_serialize_numbers() { + fn serialize(num: u128) -> String { + let mut v = vec![]; + { + let mut ser = ::serde_json::Serializer::new(::std::io::Cursor::new(&mut v)); + serialize_number(&num, &mut ser).unwrap(); + } + String::from_utf8(v).unwrap() + } + + assert_eq!(serialize(0), "\"0x0\"".to_owned()); + assert_eq!(serialize(1), "\"0x1\"".to_owned()); + assert_eq!( + serialize(u64::max_value() as u128), + "\"0xffffffffffffffff\"".to_owned() + ); + assert_eq!( + serialize(u64::max_value() as u128 + 1), + "\"0x10000000000000000\"".to_owned() + ); + } } diff --git a/core/sr-primitives/src/generic/mod.rs b/core/sr-primitives/src/generic/mod.rs index 47ce3cb251..8f59a20d7a 100644 --- a/core/sr-primitives/src/generic/mod.rs +++ b/core/sr-primitives/src/generic/mod.rs @@ -18,47 +18,47 @@ //! Generic implementations of Extrinsic/Header/Block. // end::description[] -mod unchecked_extrinsic; -mod unchecked_mortal_extrinsic; -mod unchecked_mortal_compact_extrinsic; -mod era; -mod checked_extrinsic; -mod header; mod block; +mod checked_extrinsic; mod digest; +mod era; +mod header; #[cfg(test)] mod tests; +mod unchecked_extrinsic; +mod unchecked_mortal_compact_extrinsic; +mod unchecked_mortal_extrinsic; -pub use self::unchecked_extrinsic::UncheckedExtrinsic; -pub use self::unchecked_mortal_extrinsic::UncheckedMortalExtrinsic; -pub use self::unchecked_mortal_compact_extrinsic::UncheckedMortalCompactExtrinsic; -pub use self::era::Era; +pub use self::block::{Block, BlockId, SignedBlock}; pub use self::checked_extrinsic::CheckedExtrinsic; -pub use self::header::Header; -pub use self::block::{Block, SignedBlock, BlockId}; pub use self::digest::{Digest, DigestItem, DigestItemRef}; +pub use self::era::Era; +pub use self::header::Header; +pub use self::unchecked_extrinsic::UncheckedExtrinsic; +pub use self::unchecked_mortal_compact_extrinsic::UncheckedMortalCompactExtrinsic; +pub use self::unchecked_mortal_extrinsic::UncheckedMortalExtrinsic; use crate::codec::Encode; use rstd::prelude::*; fn encode_with_vec_prefix<T, F: Fn(&mut Vec<u8>)>(encoder: F) -> Vec<u8> { - let size = ::rstd::mem::size_of::<T>(); - let reserve = match size { - 0...0b00111111 => 1, - 0...0b00111111_11111111 => 2, - _ => 4, - }; - let mut v = Vec::with_capacity(reserve + size); - v.resize(reserve, 0); - encoder(&mut v); + let size = ::rstd::mem::size_of::<T>(); + let reserve = match size { - 0...0b00111111 => 1, + 0...0b00111111 => 1, + 0...0b00111111_11111111 => 2, + _ => 4, + }; + let mut v = Vec::with_capacity(reserve + size); + v.resize(reserve, 0); + encoder(&mut v); - // need to prefix with the total length to ensure it's binary comptible with - // Vec<u8>.
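// Why the `Vec<()>` length trick below works (illustrative sketch, relying
// only on the `Encode` impls this module already imports): `()` encodes to
// zero bytes, so a `Vec<()>` of length `n` encodes to nothing but the compact
// length prefix for `n`. Splicing that over the reserved bytes yields exactly
// the prefix an encoded `Vec<u8>` of the same payload would carry:
//
//   let mut length: Vec<()> = Vec::new();
//   length.resize(3, ());
//   assert_eq!(length.encode(), Compact(3u32).encode()); // both are [0x0c]
//
// which is why consumers can treat a whole extrinsic as an opaque `Vec<u8>`.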
- let mut length: Vec<()> = Vec::new(); - length.resize(v.len() - reserve, ()); - length.using_encoded(|s| { - v.splice(0..reserve, s.iter().cloned()); - }); + // need to prefix with the total length to ensure it's binary compatible with + // Vec<u8>. + let mut length: Vec<()> = Vec::new(); + length.resize(v.len() - reserve, ()); + length.using_encoded(|s| { + v.splice(0..reserve, s.iter().cloned()); + }); - v + v } diff --git a/core/sr-primitives/src/generic/tests.rs b/core/sr-primitives/src/generic/tests.rs index 91fc8f3faf..43d95b9a79 100644 --- a/core/sr-primitives/src/generic/tests.rs +++ b/core/sr-primitives/src/generic/tests.rs @@ -16,42 +16,42 @@ //! Tests for the generic implementations of Extrinsic/Header/Block. +use super::DigestItem; use crate::codec::{Decode, Encode}; use substrate_primitives::{H256, H512}; -use super::DigestItem; #[test] fn system_digest_item_encoding() { - let item = DigestItem::AuthoritiesChange::<H256, u32, H512>(vec![10, 20, 30]); - let encoded = item.encode(); - assert_eq!(encoded, vec![ - // type = DigestItemType::AuthoritiesChange - 1, - // number of items in athorities set - 12, - // authorities - 10, 0, 0, 0, - 20, 0, 0, 0, - 30, 0, 0, 0, - ]); - - let decoded: DigestItem<H256, u32, H512> = Decode::decode(&mut &encoded[..]).unwrap(); - assert_eq!(item, decoded); + let item = DigestItem::AuthoritiesChange::<H256, u32, H512>(vec![10, 20, 30]); + let encoded = item.encode(); + assert_eq!( + encoded, + vec![ + // type = DigestItemType::AuthoritiesChange + 1, // number of items in authorities set + 12, // authorities + 10, 0, 0, 0, 20, 0, 0, 0, 30, 0, 0, 0, + ] + ); + + let decoded: DigestItem<H256, u32, H512> = Decode::decode(&mut &encoded[..]).unwrap(); + assert_eq!(item, decoded); } #[test] fn non_system_digest_item_encoding() { - let item = DigestItem::Other::<H256, u32, H512>(vec![10, 20, 30]); - let encoded = item.encode(); - assert_eq!(encoded, vec![ - // type = DigestItemType::Other - 0, - // length of other data - 12, - // authorities - 10, 20, 30, - ]); - - let decoded: DigestItem<H256, u32, H512> = Decode::decode(&mut &encoded[..]).unwrap(); - assert_eq!(item, decoded); + let item = DigestItem::Other::<H256, u32, H512>(vec![10, 20, 30]); + let encoded = item.encode(); + assert_eq!( + encoded, + vec![ + // type = DigestItemType::Other + 0, // length of other data + 12, // other data + 10, 20, 30, + ] + ); + + let decoded: DigestItem<H256, u32, H512> = Decode::decode(&mut &encoded[..]).unwrap(); + assert_eq!(item, decoded); } diff --git a/core/sr-primitives/src/generic/unchecked_extrinsic.rs b/core/sr-primitives/src/generic/unchecked_extrinsic.rs index d6e0d60e2c..7ac980df4a 100644 --- a/core/sr-primitives/src/generic/unchecked_extrinsic.rs +++ b/core/sr-primitives/src/generic/unchecked_extrinsic.rs @@ -19,21 +19,21 @@ #[cfg(feature = "std")] use std::fmt; -use rstd::prelude::*; -use crate::codec::{Decode, Encode, Codec, Input, HasCompact}; -use crate::traits::{self, Member, SimpleArithmetic, MaybeDisplay, Lookup, Extrinsic}; use super::CheckedExtrinsic; +use crate::codec::{Codec, Decode, Encode, HasCompact, Input}; +use crate::traits::{self, Extrinsic, Lookup, MaybeDisplay, Member, SimpleArithmetic}; +use rstd::prelude::*; #[derive(PartialEq, Eq, Clone, Encode, Decode)] pub struct SignatureContent<Address, Index, Signature> where - Address: Codec, - Index: HasCompact + Codec, - Signature: Codec, + Address: Codec, + Index: HasCompact + Codec, + Signature: Codec, { - signed: Address, - signature: Signature, - index: Index, + signed: Address, + signature: Signature, + index: Index, } /// An extrinsic right from the external world.
This is unchecked and so @@ -41,158 +41,170 @@ where #[derive(PartialEq, Eq, Clone)] pub struct UncheckedExtrinsic where - Address: Codec, - Index: HasCompact + Codec, - Signature: Codec, + Address: Codec, + Index: HasCompact + Codec, + Signature: Codec, { - /// The signature, address and number of extrinsics have come before from - /// the same signer, if this is a signed extrinsic. - pub signature: Option>, - /// The function that should be called. - pub function: Call, + /// The signature, address and number of extrinsics have come before from + /// the same signer, if this is a signed extrinsic. + pub signature: Option>, + /// The function that should be called. + pub function: Call, } impl UncheckedExtrinsic where - Address: Codec, - Index: HasCompact + Codec, - Signature: Codec, + Address: Codec, + Index: HasCompact + Codec, + Signature: Codec, { - /// New instance of a signed extrinsic aka "transaction". - pub fn new_signed(index: Index, function: Call, signed: Address, signature: Signature) -> Self { - UncheckedExtrinsic { - signature: Some(SignatureContent{signed, signature, index}), - function, - } - } - - /// New instance of an unsigned extrinsic aka "inherent". - pub fn new_unsigned(function: Call) -> Self { - UncheckedExtrinsic { - signature: None, - function, - } - } + /// New instance of a signed extrinsic aka "transaction". + pub fn new_signed(index: Index, function: Call, signed: Address, signature: Signature) -> Self { + UncheckedExtrinsic { + signature: Some(SignatureContent { + signed, + signature, + index, + }), + function, + } + } + + /// New instance of an unsigned extrinsic aka "inherent". + pub fn new_unsigned(function: Call) -> Self { + UncheckedExtrinsic { + signature: None, + function, + } + } } impl traits::Checkable - for UncheckedExtrinsic + for UncheckedExtrinsic where - Address: Member + MaybeDisplay + Codec, - Index: Member + MaybeDisplay + SimpleArithmetic + Codec, - Call: Encode + Member, - Signature: Member + traits::Verify + Codec, - AccountId: Member + MaybeDisplay, - Context: Lookup, + Address: Member + MaybeDisplay + Codec, + Index: Member + MaybeDisplay + SimpleArithmetic + Codec, + Call: Encode + Member, + Signature: Member + traits::Verify + Codec, + AccountId: Member + MaybeDisplay, + Context: Lookup, { - type Checked = CheckedExtrinsic; - - fn check(self, context: &Context) -> Result { - Ok(match self.signature { - Some(SignatureContent{signed, signature, index}) => { - let payload = (index, self.function); - let signed = context.lookup(signed)?; - if !crate::verify_encoded_lazy(&signature, &payload, &signed) { - return Err(crate::BAD_SIGNATURE) - } - CheckedExtrinsic { - signed: Some((signed, payload.0)), - function: payload.1, - } - } - None => CheckedExtrinsic { - signed: None, - function: self.function, - }, - }) - } + type Checked = CheckedExtrinsic; + + fn check(self, context: &Context) -> Result { + Ok(match self.signature { + Some(SignatureContent { + signed, + signature, + index, + }) => { + let payload = (index, self.function); + let signed = context.lookup(signed)?; + if !crate::verify_encoded_lazy(&signature, &payload, &signed) { + return Err(crate::BAD_SIGNATURE); + } + CheckedExtrinsic { + signed: Some((signed, payload.0)), + function: payload.1, + } + } + None => CheckedExtrinsic { + signed: None, + function: self.function, + }, + }) + } } -impl< - Address: Codec, - Index: HasCompact + Codec, - Signature: Codec, - Call, -> Extrinsic for UncheckedExtrinsic { - fn is_signed(&self) -> Option { - Some(self.signature.is_some()) - } 
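// Rough shape of how a caller uses the `Checkable` impl above (sketch only;
// `context` stands for any type implementing the `Lookup`/`CurrentHeight`
// style traits named in the where-clause, it is not a concrete API here):
//
//   let xt: UncheckedExtrinsic<Address, Index, Call, Signature> = ...;
//   let checked = xt.check(&context)?; // verifies sig over (index, function)
//   // checked.signed is Some((account, index)) for transactions and None
//   // for inherents; checked.function is ready to dispatch.
//
// Note that the signed payload is the encoding of `(index, function)`, so a
// signature cannot be replayed with a different index or call.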
+impl Extrinsic + for UncheckedExtrinsic +{ + fn is_signed(&self) -> Option { + Some(self.signature.is_some()) + } } impl Decode - for UncheckedExtrinsic + for UncheckedExtrinsic { - fn decode(input: &mut I) -> Option { - // This is a little more complicated than usual since the binary format must be compatible - // with substrate's generic `Vec` type. Basically this just means accepting that there - // will be a prefix of vector length (we don't need - // to use this). - let _length_do_not_remove_me_see_above: Vec<()> = Decode::decode(input)?; - - Some(UncheckedExtrinsic { - signature: Decode::decode(input)?, - function: Decode::decode(input)?, - }) - } + fn decode(input: &mut I) -> Option { + // This is a little more complicated than usual since the binary format must be compatible + // with substrate's generic `Vec` type. Basically this just means accepting that there + // will be a prefix of vector length (we don't need + // to use this). + let _length_do_not_remove_me_see_above: Vec<()> = Decode::decode(input)?; + + Some(UncheckedExtrinsic { + signature: Decode::decode(input)?, + function: Decode::decode(input)?, + }) + } } impl Encode - for UncheckedExtrinsic + for UncheckedExtrinsic { - fn encode(&self) -> Vec { - super::encode_with_vec_prefix::(|v| { - self.signature.encode_to(v); - self.function.encode_to(v); - }) - } + fn encode(&self) -> Vec { + super::encode_with_vec_prefix::(|v| { + self.signature.encode_to(v); + self.function.encode_to(v); + }) + } } #[cfg(feature = "std")] impl serde::Serialize - for UncheckedExtrinsic + for UncheckedExtrinsic { - fn serialize(&self, seq: S) -> Result where S: ::serde::Serializer { - self.using_encoded(|bytes| ::substrate_primitives::bytes::serialize(bytes, seq)) - } + fn serialize(&self, seq: S) -> Result + where + S: ::serde::Serializer, + { + self.using_encoded(|bytes| ::substrate_primitives::bytes::serialize(bytes, seq)) + } } #[cfg(feature = "std")] impl fmt::Debug - for UncheckedExtrinsic + for UncheckedExtrinsic where - Address: fmt::Debug + Codec, - Index: fmt::Debug + HasCompact + Codec, - Signature: Codec, - Call: fmt::Debug, + Address: fmt::Debug + Codec, + Index: fmt::Debug + HasCompact + Codec, + Signature: Codec, + Call: fmt::Debug, { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "UncheckedExtrinsic({:?}, {:?})", self.signature.as_ref().map(|x| (&x.signed, &x.index)), self.function) - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!( + f, + "UncheckedExtrinsic({:?}, {:?})", + self.signature.as_ref().map(|x| (&x.signed, &x.index)), + self.function + ) + } } #[cfg(test)] mod test { - use crate::codec::{Decode, Encode}; - use super::UncheckedExtrinsic; - - #[test] - fn encoding_matches_vec() { - type Extrinsic = UncheckedExtrinsic; - let ex = Extrinsic::new_unsigned(42); - let encoded = ex.encode(); - let decoded = Extrinsic::decode(&mut encoded.as_slice()).unwrap(); - assert_eq!(decoded, ex); - let as_vec: Vec = Decode::decode(&mut encoded.as_slice()).unwrap(); - assert_eq!(as_vec.encode(), encoded); - } - - - #[test] - #[cfg(feature = "std")] - fn serialization_of_unchecked_extrinsics() { - type Extrinsic = UncheckedExtrinsic; - let ex = Extrinsic::new_unsigned(42); - - assert_eq!(serde_json::to_string(&ex).unwrap(), "\"0x14002a000000\""); - } + use super::UncheckedExtrinsic; + use crate::codec::{Decode, Encode}; + + #[test] + fn encoding_matches_vec() { + type Extrinsic = UncheckedExtrinsic; + let ex = Extrinsic::new_unsigned(42); + let encoded = ex.encode(); + let decoded = 
Extrinsic::decode(&mut encoded.as_slice()).unwrap(); + assert_eq!(decoded, ex); + let as_vec: Vec = Decode::decode(&mut encoded.as_slice()).unwrap(); + assert_eq!(as_vec.encode(), encoded); + } + + #[test] + #[cfg(feature = "std")] + fn serialization_of_unchecked_extrinsics() { + type Extrinsic = UncheckedExtrinsic; + let ex = Extrinsic::new_unsigned(42); + + assert_eq!(serde_json::to_string(&ex).unwrap(), "\"0x14002a000000\""); + } } diff --git a/core/sr-primitives/src/generic/unchecked_mortal_compact_extrinsic.rs b/core/sr-primitives/src/generic/unchecked_mortal_compact_extrinsic.rs index 243747092c..dd72e10e84 100644 --- a/core/sr-primitives/src/generic/unchecked_mortal_compact_extrinsic.rs +++ b/core/sr-primitives/src/generic/unchecked_mortal_compact_extrinsic.rs @@ -19,12 +19,14 @@ #[cfg(feature = "std")] use std::fmt; +use super::{CheckedExtrinsic, Era}; +use crate::codec::{Compact, Decode, Encode, Input}; +use crate::traits::{ + self, BlockNumberToHash, Checkable, CurrentHeight, Extrinsic, Lookup, MaybeDisplay, Member, + SimpleArithmetic, +}; use rstd::prelude::*; use runtime_io::blake2_256; -use crate::codec::{Decode, Encode, Input, Compact}; -use crate::traits::{self, Member, SimpleArithmetic, MaybeDisplay, CurrentHeight, BlockNumberToHash, Lookup, - Checkable, Extrinsic}; -use super::{CheckedExtrinsic, Era}; const TRANSACTION_VERSION: u8 = 1; @@ -32,274 +34,424 @@ const TRANSACTION_VERSION: u8 = 1; /// can contain a signature. #[derive(PartialEq, Eq, Clone)] pub struct UncheckedMortalCompactExtrinsic { - /// The signature, address, number of extrinsics have come before from - /// the same signer and an era describing the longevity of this transaction, - /// if this is a signed extrinsic. - pub signature: Option<(Address, Signature, Compact, Era)>, - /// The function that should be called. - pub function: Call, + /// The signature, address, number of extrinsics have come before from + /// the same signer and an era describing the longevity of this transaction, + /// if this is a signed extrinsic. + pub signature: Option<(Address, Signature, Compact, Era)>, + /// The function that should be called. + pub function: Call, } -impl UncheckedMortalCompactExtrinsic { - /// New instance of a signed extrinsic aka "transaction". - pub fn new_signed(index: Index, function: Call, signed: Address, signature: Signature, era: Era) -> Self { - UncheckedMortalCompactExtrinsic { - signature: Some((signed, signature, index.into(), era)), - function, - } - } - - /// New instance of an unsigned extrinsic aka "inherent". - pub fn new_unsigned(function: Call) -> Self { - UncheckedMortalCompactExtrinsic { - signature: None, - function, - } - } +impl + UncheckedMortalCompactExtrinsic +{ + /// New instance of a signed extrinsic aka "transaction". + pub fn new_signed( + index: Index, + function: Call, + signed: Address, + signature: Signature, + era: Era, + ) -> Self { + UncheckedMortalCompactExtrinsic { + signature: Some((signed, signature, index.into(), era)), + function, + } + } + + /// New instance of an unsigned extrinsic aka "inherent". 
+ pub fn new_unsigned(function: Call) -> Self { + UncheckedMortalCompactExtrinsic { + signature: None, + function, + } + } } -impl Extrinsic for UncheckedMortalCompactExtrinsic { - fn is_signed(&self) -> Option { - Some(self.signature.is_some()) - } +impl Extrinsic + for UncheckedMortalCompactExtrinsic +{ + fn is_signed(&self) -> Option { + Some(self.signature.is_some()) + } } impl Checkable - for UncheckedMortalCompactExtrinsic + for UncheckedMortalCompactExtrinsic where - Address: Member + MaybeDisplay, - Index: Member + MaybeDisplay + SimpleArithmetic, - Compact: Encode, - Call: Encode + Member, - Signature: Member + traits::Verify, - AccountId: Member + MaybeDisplay, - BlockNumber: SimpleArithmetic, - Hash: Encode, - Context: Lookup - + CurrentHeight - + BlockNumberToHash, + Address: Member + MaybeDisplay, + Index: Member + MaybeDisplay + SimpleArithmetic, + Compact: Encode, + Call: Encode + Member, + Signature: Member + traits::Verify, + AccountId: Member + MaybeDisplay, + BlockNumber: SimpleArithmetic, + Hash: Encode, + Context: Lookup + + CurrentHeight + + BlockNumberToHash, { - type Checked = CheckedExtrinsic; - - fn check(self, context: &Context) -> Result { - Ok(match self.signature { - Some((signed, signature, index, era)) => { - let h = context.block_number_to_hash(BlockNumber::sa(era.birth(context.current_height().as_()))) - .ok_or("transaction birth block ancient")?; - let signed = context.lookup(signed)?; - let raw_payload = (index, self.function, era, h); - if !raw_payload.using_encoded(|payload| { - if payload.len() > 256 { - signature.verify(&blake2_256(payload)[..], &signed) - } else { - signature.verify(payload, &signed) - } - }) { - return Err(crate::BAD_SIGNATURE) - } - CheckedExtrinsic { - signed: Some((signed, (raw_payload.0).0)), - function: raw_payload.1, - } - } - None => CheckedExtrinsic { - signed: None, - function: self.function, - }, - }) - } + type Checked = CheckedExtrinsic; + + fn check(self, context: &Context) -> Result { + Ok(match self.signature { + Some((signed, signature, index, era)) => { + let h = context + .block_number_to_hash(BlockNumber::sa( + era.birth(context.current_height().as_()), + )) + .ok_or("transaction birth block ancient")?; + let signed = context.lookup(signed)?; + let raw_payload = (index, self.function, era, h); + if !raw_payload.using_encoded(|payload| { + if payload.len() > 256 { + signature.verify(&blake2_256(payload)[..], &signed) + } else { + signature.verify(payload, &signed) + } + }) { + return Err(crate::BAD_SIGNATURE); + } + CheckedExtrinsic { + signed: Some((signed, (raw_payload.0).0)), + function: raw_payload.1, + } + } + None => CheckedExtrinsic { + signed: None, + function: self.function, + }, + }) + } } impl Decode - for UncheckedMortalCompactExtrinsic + for UncheckedMortalCompactExtrinsic where - Address: Decode, - Signature: Decode, - Compact: Decode, - Call: Decode, + Address: Decode, + Signature: Decode, + Compact: Decode, + Call: Decode, { - fn decode(input: &mut I) -> Option { - // This is a little more complicated than usual since the binary format must be compatible - // with substrate's generic `Vec` type. Basically this just means accepting that there - // will be a prefix of vector length (we don't need - // to use this). 
- let _length_do_not_remove_me_see_above: Vec<()> = Decode::decode(input)?; - - let version = input.read_byte()?; - - let is_signed = version & 0b1000_0000 != 0; - let version = version & 0b0111_1111; - if version != TRANSACTION_VERSION { - return None - } - - Some(UncheckedMortalCompactExtrinsic { - signature: if is_signed { Some(Decode::decode(input)?) } else { None }, - function: Decode::decode(input)?, - }) - } + fn decode(input: &mut I) -> Option { + // This is a little more complicated than usual since the binary format must be compatible + // with substrate's generic `Vec` type. Basically this just means accepting that there + // will be a prefix of vector length (we don't need + // to use this). + let _length_do_not_remove_me_see_above: Vec<()> = Decode::decode(input)?; + + let version = input.read_byte()?; + + let is_signed = version & 0b1000_0000 != 0; + let version = version & 0b0111_1111; + if version != TRANSACTION_VERSION { + return None; + } + + Some(UncheckedMortalCompactExtrinsic { + signature: if is_signed { + Some(Decode::decode(input)?) + } else { + None + }, + function: Decode::decode(input)?, + }) + } } impl Encode - for UncheckedMortalCompactExtrinsic + for UncheckedMortalCompactExtrinsic where - Address: Encode, - Signature: Encode, - Compact: Encode, - Call: Encode, + Address: Encode, + Signature: Encode, + Compact: Encode, + Call: Encode, { - fn encode(&self) -> Vec { - super::encode_with_vec_prefix::(|v| { - // 1 byte version id. - match self.signature.as_ref() { - Some(s) => { - v.push(TRANSACTION_VERSION | 0b1000_0000); - s.encode_to(v); - } - None => { - v.push(TRANSACTION_VERSION & 0b0111_1111); - } - } - self.function.encode_to(v); - }) - } + fn encode(&self) -> Vec { + super::encode_with_vec_prefix::(|v| { + // 1 byte version id. 
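// Resulting leading byte of the encoding below (the values follow from
// TRANSACTION_VERSION = 1 defined above):
//   signed:   1 | 0b1000_0000 = 0x81
//   unsigned: 1 & 0b0111_1111 = 0x01
// The `Decode` impl above reverses this: the top bit yields `is_signed` and
// the low seven bits the version, rejecting anything other than version 1.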
+ match self.signature.as_ref() { + Some(s) => { + v.push(TRANSACTION_VERSION | 0b1000_0000); + s.encode_to(v); + } + None => { + v.push(TRANSACTION_VERSION & 0b0111_1111); + } + } + self.function.encode_to(v); + }) + } } #[cfg(feature = "std")] impl serde::Serialize - for UncheckedMortalCompactExtrinsic - where Compact: Encode + for UncheckedMortalCompactExtrinsic +where + Compact: Encode, { - fn serialize(&self, seq: S) -> Result where S: ::serde::Serializer { - self.using_encoded(|bytes| seq.serialize_bytes(bytes)) - } + fn serialize(&self, seq: S) -> Result + where + S: ::serde::Serializer, + { + self.using_encoded(|bytes| seq.serialize_bytes(bytes)) + } } #[cfg(feature = "std")] -impl fmt::Debug for UncheckedMortalCompactExtrinsic where - Address: fmt::Debug, - Index: fmt::Debug, - Call: fmt::Debug, +impl fmt::Debug + for UncheckedMortalCompactExtrinsic +where + Address: fmt::Debug, + Index: fmt::Debug, + Call: fmt::Debug, { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "UncheckedMortalCompactExtrinsic({:?}, {:?})", self.signature.as_ref().map(|x| (&x.0, &x.2)), self.function) - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!( + f, + "UncheckedMortalCompactExtrinsic({:?}, {:?})", + self.signature.as_ref().map(|x| (&x.0, &x.2)), + self.function + ) + } } #[cfg(test)] mod tests { - use super::*; - use runtime_io::blake2_256; - use crate::codec::{Encode, Decode}; - use serde_derive::{Serialize, Deserialize}; - - struct TestContext; - impl Lookup for TestContext { - type Source = u64; - type Target = u64; - fn lookup(&self, s: u64) -> Result { Ok(s) } - } - impl CurrentHeight for TestContext { - type BlockNumber = u64; - fn current_height(&self) -> u64 { 42 } - } - impl BlockNumberToHash for TestContext { - type BlockNumber = u64; - type Hash = u64; - fn block_number_to_hash(&self, n: u64) -> Option { Some(n) } - } - - #[derive(Eq, PartialEq, Clone, Debug, Serialize, Deserialize, Encode, Decode)] - struct TestSig(u64, Vec); - impl traits::Verify for TestSig { - type Signer = u64; - fn verify>(&self, mut msg: L, signer: &Self::Signer) -> bool { - *signer == self.0 && msg.get() == &self.1[..] 
- } - } - - const DUMMY_ACCOUNTID: u64 = 0; - - type Ex = UncheckedMortalCompactExtrinsic, TestSig>; - type CEx = CheckedExtrinsic>; - - #[test] - fn unsigned_codec_should_work() { - let ux = Ex::new_unsigned(vec![0u8;0]); - let encoded = ux.encode(); - assert_eq!(Ex::decode(&mut &encoded[..]), Some(ux)); - } - - #[test] - fn signed_codec_should_work() { - let ux = Ex::new_signed(0, vec![0u8;0], DUMMY_ACCOUNTID, TestSig(DUMMY_ACCOUNTID, (DUMMY_ACCOUNTID, vec![0u8;0], Era::immortal(), 0u64).encode()), Era::immortal()); - let encoded = ux.encode(); - assert_eq!(Ex::decode(&mut &encoded[..]), Some(ux)); - } - - #[test] - fn large_signed_codec_should_work() { - let ux = Ex::new_signed(0, vec![0u8;0], DUMMY_ACCOUNTID, TestSig(DUMMY_ACCOUNTID, (DUMMY_ACCOUNTID, vec![0u8; 257], Era::immortal(), 0u64).using_encoded(blake2_256)[..].to_owned()), Era::immortal()); - let encoded = ux.encode(); - assert_eq!(Ex::decode(&mut &encoded[..]), Some(ux)); - } - - #[test] - fn unsigned_check_should_work() { - let ux = Ex::new_unsigned(vec![0u8;0]); - assert!(!ux.is_signed().unwrap_or(false)); - assert!(>::check(ux, &TestContext).is_ok()); - } - - #[test] - fn badly_signed_check_should_fail() { - let ux = Ex::new_signed(0, vec![0u8;0], DUMMY_ACCOUNTID, TestSig(DUMMY_ACCOUNTID, vec![0u8]), Era::immortal()); - assert!(ux.is_signed().unwrap_or(false)); - assert_eq!(>::check(ux, &TestContext), Err(crate::BAD_SIGNATURE)); - } - - #[test] - fn immortal_signed_check_should_work() { - let ux = Ex::new_signed(0, vec![0u8;0], DUMMY_ACCOUNTID, TestSig(DUMMY_ACCOUNTID, (Compact::from(DUMMY_ACCOUNTID), vec![0u8;0], Era::immortal(), 0u64).encode()), Era::immortal()); - assert!(ux.is_signed().unwrap_or(false)); - assert_eq!(>::check(ux, &TestContext), Ok(CEx { signed: Some((DUMMY_ACCOUNTID, 0)), function: vec![0u8;0] })); - } - - #[test] - fn mortal_signed_check_should_work() { - let ux = Ex::new_signed(0, vec![0u8;0], DUMMY_ACCOUNTID, TestSig(DUMMY_ACCOUNTID, (Compact::from(DUMMY_ACCOUNTID), vec![0u8;0], Era::mortal(32, 42), 42u64).encode()), Era::mortal(32, 42)); - assert!(ux.is_signed().unwrap_or(false)); - assert_eq!(>::check(ux, &TestContext), Ok(CEx { signed: Some((DUMMY_ACCOUNTID, 0)), function: vec![0u8;0] })); - } - - #[test] - fn later_mortal_signed_check_should_work() { - let ux = Ex::new_signed(0, vec![0u8;0], DUMMY_ACCOUNTID, TestSig(DUMMY_ACCOUNTID, (Compact::from(DUMMY_ACCOUNTID), vec![0u8;0], Era::mortal(32, 11), 11u64).encode()), Era::mortal(32, 11)); - assert!(ux.is_signed().unwrap_or(false)); - assert_eq!(>::check(ux, &TestContext), Ok(CEx { signed: Some((DUMMY_ACCOUNTID, 0)), function: vec![0u8;0] })); - } - - #[test] - fn too_late_mortal_signed_check_should_fail() { - let ux = Ex::new_signed(0, vec![0u8;0], DUMMY_ACCOUNTID, TestSig(DUMMY_ACCOUNTID, (DUMMY_ACCOUNTID, vec![0u8;0], Era::mortal(32, 10), 10u64).encode()), Era::mortal(32, 10)); - assert!(ux.is_signed().unwrap_or(false)); - assert_eq!(>::check(ux, &TestContext), Err(crate::BAD_SIGNATURE)); - } - - #[test] - fn too_early_mortal_signed_check_should_fail() { - let ux = Ex::new_signed(0, vec![0u8;0], DUMMY_ACCOUNTID, TestSig(DUMMY_ACCOUNTID, (DUMMY_ACCOUNTID, vec![0u8;0], Era::mortal(32, 43), 43u64).encode()), Era::mortal(32, 43)); - assert!(ux.is_signed().unwrap_or(false)); - assert_eq!(>::check(ux, &TestContext), Err(crate::BAD_SIGNATURE)); - } - - #[test] - fn encoding_matches_vec() { - let ex = Ex::new_unsigned(vec![0u8;0]); - let encoded = ex.encode(); - let decoded = Ex::decode(&mut encoded.as_slice()).unwrap(); - assert_eq!(decoded, ex); - 
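// Numbers behind the mortal-check tests above (derived from `Era::birth` and
// the TestContext, which fixes the current height at 42 and maps block n to
// hash n; a sketch, not additional test code):
//   Era::immortal()      => birth 0,  so the payload embeds block hash 0.
//   Era::mortal(32, 42) = Mortal(32, 10); birth(42) = 42, so signing against
//     block hash 42 verifies (mortal_signed_check_should_work).
//   Era::mortal(32, 11) = Mortal(32, 11); birth(42) = 11, so the payload must
//     embed block hash 11 (later_mortal_signed_check_should_work).
//   Era::mortal(32, 10) = Mortal(32, 10); birth(42) = 42, but the test signs
//     hash 10, so verification fails (too_late_mortal_signed_check_should_fail).
//   Era::mortal(32, 43) = Mortal(32, 11); birth(42) = 11, but the test signs
//     hash 43, so verification fails (too_early_mortal_signed_check_should_fail).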
let as_vec: Vec = Decode::decode(&mut encoded.as_slice()).unwrap(); - assert_eq!(as_vec.encode(), encoded); - } + use super::*; + use crate::codec::{Decode, Encode}; + use runtime_io::blake2_256; + use serde_derive::{Deserialize, Serialize}; + + struct TestContext; + impl Lookup for TestContext { + type Source = u64; + type Target = u64; + fn lookup(&self, s: u64) -> Result { + Ok(s) + } + } + impl CurrentHeight for TestContext { + type BlockNumber = u64; + fn current_height(&self) -> u64 { + 42 + } + } + impl BlockNumberToHash for TestContext { + type BlockNumber = u64; + type Hash = u64; + fn block_number_to_hash(&self, n: u64) -> Option { + Some(n) + } + } + + #[derive(Eq, PartialEq, Clone, Debug, Serialize, Deserialize, Encode, Decode)] + struct TestSig(u64, Vec); + impl traits::Verify for TestSig { + type Signer = u64; + fn verify>(&self, mut msg: L, signer: &Self::Signer) -> bool { + *signer == self.0 && msg.get() == &self.1[..] + } + } + + const DUMMY_ACCOUNTID: u64 = 0; + + type Ex = UncheckedMortalCompactExtrinsic, TestSig>; + type CEx = CheckedExtrinsic>; + + #[test] + fn unsigned_codec_should_work() { + let ux = Ex::new_unsigned(vec![0u8; 0]); + let encoded = ux.encode(); + assert_eq!(Ex::decode(&mut &encoded[..]), Some(ux)); + } + + #[test] + fn signed_codec_should_work() { + let ux = Ex::new_signed( + 0, + vec![0u8; 0], + DUMMY_ACCOUNTID, + TestSig( + DUMMY_ACCOUNTID, + (DUMMY_ACCOUNTID, vec![0u8; 0], Era::immortal(), 0u64).encode(), + ), + Era::immortal(), + ); + let encoded = ux.encode(); + assert_eq!(Ex::decode(&mut &encoded[..]), Some(ux)); + } + + #[test] + fn large_signed_codec_should_work() { + let ux = Ex::new_signed( + 0, + vec![0u8; 0], + DUMMY_ACCOUNTID, + TestSig( + DUMMY_ACCOUNTID, + (DUMMY_ACCOUNTID, vec![0u8; 257], Era::immortal(), 0u64).using_encoded(blake2_256) + [..] 
+ .to_owned(), + ), + Era::immortal(), + ); + let encoded = ux.encode(); + assert_eq!(Ex::decode(&mut &encoded[..]), Some(ux)); + } + + #[test] + fn unsigned_check_should_work() { + let ux = Ex::new_unsigned(vec![0u8; 0]); + assert!(!ux.is_signed().unwrap_or(false)); + assert!(>::check(ux, &TestContext).is_ok()); + } + + #[test] + fn badly_signed_check_should_fail() { + let ux = Ex::new_signed( + 0, + vec![0u8; 0], + DUMMY_ACCOUNTID, + TestSig(DUMMY_ACCOUNTID, vec![0u8]), + Era::immortal(), + ); + assert!(ux.is_signed().unwrap_or(false)); + assert_eq!( + >::check(ux, &TestContext), + Err(crate::BAD_SIGNATURE) + ); + } + + #[test] + fn immortal_signed_check_should_work() { + let ux = Ex::new_signed( + 0, + vec![0u8; 0], + DUMMY_ACCOUNTID, + TestSig( + DUMMY_ACCOUNTID, + ( + Compact::from(DUMMY_ACCOUNTID), + vec![0u8; 0], + Era::immortal(), + 0u64, + ) + .encode(), + ), + Era::immortal(), + ); + assert!(ux.is_signed().unwrap_or(false)); + assert_eq!( + >::check(ux, &TestContext), + Ok(CEx { + signed: Some((DUMMY_ACCOUNTID, 0)), + function: vec![0u8; 0] + }) + ); + } + + #[test] + fn mortal_signed_check_should_work() { + let ux = Ex::new_signed( + 0, + vec![0u8; 0], + DUMMY_ACCOUNTID, + TestSig( + DUMMY_ACCOUNTID, + ( + Compact::from(DUMMY_ACCOUNTID), + vec![0u8; 0], + Era::mortal(32, 42), + 42u64, + ) + .encode(), + ), + Era::mortal(32, 42), + ); + assert!(ux.is_signed().unwrap_or(false)); + assert_eq!( + >::check(ux, &TestContext), + Ok(CEx { + signed: Some((DUMMY_ACCOUNTID, 0)), + function: vec![0u8; 0] + }) + ); + } + + #[test] + fn later_mortal_signed_check_should_work() { + let ux = Ex::new_signed( + 0, + vec![0u8; 0], + DUMMY_ACCOUNTID, + TestSig( + DUMMY_ACCOUNTID, + ( + Compact::from(DUMMY_ACCOUNTID), + vec![0u8; 0], + Era::mortal(32, 11), + 11u64, + ) + .encode(), + ), + Era::mortal(32, 11), + ); + assert!(ux.is_signed().unwrap_or(false)); + assert_eq!( + >::check(ux, &TestContext), + Ok(CEx { + signed: Some((DUMMY_ACCOUNTID, 0)), + function: vec![0u8; 0] + }) + ); + } + + #[test] + fn too_late_mortal_signed_check_should_fail() { + let ux = Ex::new_signed( + 0, + vec![0u8; 0], + DUMMY_ACCOUNTID, + TestSig( + DUMMY_ACCOUNTID, + (DUMMY_ACCOUNTID, vec![0u8; 0], Era::mortal(32, 10), 10u64).encode(), + ), + Era::mortal(32, 10), + ); + assert!(ux.is_signed().unwrap_or(false)); + assert_eq!( + >::check(ux, &TestContext), + Err(crate::BAD_SIGNATURE) + ); + } + + #[test] + fn too_early_mortal_signed_check_should_fail() { + let ux = Ex::new_signed( + 0, + vec![0u8; 0], + DUMMY_ACCOUNTID, + TestSig( + DUMMY_ACCOUNTID, + (DUMMY_ACCOUNTID, vec![0u8; 0], Era::mortal(32, 43), 43u64).encode(), + ), + Era::mortal(32, 43), + ); + assert!(ux.is_signed().unwrap_or(false)); + assert_eq!( + >::check(ux, &TestContext), + Err(crate::BAD_SIGNATURE) + ); + } + + #[test] + fn encoding_matches_vec() { + let ex = Ex::new_unsigned(vec![0u8; 0]); + let encoded = ex.encode(); + let decoded = Ex::decode(&mut encoded.as_slice()).unwrap(); + assert_eq!(decoded, ex); + let as_vec: Vec = Decode::decode(&mut encoded.as_slice()).unwrap(); + assert_eq!(as_vec.encode(), encoded); + } } diff --git a/core/sr-primitives/src/generic/unchecked_mortal_extrinsic.rs b/core/sr-primitives/src/generic/unchecked_mortal_extrinsic.rs index 93eeb55884..15899dcdcb 100644 --- a/core/sr-primitives/src/generic/unchecked_mortal_extrinsic.rs +++ b/core/sr-primitives/src/generic/unchecked_mortal_extrinsic.rs @@ -19,12 +19,14 @@ #[cfg(feature = "std")] use std::fmt; +use super::{CheckedExtrinsic, Era}; +use crate::codec::{Decode, Encode, Input}; 
+use crate::traits::{ + self, BlockNumberToHash, Checkable, CurrentHeight, Extrinsic, Lookup, MaybeDisplay, Member, + SimpleArithmetic, +}; use rstd::prelude::*; use runtime_io::blake2_256; -use crate::codec::{Decode, Encode, Input}; -use crate::traits::{self, Member, SimpleArithmetic, MaybeDisplay, CurrentHeight, BlockNumberToHash, Lookup, - Checkable, Extrinsic}; -use super::{CheckedExtrinsic, Era}; const TRANSACTION_VERSION: u8 = 1; @@ -32,273 +34,402 @@ const TRANSACTION_VERSION: u8 = 1; /// can contain a signature. #[derive(PartialEq, Eq, Clone)] pub struct UncheckedMortalExtrinsic { - /// The signature, address, number of extrinsics have come before from - /// the same signer and an era describing the longevity of this transaction, - /// if this is a signed extrinsic. - pub signature: Option<(Address, Signature, Index, Era)>, - /// The function that should be called. - pub function: Call, + /// The signature, address, number of extrinsics have come before from + /// the same signer and an era describing the longevity of this transaction, + /// if this is a signed extrinsic. + pub signature: Option<(Address, Signature, Index, Era)>, + /// The function that should be called. + pub function: Call, } impl UncheckedMortalExtrinsic { - /// New instance of a signed extrinsic aka "transaction". - pub fn new_signed(index: Index, function: Call, signed: Address, signature: Signature, era: Era) -> Self { - UncheckedMortalExtrinsic { - signature: Some((signed, signature, index, era)), - function, - } - } - - /// New instance of an unsigned extrinsic aka "inherent". - pub fn new_unsigned(function: Call) -> Self { - UncheckedMortalExtrinsic { - signature: None, - function, - } - } + /// New instance of a signed extrinsic aka "transaction". + pub fn new_signed( + index: Index, + function: Call, + signed: Address, + signature: Signature, + era: Era, + ) -> Self { + UncheckedMortalExtrinsic { + signature: Some((signed, signature, index, era)), + function, + } + } + + /// New instance of an unsigned extrinsic aka "inherent". 
+ pub fn new_unsigned(function: Call) -> Self { + UncheckedMortalExtrinsic { + signature: None, + function, + } + } } -impl Extrinsic for UncheckedMortalExtrinsic { - fn is_signed(&self) -> Option { - Some(self.signature.is_some()) - } +impl Extrinsic + for UncheckedMortalExtrinsic +{ + fn is_signed(&self) -> Option { + Some(self.signature.is_some()) + } } impl Checkable - for UncheckedMortalExtrinsic + for UncheckedMortalExtrinsic where - Address: Member + MaybeDisplay, - Index: Encode + Member + MaybeDisplay + SimpleArithmetic, - Call: Encode + Member, - Signature: Member + traits::Verify, - AccountId: Member + MaybeDisplay, - BlockNumber: SimpleArithmetic, - Hash: Encode, - Context: Lookup - + CurrentHeight - + BlockNumberToHash, + Address: Member + MaybeDisplay, + Index: Encode + Member + MaybeDisplay + SimpleArithmetic, + Call: Encode + Member, + Signature: Member + traits::Verify, + AccountId: Member + MaybeDisplay, + BlockNumber: SimpleArithmetic, + Hash: Encode, + Context: Lookup + + CurrentHeight + + BlockNumberToHash, { - type Checked = CheckedExtrinsic; - - fn check(self, context: &Context) -> Result { - Ok(match self.signature { - Some((signed, signature, index, era)) => { - let h = context.block_number_to_hash(BlockNumber::sa(era.birth(context.current_height().as_()))) - .ok_or("transaction birth block ancient")?; - let signed = context.lookup(signed)?; - let raw_payload = (index, self.function, era, h); - - if !raw_payload.using_encoded(|payload| { - if payload.len() > 256 { - signature.verify(&blake2_256(payload)[..], &signed) - } else { - signature.verify(payload, &signed) - } - }) { - return Err(crate::BAD_SIGNATURE) - } - CheckedExtrinsic { - signed: Some((signed, raw_payload.0)), - function: raw_payload.1, - } - } - None => CheckedExtrinsic { - signed: None, - function: self.function, - }, - }) - } + type Checked = CheckedExtrinsic; + + fn check(self, context: &Context) -> Result { + Ok(match self.signature { + Some((signed, signature, index, era)) => { + let h = context + .block_number_to_hash(BlockNumber::sa( + era.birth(context.current_height().as_()), + )) + .ok_or("transaction birth block ancient")?; + let signed = context.lookup(signed)?; + let raw_payload = (index, self.function, era, h); + + if !raw_payload.using_encoded(|payload| { + if payload.len() > 256 { + signature.verify(&blake2_256(payload)[..], &signed) + } else { + signature.verify(payload, &signed) + } + }) { + return Err(crate::BAD_SIGNATURE); + } + CheckedExtrinsic { + signed: Some((signed, raw_payload.0)), + function: raw_payload.1, + } + } + None => CheckedExtrinsic { + signed: None, + function: self.function, + }, + }) + } } impl Decode - for UncheckedMortalExtrinsic + for UncheckedMortalExtrinsic where - Address: Decode, - Signature: Decode, - Index: Decode, - Call: Decode, + Address: Decode, + Signature: Decode, + Index: Decode, + Call: Decode, { - fn decode(input: &mut I) -> Option { - // This is a little more complicated than usual since the binary format must be compatible - // with substrate's generic `Vec` type. Basically this just means accepting that there - // will be a prefix of vector length (we don't need - // to use this). - let _length_do_not_remove_me_see_above: Vec<()> = Decode::decode(input)?; - - let version = input.read_byte()?; - - let is_signed = version & 0b1000_0000 != 0; - let version = version & 0b0111_1111; - if version != TRANSACTION_VERSION { - return None - } - - Some(UncheckedMortalExtrinsic { - signature: if is_signed { Some(Decode::decode(input)?) 
} else { None }, - function: Decode::decode(input)?, - }) - } + fn decode(input: &mut I) -> Option { + // This is a little more complicated than usual since the binary format must be compatible + // with substrate's generic `Vec` type. Basically this just means accepting that there + // will be a prefix of vector length (we don't need + // to use this). + let _length_do_not_remove_me_see_above: Vec<()> = Decode::decode(input)?; + + let version = input.read_byte()?; + + let is_signed = version & 0b1000_0000 != 0; + let version = version & 0b0111_1111; + if version != TRANSACTION_VERSION { + return None; + } + + Some(UncheckedMortalExtrinsic { + signature: if is_signed { + Some(Decode::decode(input)?) + } else { + None + }, + function: Decode::decode(input)?, + }) + } } impl Encode - for UncheckedMortalExtrinsic + for UncheckedMortalExtrinsic where - Address: Encode, - Signature: Encode, - Index: Encode, - Call: Encode, + Address: Encode, + Signature: Encode, + Index: Encode, + Call: Encode, { - fn encode(&self) -> Vec { - super::encode_with_vec_prefix::(|v| { - // 1 byte version id. - match self.signature.as_ref() { - Some(s) => { - v.push(TRANSACTION_VERSION | 0b1000_0000); - s.encode_to(v); - } - None => { - v.push(TRANSACTION_VERSION & 0b0111_1111); - } - } - self.function.encode_to(v); - }) - } + fn encode(&self) -> Vec { + super::encode_with_vec_prefix::(|v| { + // 1 byte version id. + match self.signature.as_ref() { + Some(s) => { + v.push(TRANSACTION_VERSION | 0b1000_0000); + s.encode_to(v); + } + None => { + v.push(TRANSACTION_VERSION & 0b0111_1111); + } + } + self.function.encode_to(v); + }) + } } #[cfg(feature = "std")] impl serde::Serialize - for UncheckedMortalExtrinsic + for UncheckedMortalExtrinsic { - fn serialize(&self, seq: S) -> Result where S: ::serde::Serializer { - self.using_encoded(|bytes| seq.serialize_bytes(bytes)) - } + fn serialize(&self, seq: S) -> Result + where + S: ::serde::Serializer, + { + self.using_encoded(|bytes| seq.serialize_bytes(bytes)) + } } #[cfg(feature = "std")] -impl fmt::Debug for UncheckedMortalExtrinsic where - Address: fmt::Debug, - Index: fmt::Debug, - Call: fmt::Debug, +impl fmt::Debug + for UncheckedMortalExtrinsic +where + Address: fmt::Debug, + Index: fmt::Debug, + Call: fmt::Debug, { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "UncheckedMortalExtrinsic({:?}, {:?})", self.signature.as_ref().map(|x| (&x.0, &x.2)), self.function) - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!( + f, + "UncheckedMortalExtrinsic({:?}, {:?})", + self.signature.as_ref().map(|x| (&x.0, &x.2)), + self.function + ) + } } #[cfg(test)] mod tests { - use super::*; - use runtime_io::blake2_256; - use crate::codec::{Encode, Decode}; - use serde_derive::{Serialize, Deserialize}; - - struct TestContext; - impl Lookup for TestContext { - type Source = u64; - type Target = u64; - fn lookup(&self, s: u64) -> Result { Ok(s) } - } - impl CurrentHeight for TestContext { - type BlockNumber = u64; - fn current_height(&self) -> u64 { 42 } - } - impl BlockNumberToHash for TestContext { - type BlockNumber = u64; - type Hash = u64; - fn block_number_to_hash(&self, n: u64) -> Option { Some(n) } - } - - #[derive(Eq, PartialEq, Clone, Debug, Serialize, Deserialize, Encode, Decode)] - struct TestSig(u64, Vec); - impl traits::Verify for TestSig { - type Signer = u64; - fn verify>(&self, mut msg: L, signer: &Self::Signer) -> bool { - *signer == self.0 && msg.get() == &self.1[..] 
- } - } - - const DUMMY_ACCOUNTID: u64 = 0; - - type Ex = UncheckedMortalExtrinsic, TestSig>; - type CEx = CheckedExtrinsic>; - - #[test] - fn unsigned_codec_should_work() { - let ux = Ex::new_unsigned(vec![0u8;0]); - let encoded = ux.encode(); - assert_eq!(Ex::decode(&mut &encoded[..]), Some(ux)); - } - - #[test] - fn signed_codec_should_work() { - let ux = Ex::new_signed(0, vec![0u8;0], DUMMY_ACCOUNTID, TestSig(DUMMY_ACCOUNTID, (DUMMY_ACCOUNTID, vec![0u8;0], Era::immortal(), 0u64).encode()), Era::immortal()); - let encoded = ux.encode(); - assert_eq!(Ex::decode(&mut &encoded[..]), Some(ux)); - } - - #[test] - fn large_signed_codec_should_work() { - let ux = Ex::new_signed(0, vec![0u8;0], DUMMY_ACCOUNTID, TestSig(DUMMY_ACCOUNTID, (DUMMY_ACCOUNTID, vec![0u8; 257], Era::immortal(), 0u64).using_encoded(blake2_256)[..].to_owned()), Era::immortal()); - let encoded = ux.encode(); - assert_eq!(Ex::decode(&mut &encoded[..]), Some(ux)); - } - - #[test] - fn unsigned_check_should_work() { - let ux = Ex::new_unsigned(vec![0u8;0]); - assert!(!ux.is_signed().unwrap_or(false)); - assert!(>::check(ux, &TestContext).is_ok()); - } - - #[test] - fn badly_signed_check_should_fail() { - let ux = Ex::new_signed(0, vec![0u8;0], DUMMY_ACCOUNTID, TestSig(DUMMY_ACCOUNTID, vec![0u8]), Era::immortal()); - assert!(ux.is_signed().unwrap_or(false)); - assert_eq!(>::check(ux, &TestContext), Err(crate::BAD_SIGNATURE)); - } - - #[test] - fn immortal_signed_check_should_work() { - let ux = Ex::new_signed(0, vec![0u8;0], DUMMY_ACCOUNTID, TestSig(DUMMY_ACCOUNTID, (DUMMY_ACCOUNTID, vec![0u8;0], Era::immortal(), 0u64).encode()), Era::immortal()); - assert!(ux.is_signed().unwrap_or(false)); - assert_eq!(>::check(ux, &TestContext), Ok(CEx { signed: Some((DUMMY_ACCOUNTID, 0)), function: vec![0u8;0] })); - } - - #[test] - fn mortal_signed_check_should_work() { - let ux = Ex::new_signed(0, vec![0u8;0], DUMMY_ACCOUNTID, TestSig(DUMMY_ACCOUNTID, (DUMMY_ACCOUNTID, vec![0u8;0], Era::mortal(32, 42), 42u64).encode()), Era::mortal(32, 42)); - assert!(ux.is_signed().unwrap_or(false)); - assert_eq!(>::check(ux, &TestContext), Ok(CEx { signed: Some((DUMMY_ACCOUNTID, 0)), function: vec![0u8;0] })); - } - - #[test] - fn later_mortal_signed_check_should_work() { - let ux = Ex::new_signed(0, vec![0u8;0], DUMMY_ACCOUNTID, TestSig(DUMMY_ACCOUNTID, (DUMMY_ACCOUNTID, vec![0u8;0], Era::mortal(32, 11), 11u64).encode()), Era::mortal(32, 11)); - assert!(ux.is_signed().unwrap_or(false)); - assert_eq!(>::check(ux, &TestContext), Ok(CEx { signed: Some((DUMMY_ACCOUNTID, 0)), function: vec![0u8;0] })); - } - - #[test] - fn too_late_mortal_signed_check_should_fail() { - let ux = Ex::new_signed(0, vec![0u8;0], DUMMY_ACCOUNTID, TestSig(DUMMY_ACCOUNTID, (DUMMY_ACCOUNTID, vec![0u8;0], Era::mortal(32, 10), 10u64).encode()), Era::mortal(32, 10)); - assert!(ux.is_signed().unwrap_or(false)); - assert_eq!(>::check(ux, &TestContext), Err(crate::BAD_SIGNATURE)); - } - - #[test] - fn too_early_mortal_signed_check_should_fail() { - let ux = Ex::new_signed(0, vec![0u8;0], DUMMY_ACCOUNTID, TestSig(DUMMY_ACCOUNTID, (DUMMY_ACCOUNTID, vec![0u8;0], Era::mortal(32, 43), 43u64).encode()), Era::mortal(32, 43)); - assert!(ux.is_signed().unwrap_or(false)); - assert_eq!(>::check(ux, &TestContext), Err(crate::BAD_SIGNATURE)); - } - - #[test] - fn encoding_matches_vec() { - let ex = Ex::new_unsigned(vec![0u8;0]); - let encoded = ex.encode(); - let decoded = Ex::decode(&mut encoded.as_slice()).unwrap(); - assert_eq!(decoded, ex); - let as_vec: Vec = Decode::decode(&mut 
encoded.as_slice()).unwrap(); - assert_eq!(as_vec.encode(), encoded); - } + use super::*; + use crate::codec::{Decode, Encode}; + use runtime_io::blake2_256; + use serde_derive::{Deserialize, Serialize}; + + struct TestContext; + impl Lookup for TestContext { + type Source = u64; + type Target = u64; + fn lookup(&self, s: u64) -> Result { + Ok(s) + } + } + impl CurrentHeight for TestContext { + type BlockNumber = u64; + fn current_height(&self) -> u64 { + 42 + } + } + impl BlockNumberToHash for TestContext { + type BlockNumber = u64; + type Hash = u64; + fn block_number_to_hash(&self, n: u64) -> Option { + Some(n) + } + } + + #[derive(Eq, PartialEq, Clone, Debug, Serialize, Deserialize, Encode, Decode)] + struct TestSig(u64, Vec); + impl traits::Verify for TestSig { + type Signer = u64; + fn verify>(&self, mut msg: L, signer: &Self::Signer) -> bool { + *signer == self.0 && msg.get() == &self.1[..] + } + } + + const DUMMY_ACCOUNTID: u64 = 0; + + type Ex = UncheckedMortalExtrinsic, TestSig>; + type CEx = CheckedExtrinsic>; + + #[test] + fn unsigned_codec_should_work() { + let ux = Ex::new_unsigned(vec![0u8; 0]); + let encoded = ux.encode(); + assert_eq!(Ex::decode(&mut &encoded[..]), Some(ux)); + } + + #[test] + fn signed_codec_should_work() { + let ux = Ex::new_signed( + 0, + vec![0u8; 0], + DUMMY_ACCOUNTID, + TestSig( + DUMMY_ACCOUNTID, + (DUMMY_ACCOUNTID, vec![0u8; 0], Era::immortal(), 0u64).encode(), + ), + Era::immortal(), + ); + let encoded = ux.encode(); + assert_eq!(Ex::decode(&mut &encoded[..]), Some(ux)); + } + + #[test] + fn large_signed_codec_should_work() { + let ux = Ex::new_signed( + 0, + vec![0u8; 0], + DUMMY_ACCOUNTID, + TestSig( + DUMMY_ACCOUNTID, + (DUMMY_ACCOUNTID, vec![0u8; 257], Era::immortal(), 0u64).using_encoded(blake2_256) + [..] 
+ .to_owned(), + ), + Era::immortal(), + ); + let encoded = ux.encode(); + assert_eq!(Ex::decode(&mut &encoded[..]), Some(ux)); + } + + #[test] + fn unsigned_check_should_work() { + let ux = Ex::new_unsigned(vec![0u8; 0]); + assert!(!ux.is_signed().unwrap_or(false)); + assert!(>::check(ux, &TestContext).is_ok()); + } + + #[test] + fn badly_signed_check_should_fail() { + let ux = Ex::new_signed( + 0, + vec![0u8; 0], + DUMMY_ACCOUNTID, + TestSig(DUMMY_ACCOUNTID, vec![0u8]), + Era::immortal(), + ); + assert!(ux.is_signed().unwrap_or(false)); + assert_eq!( + >::check(ux, &TestContext), + Err(crate::BAD_SIGNATURE) + ); + } + + #[test] + fn immortal_signed_check_should_work() { + let ux = Ex::new_signed( + 0, + vec![0u8; 0], + DUMMY_ACCOUNTID, + TestSig( + DUMMY_ACCOUNTID, + (DUMMY_ACCOUNTID, vec![0u8; 0], Era::immortal(), 0u64).encode(), + ), + Era::immortal(), + ); + assert!(ux.is_signed().unwrap_or(false)); + assert_eq!( + >::check(ux, &TestContext), + Ok(CEx { + signed: Some((DUMMY_ACCOUNTID, 0)), + function: vec![0u8; 0] + }) + ); + } + + #[test] + fn mortal_signed_check_should_work() { + let ux = Ex::new_signed( + 0, + vec![0u8; 0], + DUMMY_ACCOUNTID, + TestSig( + DUMMY_ACCOUNTID, + (DUMMY_ACCOUNTID, vec![0u8; 0], Era::mortal(32, 42), 42u64).encode(), + ), + Era::mortal(32, 42), + ); + assert!(ux.is_signed().unwrap_or(false)); + assert_eq!( + >::check(ux, &TestContext), + Ok(CEx { + signed: Some((DUMMY_ACCOUNTID, 0)), + function: vec![0u8; 0] + }) + ); + } + + #[test] + fn later_mortal_signed_check_should_work() { + let ux = Ex::new_signed( + 0, + vec![0u8; 0], + DUMMY_ACCOUNTID, + TestSig( + DUMMY_ACCOUNTID, + (DUMMY_ACCOUNTID, vec![0u8; 0], Era::mortal(32, 11), 11u64).encode(), + ), + Era::mortal(32, 11), + ); + assert!(ux.is_signed().unwrap_or(false)); + assert_eq!( + >::check(ux, &TestContext), + Ok(CEx { + signed: Some((DUMMY_ACCOUNTID, 0)), + function: vec![0u8; 0] + }) + ); + } + + #[test] + fn too_late_mortal_signed_check_should_fail() { + let ux = Ex::new_signed( + 0, + vec![0u8; 0], + DUMMY_ACCOUNTID, + TestSig( + DUMMY_ACCOUNTID, + (DUMMY_ACCOUNTID, vec![0u8; 0], Era::mortal(32, 10), 10u64).encode(), + ), + Era::mortal(32, 10), + ); + assert!(ux.is_signed().unwrap_or(false)); + assert_eq!( + >::check(ux, &TestContext), + Err(crate::BAD_SIGNATURE) + ); + } + + #[test] + fn too_early_mortal_signed_check_should_fail() { + let ux = Ex::new_signed( + 0, + vec![0u8; 0], + DUMMY_ACCOUNTID, + TestSig( + DUMMY_ACCOUNTID, + (DUMMY_ACCOUNTID, vec![0u8; 0], Era::mortal(32, 43), 43u64).encode(), + ), + Era::mortal(32, 43), + ); + assert!(ux.is_signed().unwrap_or(false)); + assert_eq!( + >::check(ux, &TestContext), + Err(crate::BAD_SIGNATURE) + ); + } + + #[test] + fn encoding_matches_vec() { + let ex = Ex::new_unsigned(vec![0u8; 0]); + let encoded = ex.encode(); + let decoded = Ex::decode(&mut encoded.as_slice()).unwrap(); + assert_eq!(decoded, ex); + let as_vec: Vec = Decode::decode(&mut encoded.as_slice()).unwrap(); + assert_eq!(as_vec.encode(), encoded); + } } diff --git a/core/sr-primitives/src/lib.rs b/core/sr-primitives/src/lib.rs index 5167e57072..72d72609ce 100644 --- a/core/sr-primitives/src/lib.rs +++ b/core/sr-primitives/src/lib.rs @@ -17,7 +17,6 @@ //! Runtime Modules shared primitive types. 
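// [Editor's note] Both extrinsic files above use the same wire convention in
// their `Encode`/`Decode` impls: after the vector length prefix, the first
// byte packs a signed flag into the high bit and TRANSACTION_VERSION into the
// low seven bits. A minimal sketch of just that byte, assuming the length
// prefix and payload are handled elsewhere:
const TRANSACTION_VERSION: u8 = 1;

fn encode_version_byte(signed: bool) -> u8 {
    if signed {
        TRANSACTION_VERSION | 0b1000_0000
    } else {
        TRANSACTION_VERSION & 0b0111_1111
    }
}

// Returns `Some(is_signed)` for a recognized version and `None` otherwise,
// mirroring why `decode` above returns `None` on a version mismatch.
fn decode_version_byte(byte: u8) -> Option<bool> {
    if byte & 0b0111_1111 != TRANSACTION_VERSION {
        return None;
    }
    Some(byte & 0b1000_0000 != 0)
}

fn main() {
    assert_eq!(decode_version_byte(encode_version_byte(true)), Some(true));
    assert_eq!(decode_version_byte(encode_version_byte(false)), Some(false));
    assert_eq!(decode_version_byte(2), None); // unknown version is rejected
}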
#![warn(missing_docs)] - #![cfg_attr(not(feature = "std"), no_std)] #[doc(hidden)] @@ -27,11 +26,11 @@ pub use parity_codec as codec; pub use serde_derive; #[cfg(feature = "std")] -pub use runtime_io::{StorageOverlay, ChildrenStorageOverlay}; +pub use runtime_io::{ChildrenStorageOverlay, StorageOverlay}; +use codec::{Decode, Encode}; use rstd::prelude::*; -use substrate_primitives::{ed25519, sr25519, hash::H512}; -use codec::{Encode, Decode}; +use substrate_primitives::{ed25519, hash::H512, sr25519}; #[cfg(feature = "std")] use substrate_primitives::hexdisplay::ascii_format; @@ -39,8 +38,8 @@ use substrate_primitives::hexdisplay::ascii_format; #[cfg(feature = "std")] pub mod testing; -pub mod traits; pub mod generic; +pub mod traits; pub mod transaction_validity; /// A message indicating an invalid signature in extrinsic. @@ -59,7 +58,7 @@ pub const BLOCK_FULL: &str = "block size limit is reached"; /// Justification type. pub type Justification = Vec; -use traits::{Verify, Lazy}; +use traits::{Lazy, Verify}; /// A String that is a `&'static str` on `no_std` and a `Cow<'static, str>` on `std`. #[cfg(feature = "std")] @@ -72,52 +71,64 @@ pub type RuntimeString = &'static str; #[cfg(feature = "std")] #[macro_export] macro_rules! create_runtime_str { - ( $y:expr ) => {{ ::std::borrow::Cow::Borrowed($y) }} + ( $y:expr ) => {{ + ::std::borrow::Cow::Borrowed($y) + }}; } /// Create a const [RuntimeString]. #[cfg(not(feature = "std"))] #[macro_export] macro_rules! create_runtime_str { - ( $y:expr ) => {{ $y }} + ( $y:expr ) => {{ + $y + }}; } #[cfg(feature = "std")] -pub use serde::{Serialize, de::DeserializeOwned}; +pub use serde::{de::DeserializeOwned, Serialize}; #[cfg(feature = "std")] -pub use serde_derive::{Serialize, Deserialize}; +pub use serde_derive::{Deserialize, Serialize}; /// Complex storage builder stuff. #[cfg(feature = "std")] pub trait BuildStorage: Sized { - /// Hash given slice. - /// - /// Default to xx128 hashing. - fn hash(data: &[u8]) -> [u8; 16] { - let r = runtime_io::twox_128(data); - log::trace!(target: "build_storage", "{} <= {}", substrate_primitives::hexdisplay::HexDisplay::from(&r), ascii_format(data)); - r - } - /// Build the storage out of this builder. - fn build_storage(self) -> Result<(StorageOverlay, ChildrenStorageOverlay), String> { - let mut storage = Default::default(); - let mut child_storage = Default::default(); - self.assimilate_storage(&mut storage, &mut child_storage)?; - Ok((storage, child_storage)) - } - /// Assimilate the storage for this module into pre-existing overlays. - fn assimilate_storage(self, storage: &mut StorageOverlay, child_storage: &mut ChildrenStorageOverlay) -> Result<(), String>; + /// Hash given slice. + /// + /// Default to xx128 hashing. + fn hash(data: &[u8]) -> [u8; 16] { + let r = runtime_io::twox_128(data); + log::trace!(target: "build_storage", "{} <= {}", substrate_primitives::hexdisplay::HexDisplay::from(&r), ascii_format(data)); + r + } + /// Build the storage out of this builder. + fn build_storage(self) -> Result<(StorageOverlay, ChildrenStorageOverlay), String> { + let mut storage = Default::default(); + let mut child_storage = Default::default(); + self.assimilate_storage(&mut storage, &mut child_storage)?; + Ok((storage, child_storage)) + } + /// Assimilate the storage for this module into pre-existing overlays. 
+ fn assimilate_storage( + self, + storage: &mut StorageOverlay, + child_storage: &mut ChildrenStorageOverlay, + ) -> Result<(), String>; } #[cfg(feature = "std")] impl BuildStorage for StorageOverlay { - fn build_storage(self) -> Result<(StorageOverlay, ChildrenStorageOverlay), String> { - Ok((self, Default::default())) - } - fn assimilate_storage(self, storage: &mut StorageOverlay, _child_storage: &mut ChildrenStorageOverlay) -> Result<(), String> { - storage.extend(self); - Ok(()) - } + fn build_storage(self) -> Result<(StorageOverlay, ChildrenStorageOverlay), String> { + Ok((self, Default::default())) + } + fn assimilate_storage( + self, + storage: &mut StorageOverlay, + _child_storage: &mut ChildrenStorageOverlay, + ) -> Result<(), String> { + storage.extend(self); + Ok(()) + } } /// Consensus engine unique ID. @@ -129,55 +140,61 @@ pub type ConsensusEngineId = [u8; 4]; pub struct Permill(u32); impl Permill { - /// Wraps the argument into `Permill` type. - pub fn from_millionths(x: u32) -> Permill { Permill(x) } + /// Wraps the argument into `Permill` type. + pub fn from_millionths(x: u32) -> Permill { + Permill(x) + } - /// Converts percents into `Permill`. - pub fn from_percent(x: u32) -> Permill { Permill(x * 10_000) } + /// Converts percents into `Permill`. + pub fn from_percent(x: u32) -> Permill { + Permill(x * 10_000) + } - /// Converts a fraction into `Permill`. - #[cfg(feature = "std")] - pub fn from_fraction(x: f64) -> Permill { Permill((x * 1_000_000.0) as u32) } + /// Converts a fraction into `Permill`. + #[cfg(feature = "std")] + pub fn from_fraction(x: f64) -> Permill { + Permill((x * 1_000_000.0) as u32) + } } impl ::rstd::ops::Mul for Permill where - N: traits::As + N: traits::As, { - type Output = N; - fn mul(self, b: N) -> Self::Output { - >::sa(b.as_().saturating_mul(self.0 as u64) / 1_000_000) - } + type Output = N; + fn mul(self, b: N) -> Self::Output { + >::sa(b.as_().saturating_mul(self.0 as u64) / 1_000_000) + } } #[cfg(feature = "std")] impl From for Permill { - fn from(x: f64) -> Permill { - Permill::from_fraction(x) - } + fn from(x: f64) -> Permill { + Permill::from_fraction(x) + } } #[cfg(feature = "std")] impl From for Permill { - fn from(x: f32) -> Permill { - Permill::from_fraction(x as f64) - } + fn from(x: f32) -> Permill { + Permill::from_fraction(x as f64) + } } impl codec::CompactAs for Permill { - type As = u32; - fn encode_as(&self) -> &u32 { - &self.0 - } - fn decode_from(x: u32) -> Permill { - Permill(x) - } + type As = u32; + fn encode_as(&self) -> &u32 { + &self.0 + } + fn decode_from(x: u32) -> Permill { + Permill(x) + } } impl From> for Permill { - fn from(x: codec::Compact) -> Permill { - x.0 - } + fn from(x: codec::Compact) -> Permill { + x.0 + } } /// Perbill is parts-per-billion. It stores a value between 0 and 1 in fixed point and @@ -187,71 +204,87 @@ impl From> for Permill { pub struct Perbill(u32); impl Perbill { - /// Nothing. - pub fn zero() -> Perbill { Perbill(0) } + /// Nothing. + pub fn zero() -> Perbill { + Perbill(0) + } - /// `true` if this is nothing. - pub fn is_zero(&self) -> bool { self.0 == 0 } + /// `true` if this is nothing. + pub fn is_zero(&self) -> bool { + self.0 == 0 + } - /// Everything. - pub fn one() -> Perbill { Perbill(1_000_000_000) } + /// Everything. + pub fn one() -> Perbill { + Perbill(1_000_000_000) + } - /// Construct new instance where `x` is in billionths. Value equivalent to `x / 1,000,000,000`. 
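// [Editor's note] The `Mul` impl for `Permill` above widens to u64,
// multiplies with saturation, then divides by 10^6. A reduced sketch, fixed
// to u64 in place of the generic `As<u64>` bound (`SketchPermill` is a
// hypothetical stand-in, not the real type):
#[derive(PartialEq, Debug, Clone, Copy)]
struct SketchPermill(u32);

impl SketchPermill {
    fn from_percent(x: u32) -> Self {
        SketchPermill(x * 10_000)
    }
}

impl std::ops::Mul<u64> for SketchPermill {
    type Output = u64;
    fn mul(self, b: u64) -> u64 {
        // Saturate first so huge inputs clamp instead of overflowing.
        b.saturating_mul(self.0 as u64) / 1_000_000
    }
}

fn main() {
    assert_eq!(SketchPermill::from_percent(50) * 1_000, 500);
    // Mirrors the `saturating_mul` test further down: the saturated product
    // makes 100% of u64::MAX come out as u64::MAX / 10^6, not u64::MAX.
    assert_eq!(
        SketchPermill::from_percent(100) * u64::max_value(),
        u64::max_value() / 1_000_000
    );
}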
- pub fn from_billionths(x: u32) -> Perbill { Perbill(x.min(1_000_000_000)) } + /// Construct new instance where `x` is in billionths. Value equivalent to `x / 1,000,000,000`. + pub fn from_billionths(x: u32) -> Perbill { + Perbill(x.min(1_000_000_000)) + } - /// Construct new instance where `x` is in millionths. Value equivalent to `x / 1,000,000`. - pub fn from_millionths(x: u32) -> Perbill { Perbill(x.min(1_000_000) * 1000) } + /// Construct new instance where `x` is in millionths. Value equivalent to `x / 1,000,000`. + pub fn from_millionths(x: u32) -> Perbill { + Perbill(x.min(1_000_000) * 1000) + } - /// Construct new instance where `x` is a percent. Value equivalent to `x%`. - pub fn from_percent(x: u32) -> Perbill { Perbill(x.min(100) * 10_000_000) } + /// Construct new instance where `x` is a percent. Value equivalent to `x%`. + pub fn from_percent(x: u32) -> Perbill { + Perbill(x.min(100) * 10_000_000) + } - #[cfg(feature = "std")] - /// Construct new instance whose value is equal to `x` (between 0 and 1). - pub fn from_fraction(x: f64) -> Perbill { Perbill((x.max(0.0).min(1.0) * 1_000_000_000.0) as u32) } + #[cfg(feature = "std")] + /// Construct new instance whose value is equal to `x` (between 0 and 1). + pub fn from_fraction(x: f64) -> Perbill { + Perbill((x.max(0.0).min(1.0) * 1_000_000_000.0) as u32) + } - #[cfg(feature = "std")] - /// Construct new instance whose value is equal to `n / d` (between 0 and 1). - pub fn from_rational(n: f64, d: f64) -> Perbill { Perbill(((n / d).max(0.0).min(1.0) * 1_000_000_000.0) as u32) } + #[cfg(feature = "std")] + /// Construct new instance whose value is equal to `n / d` (between 0 and 1). + pub fn from_rational(n: f64, d: f64) -> Perbill { + Perbill(((n / d).max(0.0).min(1.0) * 1_000_000_000.0) as u32) + } } impl ::rstd::ops::Mul for Perbill where - N: traits::As + N: traits::As, { - type Output = N; - fn mul(self, b: N) -> Self::Output { - >::sa(b.as_().saturating_mul(self.0 as u64) / 1_000_000_000) - } + type Output = N; + fn mul(self, b: N) -> Self::Output { + >::sa(b.as_().saturating_mul(self.0 as u64) / 1_000_000_000) + } } #[cfg(feature = "std")] impl From for Perbill { - fn from(x: f64) -> Perbill { - Perbill::from_fraction(x) - } + fn from(x: f64) -> Perbill { + Perbill::from_fraction(x) + } } #[cfg(feature = "std")] impl From for Perbill { - fn from(x: f32) -> Perbill { - Perbill::from_fraction(x as f64) - } + fn from(x: f32) -> Perbill { + Perbill::from_fraction(x as f64) + } } impl codec::CompactAs for Perbill { - type As = u32; - fn encode_as(&self) -> &u32 { - &self.0 - } - fn decode_from(x: u32) -> Perbill { - Perbill(x) - } + type As = u32; + fn encode_as(&self) -> &u32 { + &self.0 + } + fn decode_from(x: u32) -> Perbill { + Perbill(x) + } } impl From> for Perbill { - fn from(x: codec::Compact) -> Perbill { - x.0 - } + fn from(x: codec::Compact) -> Perbill { + x.0 + } } /// PerU128 is parts-per-u128-max-value. It stores a value between 0 and 1 in fixed point and @@ -263,84 +296,96 @@ pub struct PerU128(u128); const U128: u128 = u128::max_value(); impl PerU128 { - /// Nothing. - pub fn zero() -> Self { Self(0) } + /// Nothing. + pub fn zero() -> Self { + Self(0) + } - /// Everything. - pub fn one() -> Self { Self(U128) } + /// Everything. + pub fn one() -> Self { + Self(U128) + } - /// Construct new instance where `x` is parts in u128::max_value. Equal to x/U128::max_value. - pub fn from_max_value(x: u128) -> Self { Self(x) } + /// Construct new instance where `x` is parts in u128::max_value. Equal to x/U128::max_value. 
+ pub fn from_max_value(x: u128) -> Self { + Self(x) + } - /// Construct new instance where `x` is denominator and the nominator is 1. - pub fn from_xth(x: u128) -> Self { Self(U128/x.max(1)) } + /// Construct new instance where `x` is denominator and the nominator is 1. + pub fn from_xth(x: u128) -> Self { + Self(U128 / x.max(1)) + } } impl ::rstd::ops::Deref for PerU128 { - type Target = u128; + type Target = u128; - fn deref(&self) -> &u128 { + fn deref(&self) -> &u128 { &self.0 } } impl codec::CompactAs for PerU128 { - type As = u128; - fn encode_as(&self) -> &u128 { - &self.0 - } - fn decode_from(x: u128) -> PerU128 { - Self(x) - } + type As = u128; + fn encode_as(&self) -> &u128 { + &self.0 + } + fn decode_from(x: u128) -> PerU128 { + Self(x) + } } impl From> for PerU128 { - fn from(x: codec::Compact) -> PerU128 { - x.0 - } + fn from(x: codec::Compact) -> PerU128 { + x.0 + } } /// Signature verify that can work with any known signature types.. #[derive(Eq, PartialEq, Clone, Encode, Decode)] #[cfg_attr(feature = "std", derive(Debug))] pub enum MultiSignature { - /// An Ed25519 signature. - Ed25519(ed25519::Signature), - /// An Sr25519 signature. - Sr25519(sr25519::Signature), + /// An Ed25519 signature. + Ed25519(ed25519::Signature), + /// An Sr25519 signature. + Sr25519(sr25519::Signature), } impl Default for MultiSignature { - fn default() -> Self { - MultiSignature::Ed25519(Default::default()) - } + fn default() -> Self { + MultiSignature::Ed25519(Default::default()) + } } /// Public key for any known crypto algorithm. #[derive(Eq, PartialEq, Ord, PartialOrd, Clone, Encode, Decode)] #[cfg_attr(feature = "std", derive(Debug, Serialize, Deserialize))] pub enum MultiSigner { - /// An Ed25519 identity. - Ed25519(ed25519::Public), - /// An Sr25519 identity. - Sr25519(sr25519::Public), + /// An Ed25519 identity. + Ed25519(ed25519::Public), + /// An Sr25519 identity. + Sr25519(sr25519::Public), } impl Default for MultiSigner { - fn default() -> Self { - MultiSigner::Ed25519(Default::default()) - } + fn default() -> Self { + MultiSigner::Ed25519(Default::default()) + } } impl Verify for MultiSignature { - type Signer = MultiSigner; - fn verify>(&self, msg: L, signer: &Self::Signer) -> bool { - match (self, signer) { - (MultiSignature::Ed25519(ref sig), &MultiSigner::Ed25519(ref who)) => sig.verify(msg, who), - (MultiSignature::Sr25519(ref sig), &MultiSigner::Sr25519(ref who)) => sig.verify(msg, who), - _ => false, - } - } + type Signer = MultiSigner; + fn verify>(&self, msg: L, signer: &Self::Signer) -> bool { + match (self, signer) { + (MultiSignature::Ed25519(ref sig), &MultiSigner::Ed25519(ref who)) => { + sig.verify(msg, who) + } + (MultiSignature::Sr25519(ref sig), &MultiSigner::Sr25519(ref who)) => { + sig.verify(msg, who) + } + _ => false, + } + } } /// Signature verify that can work with any known signature types.. 
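// [Editor's note] The `Verify` impl for `MultiSignature` above only pairs a
// signature with a signer of the same scheme; every mixed pairing falls
// through to `false`. A schematic model with the actual crypto stubbed out
// (byte equality stands in for runtime_io's ed25519/sr25519 verification):
enum SigSketch {
    Ed25519(Vec<u8>),
    Sr25519(Vec<u8>),
}

enum SignerSketch {
    Ed25519(Vec<u8>),
    Sr25519(Vec<u8>),
}

fn verify_sketch(sig: &SigSketch, signer: &SignerSketch) -> bool {
    match (sig, signer) {
        (SigSketch::Ed25519(s), SignerSketch::Ed25519(p)) => s == p,
        (SigSketch::Sr25519(s), SignerSketch::Sr25519(p)) => s == p,
        // An ed25519 signature never verifies against an sr25519 key, and
        // vice versa, regardless of the bytes.
        _ => false,
    }
}

fn main() {
    let sig = SigSketch::Ed25519(vec![1, 2, 3]);
    assert!(verify_sketch(&sig, &SignerSketch::Ed25519(vec![1, 2, 3])));
    assert!(!verify_sketch(&sig, &SignerSketch::Sr25519(vec![1, 2, 3])));
}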
@@ -349,17 +394,17 @@ impl Verify for MultiSignature { pub struct AnySignature(H512); impl Verify for AnySignature { - type Signer = sr25519::Public; - fn verify>(&self, mut msg: L, signer: &sr25519::Public) -> bool { - runtime_io::sr25519_verify(self.0.as_fixed_bytes(), msg.get(), &signer.0) || - runtime_io::ed25519_verify(self.0.as_fixed_bytes(), msg.get(), &signer.0) - } + type Signer = sr25519::Public; + fn verify>(&self, mut msg: L, signer: &sr25519::Public) -> bool { + runtime_io::sr25519_verify(self.0.as_fixed_bytes(), msg.get(), &signer.0) + || runtime_io::ed25519_verify(self.0.as_fixed_bytes(), msg.get(), &signer.0) + } } impl From for AnySignature { - fn from(s: sr25519::Signature) -> AnySignature { - AnySignature(s.0.into()) - } + fn from(s: sr25519::Signature) -> AnySignature { + AnySignature(s.0.into()) + } } #[derive(Eq, PartialEq, Clone, Copy, Decode)] @@ -367,16 +412,16 @@ impl From for AnySignature { #[repr(u8)] /// Outcome of a valid extrinsic application. Capable of being sliced. pub enum ApplyOutcome { - /// Successful application (extrinsic reported no issue). - Success = 0, - /// Failed application (extrinsic was probably a no-op other than fees). - Fail = 1, + /// Successful application (extrinsic reported no issue). + Success = 0, + /// Failed application (extrinsic was probably a no-op other than fees). + Fail = 1, } impl codec::Encode for ApplyOutcome { - fn using_encoded R>(&self, f: F) -> R { - f(&[*self as u8]) - } + fn using_encoded R>(&self, f: F) -> R { + f(&[*self as u8]) + } } #[derive(Eq, PartialEq, Clone, Copy, Decode)] @@ -384,22 +429,22 @@ impl codec::Encode for ApplyOutcome { #[repr(u8)] /// Reason why an extrinsic couldn't be applied (i.e. invalid extrinsic). pub enum ApplyError { - /// Bad signature. - BadSignature = 0, - /// Nonce too low. - Stale = 1, - /// Nonce too high. - Future = 2, - /// Sending account had too low a balance. - CantPay = 3, - /// Block is full, no more extrinsics can be applied. - FullBlock = 255, + /// Bad signature. + BadSignature = 0, + /// Nonce too low. + Stale = 1, + /// Nonce too high. + Future = 2, + /// Sending account had too low a balance. + CantPay = 3, + /// Block is full, no more extrinsics can be applied. + FullBlock = 255, } impl codec::Encode for ApplyError { - fn using_encoded R>(&self, f: F) -> R { - f(&[*self as u8]) - } + fn using_encoded R>(&self, f: F) -> R { + f(&[*self as u8]) + } } /// Result from attempt to apply an extrinsic. @@ -407,26 +452,33 @@ pub type ApplyResult = Result; /// Verify a signature on an encoded value in a lazy manner. This can be /// an optimization if the signature scheme has an "unsigned" escape hash. -pub fn verify_encoded_lazy(sig: &V, item: &T, signer: &V::Signer) -> bool { - // The `Lazy` trait expresses something like `X: FnMut &'a T>`. - // unfortunately this is a lifetime relationship that can't - // be expressed without generic associated types, better unification of HRTBs in type position, - // and some kind of integration into the Fn* traits. - struct LazyEncode { - inner: F, - encoded: Option>, - } +pub fn verify_encoded_lazy( + sig: &V, + item: &T, + signer: &V::Signer, +) -> bool { + // The `Lazy` trait expresses something like `X: FnMut &'a T>`. + // unfortunately this is a lifetime relationship that can't + // be expressed without generic associated types, better unification of HRTBs in type position, + // and some kind of integration into the Fn* traits. 
+ struct LazyEncode { + inner: F, + encoded: Option>, + } - impl Vec> traits::Lazy<[u8]> for LazyEncode { - fn get(&mut self) -> &[u8] { - self.encoded.get_or_insert_with(&self.inner).as_slice() - } - } + impl Vec> traits::Lazy<[u8]> for LazyEncode { + fn get(&mut self) -> &[u8] { + self.encoded.get_or_insert_with(&self.inner).as_slice() + } + } - sig.verify( - LazyEncode { inner: || item.encode(), encoded: None }, - signer, - ) + sig.verify( + LazyEncode { + inner: || item.encode(), + encoded: None, + }, + signer, + ) } /// Helper macro for `impl_outer_config` @@ -634,149 +686,199 @@ pub struct OpaqueExtrinsic(pub Vec); #[cfg(feature = "std")] impl std::fmt::Debug for OpaqueExtrinsic { - fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(fmt, "{}", substrate_primitives::hexdisplay::HexDisplay::from(&self.0)) - } + fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { + write!( + fmt, + "{}", + substrate_primitives::hexdisplay::HexDisplay::from(&self.0) + ) + } } #[cfg(feature = "std")] impl ::serde::Serialize for OpaqueExtrinsic { - fn serialize(&self, seq: S) -> Result where S: ::serde::Serializer { - codec::Encode::using_encoded(&self.0, |bytes| ::substrate_primitives::bytes::serialize(bytes, seq)) - } + fn serialize(&self, seq: S) -> Result + where + S: ::serde::Serializer, + { + codec::Encode::using_encoded(&self.0, |bytes| { + ::substrate_primitives::bytes::serialize(bytes, seq) + }) + } } impl traits::Extrinsic for OpaqueExtrinsic { - fn is_signed(&self) -> Option { - None - } + fn is_signed(&self) -> Option { + None + } } #[cfg(test)] mod tests { - use substrate_primitives::hash::{H256, H512}; - use crate::codec::{Encode, Decode}; - use crate::traits::DigestItem; + use crate::codec::{Decode, Encode}; + use crate::traits::DigestItem; + use substrate_primitives::hash::{H256, H512}; - pub trait RuntimeT { - type AuthorityId; - } - - pub struct Runtime; - - impl RuntimeT for Runtime { - type AuthorityId = u64; - } - - mod a { - use super::RuntimeT; - use crate::codec::{Encode, Decode}; - use serde_derive::Serialize; - pub type Log = RawLog<::AuthorityId>; - - #[derive(Serialize, Debug, Encode, Decode, PartialEq, Eq, Clone)] - pub enum RawLog { A1(AuthorityId), AuthoritiesChange(Vec), A3(AuthorityId) } - } - - mod b { - use super::RuntimeT; - use crate::codec::{Encode, Decode}; - use serde_derive::Serialize; - pub type Log = RawLog<::AuthorityId>; + pub trait RuntimeT { + type AuthorityId; + } - #[derive(Serialize, Debug, Encode, Decode, PartialEq, Eq, Clone)] - pub enum RawLog { B1(AuthorityId), B2(AuthorityId) } - } + pub struct Runtime; - impl_outer_log! 
{ - pub enum Log(InternalLog: DigestItem) for Runtime { - a(AuthoritiesChange), b() - } - } + impl RuntimeT for Runtime { + type AuthorityId = u64; + } - #[test] - fn impl_outer_log_works() { - // encode/decode regular item - let b1: Log = b::RawLog::B1::(777).into(); - let encoded_b1 = b1.encode(); - let decoded_b1: Log = Decode::decode(&mut &encoded_b1[..]).unwrap(); - assert_eq!(b1, decoded_b1); - - // encode/decode system item - let auth_change: Log = a::RawLog::AuthoritiesChange::(vec![100, 200, 300]).into(); - let encoded_auth_change = auth_change.encode(); - let decoded_auth_change: Log = Decode::decode(&mut &encoded_auth_change[..]).unwrap(); - assert_eq!(auth_change, decoded_auth_change); - - // interpret regular item using `generic::DigestItem` - let generic_b1: super::generic::DigestItem = Decode::decode(&mut &encoded_b1[..]).unwrap(); - match generic_b1 { - super::generic::DigestItem::Other(_) => (), - _ => panic!("unexpected generic_b1: {:?}", generic_b1), - } + mod a { + use super::RuntimeT; + use crate::codec::{Decode, Encode}; + use serde_derive::Serialize; + pub type Log = RawLog<::AuthorityId>; + + #[derive(Serialize, Debug, Encode, Decode, PartialEq, Eq, Clone)] + pub enum RawLog { + A1(AuthorityId), + AuthoritiesChange(Vec), + A3(AuthorityId), + } + } - // interpret system item using `generic::DigestItem` - let generic_auth_change: super::generic::DigestItem = Decode::decode(&mut &encoded_auth_change[..]).unwrap(); - match generic_auth_change { - super::generic::DigestItem::AuthoritiesChange::(authorities) => assert_eq!(authorities, vec![100, 200, 300]), - _ => panic!("unexpected generic_auth_change: {:?}", generic_auth_change), - } + mod b { + use super::RuntimeT; + use crate::codec::{Decode, Encode}; + use serde_derive::Serialize; + pub type Log = RawLog<::AuthorityId>; + + #[derive(Serialize, Debug, Encode, Decode, PartialEq, Eq, Clone)] + pub enum RawLog { + B1(AuthorityId), + B2(AuthorityId), + } + } - // check that as-style methods are working with system items - assert!(auth_change.as_authorities_change().is_some()); + impl_outer_log! 
{ + pub enum Log(InternalLog: DigestItem) for Runtime { + a(AuthoritiesChange), b() + } + } - // check that as-style methods are not working with regular items - assert!(b1.as_authorities_change().is_none()); - } + #[test] + fn impl_outer_log_works() { + // encode/decode regular item + let b1: Log = b::RawLog::B1::(777).into(); + let encoded_b1 = b1.encode(); + let decoded_b1: Log = Decode::decode(&mut &encoded_b1[..]).unwrap(); + assert_eq!(b1, decoded_b1); + + // encode/decode system item + let auth_change: Log = a::RawLog::AuthoritiesChange::(vec![100, 200, 300]).into(); + let encoded_auth_change = auth_change.encode(); + let decoded_auth_change: Log = Decode::decode(&mut &encoded_auth_change[..]).unwrap(); + assert_eq!(auth_change, decoded_auth_change); + + // interpret regular item using `generic::DigestItem` + let generic_b1: super::generic::DigestItem = + Decode::decode(&mut &encoded_b1[..]).unwrap(); + match generic_b1 { + super::generic::DigestItem::Other(_) => (), + _ => panic!("unexpected generic_b1: {:?}", generic_b1), + } + + // interpret system item using `generic::DigestItem` + let generic_auth_change: super::generic::DigestItem = + Decode::decode(&mut &encoded_auth_change[..]).unwrap(); + match generic_auth_change { + super::generic::DigestItem::AuthoritiesChange::(authorities) => { + assert_eq!(authorities, vec![100, 200, 300]) + } + _ => panic!("unexpected generic_auth_change: {:?}", generic_auth_change), + } + + // check that as-style methods are working with system items + assert!(auth_change.as_authorities_change().is_some()); + + // check that as-style methods are not working with regular items + assert!(b1.as_authorities_change().is_none()); + } - #[test] - fn opaque_extrinsic_serialization() { - let ex = super::OpaqueExtrinsic(vec![1, 2, 3, 4]); - assert_eq!(serde_json::to_string(&ex).unwrap(), "\"0x1001020304\"".to_owned()); - } + #[test] + fn opaque_extrinsic_serialization() { + let ex = super::OpaqueExtrinsic(vec![1, 2, 3, 4]); + assert_eq!( + serde_json::to_string(&ex).unwrap(), + "\"0x1001020304\"".to_owned() + ); + } - #[test] - fn compact_permill_perbill_encoding() { - let tests = [(0u32, 1usize), (63, 1), (64, 2), (16383, 2), (16384, 4), (1073741823, 4), (1073741824, 5), (u32::max_value(), 5)]; - for &(n, l) in &tests { - let compact: crate::codec::Compact = super::Permill(n).into(); - let encoded = compact.encode(); - assert_eq!(encoded.len(), l); - let decoded = >::decode(&mut & encoded[..]).unwrap(); - let permill: super::Permill = decoded.into(); - assert_eq!(permill, super::Permill(n)); - - let compact: crate::codec::Compact = super::Perbill(n).into(); - let encoded = compact.encode(); - assert_eq!(encoded.len(), l); - let decoded = >::decode(&mut & encoded[..]).unwrap(); - let perbill: super::Perbill = decoded.into(); - assert_eq!(perbill, super::Perbill(n)); - } - } + #[test] + fn compact_permill_perbill_encoding() { + let tests = [ + (0u32, 1usize), + (63, 1), + (64, 2), + (16383, 2), + (16384, 4), + (1073741823, 4), + (1073741824, 5), + (u32::max_value(), 5), + ]; + for &(n, l) in &tests { + let compact: crate::codec::Compact = super::Permill(n).into(); + let encoded = compact.encode(); + assert_eq!(encoded.len(), l); + let decoded = + >::decode(&mut &encoded[..]).unwrap(); + let permill: super::Permill = decoded.into(); + assert_eq!(permill, super::Permill(n)); + + let compact: crate::codec::Compact = super::Perbill(n).into(); + let encoded = compact.encode(); + assert_eq!(encoded.len(), l); + let decoded = + >::decode(&mut &encoded[..]).unwrap(); 
+ let perbill: super::Perbill = decoded.into(); + assert_eq!(perbill, super::Perbill(n)); + } + } - #[derive(Encode, Decode, PartialEq, Eq, Debug)] - struct WithCompact { - data: T, - } + #[derive(Encode, Decode, PartialEq, Eq, Debug)] + struct WithCompact { + data: T, + } - #[test] - fn test_has_compact_permill() { - let data = WithCompact { data: super::Permill(1) }; - let encoded = data.encode(); - assert_eq!(data, WithCompact::::decode(&mut &encoded[..]).unwrap()); - } + #[test] + fn test_has_compact_permill() { + let data = WithCompact { + data: super::Permill(1), + }; + let encoded = data.encode(); + assert_eq!( + data, + WithCompact::::decode(&mut &encoded[..]).unwrap() + ); + } - #[test] - fn test_has_compact_perbill() { - let data = WithCompact { data: super::Perbill(1) }; - let encoded = data.encode(); - assert_eq!(data, WithCompact::::decode(&mut &encoded[..]).unwrap()); - } + #[test] + fn test_has_compact_perbill() { + let data = WithCompact { + data: super::Perbill(1), + }; + let encoded = data.encode(); + assert_eq!( + data, + WithCompact::::decode(&mut &encoded[..]).unwrap() + ); + } - #[test] - fn saturating_mul() { - assert_eq!(super::Perbill::one() * std::u64::MAX, std::u64::MAX/1_000_000_000); - assert_eq!(super::Permill::from_percent(100) * std::u64::MAX, std::u64::MAX/1_000_000); - } + #[test] + fn saturating_mul() { + assert_eq!( + super::Perbill::one() * std::u64::MAX, + std::u64::MAX / 1_000_000_000 + ); + assert_eq!( + super::Permill::from_percent(100) * std::u64::MAX, + std::u64::MAX / 1_000_000 + ); + } } diff --git a/core/sr-primitives/src/testing.rs b/core/sr-primitives/src/testing.rs index 2711c0e623..f5571c676a 100644 --- a/core/sr-primitives/src/testing.rs +++ b/core/sr-primitives/src/testing.rs @@ -16,35 +16,35 @@ //! Testing utilities. -use serde::{Serialize, Serializer, Deserialize, de::Error as DeError, Deserializer}; -use serde_derive::Serialize; +use crate::codec::{Codec, Decode, Encode}; +use crate::generic::DigestItem as GenDigestItem; +use crate::traits::{self, Applyable, BlakeTwo256, Checkable, Convert}; +use serde::{de::Error as DeError, Deserialize, Deserializer, Serialize, Serializer}; #[cfg(feature = "std")] use serde_derive::Deserialize; -use std::{fmt::Debug, ops::Deref, fmt}; -use crate::codec::{Codec, Encode, Decode}; -use crate::traits::{self, Checkable, Applyable, BlakeTwo256, Convert}; -use crate::generic::DigestItem as GenDigestItem; +use serde_derive::Serialize; +use std::{fmt, fmt::Debug, ops::Deref}; +use substrate_primitives::ed25519::{Public as AuthorityId, Signature as AuthoritySignature}; pub use substrate_primitives::H256; use substrate_primitives::U256; -use substrate_primitives::ed25519::{Public as AuthorityId, Signature as AuthoritySignature}; /// Authority Id #[derive(Default, PartialEq, Eq, Clone, Encode, Decode, Debug)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] pub struct UintAuthorityId(pub u64); impl Into for UintAuthorityId { - fn into(self) -> AuthorityId { - let bytes: [u8; 32] = U256::from(self.0).into(); - AuthorityId(bytes) - } + fn into(self) -> AuthorityId { + let bytes: [u8; 32] = U256::from(self.0).into(); + AuthorityId(bytes) + } } /// Converter between u64 and the AuthorityId wrapper type. 
pub struct ConvertUintAuthorityId; impl Convert> for ConvertUintAuthorityId { - fn convert(a: u64) -> Option { - Some(UintAuthorityId(a)) - } + fn convert(a: u64) -> Option { + Some(UintAuthorityId(a)) + } } /// Digest item pub type DigestItem = GenDigestItem; @@ -52,25 +52,25 @@ pub type DigestItem = GenDigestItem; /// Header Digest #[derive(Default, PartialEq, Eq, Clone, Serialize, Debug, Encode, Decode)] pub struct Digest { - /// Generated logs - pub logs: Vec, + /// Generated logs + pub logs: Vec, } impl traits::Digest for Digest { - type Hash = H256; - type Item = DigestItem; + type Hash = H256; + type Item = DigestItem; - fn logs(&self) -> &[Self::Item] { - &self.logs - } + fn logs(&self) -> &[Self::Item] { + &self.logs + } - fn push(&mut self, item: Self::Item) { - self.logs.push(item); - } + fn push(&mut self, item: Self::Item) { + self.logs.push(item); + } - fn pop(&mut self) -> Option { - self.logs.pop() - } + fn pop(&mut self) -> Option { + self.logs.pop() + } } /// Block Header @@ -78,62 +78,84 @@ impl traits::Digest for Digest { #[serde(rename_all = "camelCase")] #[serde(deny_unknown_fields)] pub struct Header { - /// Parent hash - pub parent_hash: H256, - /// Block Number - pub number: u64, - /// Post-execution state trie root - pub state_root: H256, - /// Merkle root of block's extrinsics - pub extrinsics_root: H256, - /// Digest items - pub digest: Digest, + /// Parent hash + pub parent_hash: H256, + /// Block Number + pub number: u64, + /// Post-execution state trie root + pub state_root: H256, + /// Merkle root of block's extrinsics + pub extrinsics_root: H256, + /// Digest items + pub digest: Digest, } impl traits::Header for Header { - type Number = u64; - type Hashing = BlakeTwo256; - type Hash = H256; - type Digest = Digest; - - fn number(&self) -> &Self::Number { &self.number } - fn set_number(&mut self, num: Self::Number) { self.number = num } - - fn extrinsics_root(&self) -> &Self::Hash { &self.extrinsics_root } - fn set_extrinsics_root(&mut self, root: Self::Hash) { self.extrinsics_root = root } - - fn state_root(&self) -> &Self::Hash { &self.state_root } - fn set_state_root(&mut self, root: Self::Hash) { self.state_root = root } - - fn parent_hash(&self) -> &Self::Hash { &self.parent_hash } - fn set_parent_hash(&mut self, hash: Self::Hash) { self.parent_hash = hash } - - fn digest(&self) -> &Self::Digest { &self.digest } - fn digest_mut(&mut self) -> &mut Self::Digest { &mut self.digest } - fn set_digest(&mut self, digest: Self::Digest) { self.digest = digest } - - fn new( - number: Self::Number, - extrinsics_root: Self::Hash, - state_root: Self::Hash, - parent_hash: Self::Hash, - digest: Self::Digest - ) -> Self { - Header { - number, - extrinsics_root: extrinsics_root, - state_root, - parent_hash, - digest - } - } + type Number = u64; + type Hashing = BlakeTwo256; + type Hash = H256; + type Digest = Digest; + + fn number(&self) -> &Self::Number { + &self.number + } + fn set_number(&mut self, num: Self::Number) { + self.number = num + } + + fn extrinsics_root(&self) -> &Self::Hash { + &self.extrinsics_root + } + fn set_extrinsics_root(&mut self, root: Self::Hash) { + self.extrinsics_root = root + } + + fn state_root(&self) -> &Self::Hash { + &self.state_root + } + fn set_state_root(&mut self, root: Self::Hash) { + self.state_root = root + } + + fn parent_hash(&self) -> &Self::Hash { + &self.parent_hash + } + fn set_parent_hash(&mut self, hash: Self::Hash) { + self.parent_hash = hash + } + + fn digest(&self) -> &Self::Digest { + &self.digest + } + fn 
digest_mut(&mut self) -> &mut Self::Digest { + &mut self.digest + } + fn set_digest(&mut self, digest: Self::Digest) { + self.digest = digest + } + + fn new( + number: Self::Number, + extrinsics_root: Self::Hash, + state_root: Self::Hash, + parent_hash: Self::Hash, + digest: Self::Digest, + ) -> Self { + Header { + number, + extrinsics_root: extrinsics_root, + state_root, + parent_hash, + digest, + } + } } impl<'a> Deserialize<'a> for Header { - fn deserialize>(de: D) -> Result { - let r = >::deserialize(de)?; - Decode::decode(&mut &r[..]).ok_or(DeError::custom("Invalid value passed into decode")) - } + fn deserialize>(de: D) -> Result { + let r = >::deserialize(de)?; + Decode::decode(&mut &r[..]).ok_or(DeError::custom("Invalid value passed into decode")) + } } /// An opaque extrinsic wrapper type. @@ -141,102 +163,122 @@ impl<'a> Deserialize<'a> for Header { pub struct ExtrinsicWrapper(Xt); impl traits::Extrinsic for ExtrinsicWrapper { - fn is_signed(&self) -> Option { - None - } + fn is_signed(&self) -> Option { + None + } } -impl serde::Serialize for ExtrinsicWrapper -{ - fn serialize(&self, seq: S) -> Result where S: ::serde::Serializer { - self.using_encoded(|bytes| seq.serialize_bytes(bytes)) - } +impl serde::Serialize for ExtrinsicWrapper { + fn serialize(&self, seq: S) -> Result + where + S: ::serde::Serializer, + { + self.using_encoded(|bytes| seq.serialize_bytes(bytes)) + } } impl From for ExtrinsicWrapper { - fn from(xt: Xt) -> Self { - ExtrinsicWrapper(xt) - } + fn from(xt: Xt) -> Self { + ExtrinsicWrapper(xt) + } } impl Deref for ExtrinsicWrapper { - type Target = Xt; + type Target = Xt; - fn deref(&self) -> &Self::Target { - &self.0 - } + fn deref(&self) -> &Self::Target { + &self.0 + } } /// Testing block #[derive(PartialEq, Eq, Clone, Serialize, Debug, Encode, Decode)] pub struct Block { - /// Block header - pub header: Header, - /// List of extrinsics - pub extrinsics: Vec, -} - -impl traits::Block for Block { - type Extrinsic = Xt; - type Header = Header; - type Hash =
::Hash; - - fn header(&self) -> &Self::Header { - &self.header - } - fn extrinsics(&self) -> &[Self::Extrinsic] { - &self.extrinsics[..] - } - fn deconstruct(self) -> (Self::Header, Vec) { - (self.header, self.extrinsics) - } - fn new(header: Self::Header, extrinsics: Vec) -> Self { - Block { header, extrinsics } - } -} - -impl<'a, Xt> Deserialize<'a> for Block where Block: Decode { - fn deserialize>(de: D) -> Result { - let r = >::deserialize(de)?; - Decode::decode(&mut &r[..]).ok_or(DeError::custom("Invalid value passed into decode")) - } + /// Block header + pub header: Header, + /// List of extrinsics + pub extrinsics: Vec, +} + +impl< + Xt: 'static + Codec + Sized + Send + Sync + Serialize + Clone + Eq + Debug + traits::Extrinsic, + > traits::Block for Block +{ + type Extrinsic = Xt; + type Header = Header; + type Hash =
::Hash; + + fn header(&self) -> &Self::Header { + &self.header + } + fn extrinsics(&self) -> &[Self::Extrinsic] { + &self.extrinsics[..] + } + fn deconstruct(self) -> (Self::Header, Vec) { + (self.header, self.extrinsics) + } + fn new(header: Self::Header, extrinsics: Vec) -> Self { + Block { header, extrinsics } + } +} + +impl<'a, Xt> Deserialize<'a> for Block +where + Block: Decode, +{ + fn deserialize>(de: D) -> Result { + let r = >::deserialize(de)?; + Decode::decode(&mut &r[..]).ok_or(DeError::custom("Invalid value passed into decode")) + } } /// Test transaction #[derive(PartialEq, Eq, Clone, Encode, Decode)] pub struct TestXt(pub Option, pub u64, pub Call); -impl Serialize for TestXt where TestXt: Encode +impl Serialize for TestXt +where + TestXt: Encode, { - fn serialize(&self, seq: S) -> Result where S: Serializer { - self.using_encoded(|bytes| seq.serialize_bytes(bytes)) - } + fn serialize(&self, seq: S) -> Result + where + S: Serializer, + { + self.using_encoded(|bytes| seq.serialize_bytes(bytes)) + } } impl Debug for TestXt { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "TestXt({:?}, {:?})", self.0, self.1) - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "TestXt({:?}, {:?})", self.0, self.1) + } } impl Checkable for TestXt { - type Checked = Self; - fn check(self, _: &Context) -> Result { Ok(self) } + type Checked = Self; + fn check(self, _: &Context) -> Result { + Ok(self) + } } impl traits::Extrinsic for TestXt { - fn is_signed(&self) -> Option { - None - } + fn is_signed(&self) -> Option { + None + } } -impl Applyable for TestXt where - Call: 'static + Sized + Send + Sync + Clone + Eq + Codec + Debug, +impl Applyable for TestXt +where + Call: 'static + Sized + Send + Sync + Clone + Eq + Codec + Debug, { - type AccountId = u64; - type Index = u64; - type Call = Call; - fn sender(&self) -> Option<&u64> { self.0.as_ref() } - fn index(&self) -> Option<&u64> { self.0.as_ref().map(|_| &self.1) } - fn deconstruct(self) -> (Self::Call, Option) { - (self.2, self.0) - } + type AccountId = u64; + type Index = u64; + type Call = Call; + fn sender(&self) -> Option<&u64> { + self.0.as_ref() + } + fn index(&self) -> Option<&u64> { + self.0.as_ref().map(|_| &self.1) + } + fn deconstruct(self) -> (Self::Call, Option) { + (self.2, self.0) + } } diff --git a/core/sr-primitives/src/traits.rs b/core/sr-primitives/src/traits.rs index b62bc067b6..5bd060ab68 100644 --- a/core/sr-primitives/src/traits.rs +++ b/core/sr-primitives/src/traits.rs @@ -16,140 +16,152 @@ //! Primitives for the runtime modules. 
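// [Editor's note] `TestXt` in testing.rs above (its generic parameters over
// `Call` were lost in this copy of the patch) reports a sender and index only
// when the optional signer in its first field is present. A self-contained
// sketch of that behaviour, with `Vec<u8>` standing in for the `Call` type:
#[derive(Debug, PartialEq, Clone)]
struct TestXtSketch(Option<u64>, u64, Vec<u8>);

impl TestXtSketch {
    fn sender(&self) -> Option<&u64> {
        self.0.as_ref()
    }
    fn index(&self) -> Option<&u64> {
        // The nonce is only meaningful for signed transactions.
        self.0.as_ref().map(|_| &self.1)
    }
    fn deconstruct(self) -> (Vec<u8>, Option<u64>) {
        (self.2, self.0)
    }
}

fn main() {
    let signed = TestXtSketch(Some(7), 3, vec![0xde, 0xad]);
    assert_eq!(signed.sender(), Some(&7));
    assert_eq!(signed.index(), Some(&3));

    let unsigned = TestXtSketch(None, 0, vec![]);
    assert_eq!(unsigned.sender(), None);
    assert_eq!(unsigned.index(), None);
    assert_eq!(unsigned.deconstruct(), (vec![], None));
}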
-use rstd::prelude::*; -use rstd::{self, result, marker::PhantomData}; -use runtime_io; -#[cfg(feature = "std")] use std::fmt::{Debug, Display}; -#[cfg(feature = "std")] use serde::{Serialize, de::DeserializeOwned}; -#[cfg(feature = "std")] -use serde_derive::{Serialize, Deserialize}; -use substrate_primitives::{self, Hasher, Blake2Hasher}; use crate::codec::{Codec, Encode, HasCompact}; pub use integer_sqrt::IntegerSquareRoot; pub use num_traits::{ - Zero, One, Bounded, CheckedAdd, CheckedSub, CheckedMul, CheckedDiv, - CheckedShl, CheckedShr, Saturating + Bounded, CheckedAdd, CheckedDiv, CheckedMul, CheckedShl, CheckedShr, CheckedSub, One, + Saturating, Zero, }; use rstd::ops::{ - Add, Sub, Mul, Div, Rem, AddAssign, SubAssign, MulAssign, DivAssign, - RemAssign, Shl, Shr + Add, AddAssign, Div, DivAssign, Mul, MulAssign, Rem, RemAssign, Shl, Shr, Sub, SubAssign, }; +use rstd::prelude::*; +use rstd::{self, marker::PhantomData, result}; +use runtime_io; +#[cfg(feature = "std")] +use serde::{de::DeserializeOwned, Serialize}; +#[cfg(feature = "std")] +use serde_derive::{Deserialize, Serialize}; +#[cfg(feature = "std")] +use std::fmt::{Debug, Display}; +use substrate_primitives::{self, Blake2Hasher, Hasher}; /// A lazy value. pub trait Lazy { - /// Get a reference to the underlying value. - /// - /// This will compute the value if the function is invoked for the first time. - fn get(&mut self) -> &T; + /// Get a reference to the underlying value. + /// + /// This will compute the value if the function is invoked for the first time. + fn get(&mut self) -> &T; } impl<'a> Lazy<[u8]> for &'a [u8] { - fn get(&mut self) -> &[u8] { &**self } + fn get(&mut self) -> &[u8] { + &**self + } } /// Means of signature verification. pub trait Verify { - /// Type of the signer. - type Signer; - /// Verify a signature. Return `true` if signature is valid for the value. - fn verify>(&self, msg: L, signer: &Self::Signer) -> bool; + /// Type of the signer. + type Signer; + /// Verify a signature. Return `true` if signature is valid for the value. + fn verify>(&self, msg: L, signer: &Self::Signer) -> bool; } impl Verify for substrate_primitives::ed25519::Signature { - type Signer = substrate_primitives::ed25519::Public; - fn verify>(&self, mut msg: L, signer: &Self::Signer) -> bool { - runtime_io::ed25519_verify(self.as_ref(), msg.get(), signer) - } + type Signer = substrate_primitives::ed25519::Public; + fn verify>(&self, mut msg: L, signer: &Self::Signer) -> bool { + runtime_io::ed25519_verify(self.as_ref(), msg.get(), signer) + } } impl Verify for substrate_primitives::sr25519::Signature { - type Signer = substrate_primitives::sr25519::Public; - fn verify>(&self, mut msg: L, signer: &Self::Signer) -> bool { - runtime_io::sr25519_verify(self.as_ref(), msg.get(), signer) - } + type Signer = substrate_primitives::sr25519::Public; + fn verify>(&self, mut msg: L, signer: &Self::Signer) -> bool { + runtime_io::sr25519_verify(self.as_ref(), msg.get(), signer) + } } /// Some sort of check on the origin is performed by this object. pub trait EnsureOrigin { - /// A return type. - type Success; - /// Perform the origin check. - fn ensure_origin(o: OuterOrigin) -> result::Result; + /// A return type. + type Success; + /// Perform the origin check. + fn ensure_origin(o: OuterOrigin) -> result::Result; } /// Means of changing one type into another in a manner dependent on the source type. pub trait Lookup { - /// Type to lookup from. - type Source; - /// Type to lookup into. - type Target; - /// Attempt a lookup. 
- fn lookup(&self, s: Self::Source) -> result::Result; + /// Type to lookup from. + type Source; + /// Type to lookup into. + type Target; + /// Attempt a lookup. + fn lookup(&self, s: Self::Source) -> result::Result; } /// Means of changing one type into another in a manner dependent on the source type. /// This variant is different to `Lookup` in that it doesn't (and cannot) require any /// context. pub trait StaticLookup { - /// Type to lookup from. - type Source: Codec + Clone + PartialEq + MaybeDebug; - /// Type to lookup into. - type Target; - /// Attempt a lookup. - fn lookup(s: Self::Source) -> result::Result; - /// Convert from Target back to Source. - fn unlookup(t: Self::Target) -> Self::Source; + /// Type to lookup from. + type Source: Codec + Clone + PartialEq + MaybeDebug; + /// Type to lookup into. + type Target; + /// Attempt a lookup. + fn lookup(s: Self::Source) -> result::Result; + /// Convert from Target back to Source. + fn unlookup(t: Self::Target) -> Self::Source; } /// A lookup implementation returning the input value. #[derive(Default)] pub struct IdentityLookup(PhantomData); impl StaticLookup for IdentityLookup { - type Source = T; - type Target = T; - fn lookup(x: T) -> result::Result { Ok(x) } - fn unlookup(x: T) -> T { x } + type Source = T; + type Target = T; + fn lookup(x: T) -> result::Result { + Ok(x) + } + fn unlookup(x: T) -> T { + x + } } impl Lookup for IdentityLookup { - type Source = T; - type Target = T; - fn lookup(&self, x: T) -> result::Result { Ok(x) } + type Source = T; + type Target = T; + fn lookup(&self, x: T) -> result::Result { + Ok(x) + } } /// Get the "current" block number. pub trait CurrentHeight { - /// The type of the block number. - type BlockNumber; + /// The type of the block number. + type BlockNumber; - /// Return the current block number. Not allowed to fail. - fn current_height(&self) -> Self::BlockNumber; + /// Return the current block number. Not allowed to fail. + fn current_height(&self) -> Self::BlockNumber; } /// Translate a block number into a hash. pub trait BlockNumberToHash { - /// The type of the block number. - type BlockNumber: Zero; + /// The type of the block number. + type BlockNumber: Zero; - /// The type of the hash. - type Hash: Encode; + /// The type of the hash. + type Hash: Encode; - /// Get the hash for a given block number, or `None` if unknown. - fn block_number_to_hash(&self, n: Self::BlockNumber) -> Option; + /// Get the hash for a given block number, or `None` if unknown. + fn block_number_to_hash(&self, n: Self::BlockNumber) -> Option; - /// Get the genesis block hash; this should always be known. - fn genesis_hash(&self) -> Self::Hash { - self.block_number_to_hash(Zero::zero()).expect("All blockchains must know their genesis block hash; qed") - } + /// Get the genesis block hash; this should always be known. + fn genesis_hash(&self) -> Self::Hash { + self.block_number_to_hash(Zero::zero()) + .expect("All blockchains must know their genesis block hash; qed") + } } /// Extensible conversion trait. Generic over both source and destination types. pub trait Convert { - /// Make conversion.
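// Illustrative sketch, not part of the patch: a custom `StaticLookup`
// alongside `IdentityLookup` above. It widens a `u32` source into a `u64`
// target; a real chain would consult an on-chain index map instead. The
// `&'static str` error type is an assumption matching the trait's
// signature in Substrate of this period (the diff's extracted text drops
// the generic parameters).
pub struct WideningLookup;

impl StaticLookup for WideningLookup {
    type Source = u32;
    type Target = u64;
    fn lookup(s: u32) -> result::Result<u64, &'static str> {
        Ok(u64::from(s))
    }
    fn unlookup(t: u64) -> u32 {
        // Lossy inverse for illustration only.
        t as u32
    }
}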
+ fn convert(a: A) -> B; } impl Convert for () { - fn convert(_: A) -> B { Default::default() } + fn convert(_: A) -> B { + Default::default() + } } /// A structure that converts the currency type into a lossy u64 @@ -157,43 +169,44 @@ impl Convert for () { pub struct CurrencyToVoteHandler; impl Convert for CurrencyToVoteHandler { - fn convert(x: u128) -> u64 { - if x >> 96 == 0 { - // Remove dust; divide by 2^32 - (x >> 32) as u64 - } else { - u64::max_value() - } - } + fn convert(x: u128) -> u64 { + if x >> 96 == 0 { + // Remove dust; divide by 2^32 + (x >> 32) as u64 + } else { + u64::max_value() + } + } } impl Convert for CurrencyToVoteHandler { - fn convert(x: u128) -> u128 { - // if it practically fits in u64 - if x >> 64 == 0 { - // Add zero dust; multiply by 2^32 - x << 32 - } - else { - // 0000_0000_FFFF_FFFF_FFFF_FFFF_0000_0000 - (u64::max_value() << 32) as u128 - } - } + fn convert(x: u128) -> u128 { + // if it practically fits in u64 + if x >> 64 == 0 { + // Add zero dust; multiply by 2^32 + x << 32 + } else { + // 0000_0000_FFFF_FFFF_FFFF_FFFF_0000_0000 + (u64::max_value() << 32) as u128 + } + } } /// A structure that performs identity conversion. pub struct Identity; impl Convert for Identity { - fn convert(a: T) -> T { a } + fn convert(a: T) -> T { + a + } } /// Simple trait similar to `Into`, except that it can be used to convert numerics between each /// other. pub trait As { - /// Convert forward (ala `Into::into`). - fn as_(self) -> T; - /// Convert backward (ala `From::from`). - fn sa(_: T) -> Self; + /// Convert forward (ala `Into::into`). + fn as_(self) -> T; + /// Convert backward (ala `From::from`). + fn sa(_: T) -> Self; } macro_rules! impl_numerics { @@ -216,76 +229,110 @@ impl_numerics!(u8, u16, u32, u64, u128, usize, i8, i16, i32, i64, i128, isize); /// A meta trait for arithmetic. pub trait SimpleArithmetic: - Zero + One + IntegerSquareRoot + As + - Add + AddAssign + - Sub + SubAssign + - Mul + MulAssign + - Div + DivAssign + - Rem + RemAssign + - Shl + Shr + - CheckedShl + - CheckedShr + - CheckedAdd + - CheckedSub + - CheckedMul + - CheckedDiv + - Saturating + - PartialOrd + Ord + Bounded + - HasCompact -{} -impl + - Add + AddAssign + - Sub + SubAssign + - Mul + MulAssign + - Div + DivAssign + - Rem + RemAssign + - Shl + Shr + - CheckedShl + - CheckedShr + - CheckedAdd + - CheckedSub + - CheckedMul + - CheckedDiv + - Saturating + - PartialOrd + Ord + Bounded + - HasCompact -> SimpleArithmetic for T {} + Zero + + One + + IntegerSquareRoot + + As + + Add + + AddAssign + + Sub + + SubAssign + + Mul + + MulAssign + + Div + + DivAssign + + Rem + + RemAssign + + Shl + + Shr + + CheckedShl + + CheckedShr + + CheckedAdd + + CheckedSub + + CheckedMul + + CheckedDiv + + Saturating + + PartialOrd + + Ord + + Bounded + + HasCompact +{ +} +impl< + T: Zero + + One + + IntegerSquareRoot + + As + + Add + + AddAssign + + Sub + + SubAssign + + Mul + + MulAssign + + Div + + DivAssign + + Rem + + RemAssign + + Shl + + Shr + + CheckedShl + + CheckedShr + + CheckedAdd + + CheckedSub + + CheckedMul + + CheckedDiv + + Saturating + + PartialOrd + + Ord + + Bounded + + HasCompact, + > SimpleArithmetic for T +{ +} /// Trait for things that can be clear (have no bits set). For numeric types, essentially the same /// as `Zero`. pub trait Clear { - /// True iff no bits are set. - fn is_clear(&self) -> bool; + /// True iff no bits are set. + fn is_clear(&self) -> bool; - /// Return the value of Self that is clear. - fn clear() -> Self; + /// Return the value of Self that is clear. 
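// Worked example, not part of the patch: the `CurrencyToVoteHandler`
// conversions above are lossy but order-preserving. u128 -> u64 drops the
// low 32 bits ("dust") and saturates at or above 2^96; u128 -> u128 shifts
// back up, so a round trip loses only the dust. Written with `Convert`'s
// two type parameters, which the extracted diff text renders without
// angle brackets.
fn currency_to_vote_demo() {
    let small: u128 = 5 << 32; // below 2^96: divided by 2^32
    assert_eq!(<CurrencyToVoteHandler as Convert<u128, u64>>::convert(small), 5);

    let huge: u128 = 1 << 100; // at or above 2^96: saturates
    assert_eq!(
        <CurrencyToVoteHandler as Convert<u128, u64>>::convert(huge),
        u64::max_value()
    );
}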
+ fn clear() -> Self; } impl Clear for T { - fn is_clear(&self) -> bool { *self == Self::clear() } - fn clear() -> Self { Default::default() } + fn is_clear(&self) -> bool { + *self == Self::clear() + } + fn clear() -> Self { + Default::default() + } } /// A meta trait for all bit ops. pub trait SimpleBitOps: - Sized + Clear + - rstd::ops::BitOr + - rstd::ops::BitXor + - rstd::ops::BitAnd -{} -impl + - rstd::ops::BitXor + - rstd::ops::BitAnd -> SimpleBitOps for T {} + Sized + + Clear + + rstd::ops::BitOr + + rstd::ops::BitXor + + rstd::ops::BitAnd +{ +} +impl< + T: Sized + + Clear + + rstd::ops::BitOr + + rstd::ops::BitXor + + rstd::ops::BitAnd, + > SimpleBitOps for T +{ +} /// The block finalization trait. Implementing this lets you express what should happen /// for your module when the block is ending. pub trait OnFinalize { - /// The block is being finalized. Implement to have something happen. - fn on_finalize(_n: BlockNumber) {} + /// The block is being finalized. Implement to have something happen. + fn on_finalize(_n: BlockNumber) {} } impl OnFinalize for () {} @@ -293,8 +340,8 @@ impl OnFinalize for () {} /// The block initialization trait. Implementing this lets you express what should happen /// for your module when the block is beginning (right before the first extrinsic is executed). pub trait OnInitialize { - /// The block is being initialized. Implement to have something happen. - fn on_initialize(_n: BlockNumber) {} + /// The block is being initialized. Implement to have something happen. + fn on_initialize(_n: BlockNumber) {} } impl OnInitialize for () {} @@ -308,11 +355,11 @@ impl OnInitialize for () {} /// NOTE: This function runs off-chain, so it can access the block state, /// but cannot preform any alterations. pub trait OffchainWorker { - /// This function is being called on every block. - /// - /// Implement this and use special `extern`s to generate transactions or inherents. - /// Any state alterations are lost and are not persisted. - fn generate_extrinsics(_n: BlockNumber) {} + /// This function is being called on every block. + /// + /// Implement this and use special `extern`s to generate transactions or inherents. + /// Any state alterations are lost and are not persisted. + fn generate_extrinsics(_n: BlockNumber) {} } impl OffchainWorker for () {} @@ -374,43 +421,47 @@ macro_rules! tuple_impl { tuple_impl!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W, X, Y, Z,); /// Abstraction around hashing -pub trait Hash: 'static + MaybeSerializeDebug + Clone + Eq + PartialEq { // Stupid bug in the Rust compiler believes derived - // traits must be fulfilled by all type parameters. - /// The hash type produced. - type Output: Member + MaybeSerializeDebug + rstd::hash::Hash + AsRef<[u8]> + AsMut<[u8]> + Copy + Default; +pub trait Hash: 'static + MaybeSerializeDebug + Clone + Eq + PartialEq { + // Stupid bug in the Rust compiler believes derived + // traits must be fulfilled by all type parameters. + /// The hash type produced. + type Output: Member + + MaybeSerializeDebug + + rstd::hash::Hash + + AsRef<[u8]> + + AsMut<[u8]> + + Copy + + Default; - /// The associated hash_db Hasher type. - type Hasher: Hasher; + /// The associated hash_db Hasher type. + type Hasher: Hasher; - /// Produce the hash of some byte-slice. - fn hash(s: &[u8]) -> Self::Output; + /// Produce the hash of some byte-slice. + fn hash(s: &[u8]) -> Self::Output; - /// Produce the hash of some codec-encodable value. 
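// Illustrative sketch, not part of the patch: wiring a hypothetical module
// into the block lifecycle hooks defined above. The default no-op bodies
// mean a module only overrides what it needs, and the `()` impls let a
// runtime leave a hook slot empty.
struct MyModule;

impl OnInitialize<u64> for MyModule {
    fn on_initialize(n: u64) {
        // e.g. reset per-block counters for block `n`.
        let _ = n;
    }
}

impl OnFinalize<u64> for MyModule {
    fn on_finalize(n: u64) {
        // e.g. prune bookkeeping accumulated during block `n`.
        let _ = n;
    }
}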
- fn hash_of(s: &S) -> Self::Output { - Encode::using_encoded(s, Self::hash) - } + /// Produce the hash of some codec-encodable value. + fn hash_of(s: &S) -> Self::Output { + Encode::using_encoded(s, Self::hash) + } - /// Produce the trie-db root of a mapping from indices to byte slices. - fn enumerated_trie_root(items: &[&[u8]]) -> Self::Output; + /// Produce the trie-db root of a mapping from indices to byte slices. + fn enumerated_trie_root(items: &[&[u8]]) -> Self::Output; - /// Iterator-based version of `enumerated_trie_root`. - fn ordered_trie_root< - I: IntoIterator + Iterator, - A: AsRef<[u8]> - >(input: I) -> Self::Output; + /// Iterator-based version of `enumerated_trie_root`. + fn ordered_trie_root + Iterator, A: AsRef<[u8]>>( + input: I, + ) -> Self::Output; - /// The Patricia tree root of the given mapping as an iterator. - fn trie_root< - I: IntoIterator, - A: AsRef<[u8]> + Ord, - B: AsRef<[u8]> - >(input: I) -> Self::Output; + /// The Patricia tree root of the given mapping as an iterator. + fn trie_root, A: AsRef<[u8]> + Ord, B: AsRef<[u8]>>( + input: I, + ) -> Self::Output; - /// Acquire the global storage root. - fn storage_root() -> Self::Output; + /// Acquire the global storage root. + fn storage_root() -> Self::Output; - /// Acquire the global storage changes root. - fn storage_changes_root(parent_hash: Self::Output, parent_number: u64) -> Option; + /// Acquire the global storage changes root. + fn storage_changes_root(parent_hash: Self::Output, parent_number: u64) -> Option; } /// Blake2-256 Hash implementation. @@ -419,76 +470,80 @@ pub trait Hash: 'static + MaybeSerializeDebug + Clone + Eq + PartialEq { // Stup pub struct BlakeTwo256; impl Hash for BlakeTwo256 { - type Output = substrate_primitives::H256; - type Hasher = Blake2Hasher; - fn hash(s: &[u8]) -> Self::Output { - runtime_io::blake2_256(s).into() - } - fn enumerated_trie_root(items: &[&[u8]]) -> Self::Output { - runtime_io::enumerated_trie_root::(items).into() - } - fn trie_root< - I: IntoIterator, - A: AsRef<[u8]> + Ord, - B: AsRef<[u8]> - >(input: I) -> Self::Output { - runtime_io::trie_root::(input).into() - } - fn ordered_trie_root< - I: IntoIterator + Iterator, - A: AsRef<[u8]> - >(input: I) -> Self::Output { - runtime_io::ordered_trie_root::(input).into() - } - fn storage_root() -> Self::Output { - runtime_io::storage_root().into() - } - fn storage_changes_root(parent_hash: Self::Output, parent_number: u64) -> Option { - runtime_io::storage_changes_root(parent_hash.into(), parent_number).map(Into::into) - } + type Output = substrate_primitives::H256; + type Hasher = Blake2Hasher; + fn hash(s: &[u8]) -> Self::Output { + runtime_io::blake2_256(s).into() + } + fn enumerated_trie_root(items: &[&[u8]]) -> Self::Output { + runtime_io::enumerated_trie_root::(items).into() + } + fn trie_root, A: AsRef<[u8]> + Ord, B: AsRef<[u8]>>( + input: I, + ) -> Self::Output { + runtime_io::trie_root::(input).into() + } + fn ordered_trie_root + Iterator, A: AsRef<[u8]>>( + input: I, + ) -> Self::Output { + runtime_io::ordered_trie_root::(input).into() + } + fn storage_root() -> Self::Output { + runtime_io::storage_root().into() + } + fn storage_changes_root(parent_hash: Self::Output, parent_number: u64) -> Option { + runtime_io::storage_changes_root(parent_hash.into(), parent_number).map(Into::into) + } } /// Something that can be checked for equality and printed out to a debug channel if bad. pub trait CheckEqual { - /// Perform the equality check. - fn check_equal(&self, other: &Self); + /// Perform the equality check. 
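// Illustrative sketch, not part of the patch: the two entry points of the
// `Hash` abstraction as implemented by `BlakeTwo256` above. `hash` digests
// raw bytes, while `hash_of` SCALE-encodes any `Codec` value first, so
// equal values always hash equally.
fn hashing_demo() {
    let from_bytes = BlakeTwo256::hash(b"hello");
    let from_value = BlakeTwo256::hash_of(&42u64);
    // Both results are `substrate_primitives::H256` values.
    assert_ne!(from_bytes, from_value);
}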
+ fn check_equal(&self, other: &Self); } impl CheckEqual for substrate_primitives::H256 { - #[cfg(feature = "std")] - fn check_equal(&self, other: &Self) { - use substrate_primitives::hexdisplay::HexDisplay; - if self != other { - println!("Hash: given={}, expected={}", HexDisplay::from(self.as_fixed_bytes()), HexDisplay::from(other.as_fixed_bytes())); - } - } - - #[cfg(not(feature = "std"))] - fn check_equal(&self, other: &Self) { - if self != other { - runtime_io::print("Hash not equal"); - runtime_io::print(self.as_bytes()); - runtime_io::print(other.as_bytes()); - } - } -} - -impl CheckEqual for I where I: DigestItem { - #[cfg(feature = "std")] - fn check_equal(&self, other: &Self) { - if self != other { - println!("DigestItem: given={:?}, expected={:?}", self, other); - } - } - - #[cfg(not(feature = "std"))] - fn check_equal(&self, other: &Self) { - if self != other { - runtime_io::print("DigestItem not equal"); - runtime_io::print(&Encode::encode(self)[..]); - runtime_io::print(&Encode::encode(other)[..]); - } - } + #[cfg(feature = "std")] + fn check_equal(&self, other: &Self) { + use substrate_primitives::hexdisplay::HexDisplay; + if self != other { + println!( + "Hash: given={}, expected={}", + HexDisplay::from(self.as_fixed_bytes()), + HexDisplay::from(other.as_fixed_bytes()) + ); + } + } + + #[cfg(not(feature = "std"))] + fn check_equal(&self, other: &Self) { + if self != other { + runtime_io::print("Hash not equal"); + runtime_io::print(self.as_bytes()); + runtime_io::print(other.as_bytes()); + } + } +} + +impl CheckEqual for I +where + I: DigestItem, +{ + #[cfg(feature = "std")] + fn check_equal(&self, other: &Self) { + if self != other { + println!("DigestItem: given={:?}, expected={:?}", self, other); + } + } + + #[cfg(not(feature = "std"))] + fn check_equal(&self, other: &Self) { + if self != other { + runtime_io::print("DigestItem not equal"); + runtime_io::print(&Encode::encode(self)[..]); + runtime_io::print(&Encode::encode(other)[..]); + } + } } /// A type that implements Serialize and Debug when in std environment. @@ -563,7 +618,6 @@ pub trait MaybeHash {} #[cfg(not(feature = "std"))] impl MaybeHash for T {} - /// A type that can be used in runtime structures. pub trait Member: Send + Sync + Sized + MaybeDebug + Eq + PartialEq + Clone + 'static {} impl Member for T {} @@ -573,89 +627,119 @@ impl Mem /// `parent_hash`, as well as a `digest` and a block `number`. /// /// You can also create a `new` one from those fields. -pub trait Header: Clone + Send + Sync + Codec + Eq + MaybeSerializeDebugButNotDeserialize + 'static { - /// Header number. - type Number: Member + MaybeSerializeDebug + ::rstd::hash::Hash + Copy + MaybeDisplay + SimpleArithmetic + Codec; - /// Header hash type - type Hash: Member + MaybeSerializeDebug + ::rstd::hash::Hash + Copy + MaybeDisplay + Default + SimpleBitOps + Codec + AsRef<[u8]> + AsMut<[u8]>; - /// Hashing algorithm - type Hashing: Hash; - /// Digest type - type Digest: Digest + Codec; - - /// Creates new header. - fn new( - number: Self::Number, - extrinsics_root: Self::Hash, - state_root: Self::Hash, - parent_hash: Self::Hash, - digest: Self::Digest - ) -> Self; - - /// Returns a reference to the header number. - fn number(&self) -> &Self::Number; - /// Sets the header number. - fn set_number(&mut self, number: Self::Number); - - /// Returns a reference to the extrinsics root. - fn extrinsics_root(&self) -> &Self::Hash; - /// Sets the extrinsic root. 
- fn set_extrinsics_root(&mut self, root: Self::Hash); - - /// Returns a reference to the state root. - fn state_root(&self) -> &Self::Hash; - /// Sets the state root. - fn set_state_root(&mut self, root: Self::Hash); - - /// Returns a reference to the parent hash. - fn parent_hash(&self) -> &Self::Hash; - /// Sets the parent hash. - fn set_parent_hash(&mut self, hash: Self::Hash); - - /// Returns a reference to the digest. - fn digest(&self) -> &Self::Digest; - /// Get a mutable reference to the digest. - fn digest_mut(&mut self) -> &mut Self::Digest; - /// Sets the digest. - fn set_digest(&mut self, digest: Self::Digest); - - /// Returns the hash of the header. - fn hash(&self) -> Self::Hash { - ::hash_of(self) - } +pub trait Header: + Clone + Send + Sync + Codec + Eq + MaybeSerializeDebugButNotDeserialize + 'static +{ + /// Header number. + type Number: Member + + MaybeSerializeDebug + + ::rstd::hash::Hash + + Copy + + MaybeDisplay + + SimpleArithmetic + + Codec; + /// Header hash type + type Hash: Member + + MaybeSerializeDebug + + ::rstd::hash::Hash + + Copy + + MaybeDisplay + + Default + + SimpleBitOps + + Codec + + AsRef<[u8]> + + AsMut<[u8]>; + /// Hashing algorithm + type Hashing: Hash; + /// Digest type + type Digest: Digest + Codec; + + /// Creates new header. + fn new( + number: Self::Number, + extrinsics_root: Self::Hash, + state_root: Self::Hash, + parent_hash: Self::Hash, + digest: Self::Digest, + ) -> Self; + + /// Returns a reference to the header number. + fn number(&self) -> &Self::Number; + /// Sets the header number. + fn set_number(&mut self, number: Self::Number); + + /// Returns a reference to the extrinsics root. + fn extrinsics_root(&self) -> &Self::Hash; + /// Sets the extrinsic root. + fn set_extrinsics_root(&mut self, root: Self::Hash); + + /// Returns a reference to the state root. + fn state_root(&self) -> &Self::Hash; + /// Sets the state root. + fn set_state_root(&mut self, root: Self::Hash); + + /// Returns a reference to the parent hash. + fn parent_hash(&self) -> &Self::Hash; + /// Sets the parent hash. + fn set_parent_hash(&mut self, hash: Self::Hash); + + /// Returns a reference to the digest. + fn digest(&self) -> &Self::Digest; + /// Get a mutable reference to the digest. + fn digest_mut(&mut self) -> &mut Self::Digest; + /// Sets the digest. + fn set_digest(&mut self, digest: Self::Digest); + + /// Returns the hash of the header. + fn hash(&self) -> Self::Hash { + ::hash_of(self) + } } /// Something which fulfills the abstract idea of a Substrate block. It has types for an /// `Extrinsic` piece of information as well as a `Header`. /// /// You can get an iterator over each of the `extrinsics` and retrieve the `header`. -pub trait Block: Clone + Send + Sync + Codec + Eq + MaybeSerializeDebugButNotDeserialize + 'static { - /// Type of extrinsics. - type Extrinsic: Member + Codec + Extrinsic + MaybeSerialize; - /// Header type. - type Header: Header; - /// Block hash type. - type Hash: Member + MaybeSerializeDebug + ::rstd::hash::Hash + Copy + MaybeDisplay + Default + SimpleBitOps + Codec + AsRef<[u8]> + AsMut<[u8]>; - - /// Returns a reference to the header. - fn header(&self) -> &Self::Header; - /// Returns a reference to the list of extrinsics. - fn extrinsics(&self) -> &[Self::Extrinsic]; - /// Split the block into header and list of extrinsics. - fn deconstruct(self) -> (Self::Header, Vec); - /// Creates new block from header and extrinsics. - fn new(header: Self::Header, extrinsics: Vec) -> Self; - /// Returns the hash of the block. 
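// Illustrative sketch, not part of the patch: the `Block` trait above
// guarantees that `deconstruct` and `new` are inverses, which block
// authorship relies on when reassembling a block from a freshly produced
// header plus the extrinsics pulled from the pool.
fn reassemble<B: Block>(b: B) -> B {
    let (header, extrinsics) = b.deconstruct();
    B::new(header, extrinsics)
}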
- fn hash(&self) -> Self::Hash { - <::Hashing as Hash>::hash_of(self.header()) - } +pub trait Block: + Clone + Send + Sync + Codec + Eq + MaybeSerializeDebugButNotDeserialize + 'static +{ + /// Type of extrinsics. + type Extrinsic: Member + Codec + Extrinsic + MaybeSerialize; + /// Header type. + type Header: Header; + /// Block hash type. + type Hash: Member + + MaybeSerializeDebug + + ::rstd::hash::Hash + + Copy + + MaybeDisplay + + Default + + SimpleBitOps + + Codec + + AsRef<[u8]> + + AsMut<[u8]>; + + /// Returns a reference to the header. + fn header(&self) -> &Self::Header; + /// Returns a reference to the list of extrinsics. + fn extrinsics(&self) -> &[Self::Extrinsic]; + /// Split the block into header and list of extrinsics. + fn deconstruct(self) -> (Self::Header, Vec); + /// Creates new block from header and extrinsics. + fn new(header: Self::Header, extrinsics: Vec) -> Self; + /// Returns the hash of the block. + fn hash(&self) -> Self::Hash { + <::Hashing as Hash>::hash_of(self.header()) + } } /// Something that acts like an `Extrinsic`. pub trait Extrinsic { - /// Is this `Extrinsic` signed? - /// If no information are available about signed/unsigned, `None` should be returned. - fn is_signed(&self) -> Option { None } + /// Is this `Extrinsic` signed? + /// If no information are available about signed/unsigned, `None` should be returned. + fn is_signed(&self) -> Option { + None + } } /// Extract the hashing type for a block. @@ -674,11 +758,11 @@ pub type AuthorityIdFor = as DigestItem>::AuthorityId; /// Implement for pieces of information that require some additional context `Context` in order to be /// checked. pub trait Checkable: Sized { - /// Returned if `check` succeeds. - type Checked; + /// Returned if `check` succeeds. + type Checked; - /// Check self, given an instance of Context. - fn check(self, c: &Context) -> Result; + /// Check self, given an instance of Context. + fn check(self, c: &Context) -> Result; } /// A "checkable" piece of information, used by the standard Substrate Executive in order to @@ -686,19 +770,19 @@ pub trait Checkable: Sized { /// Implement for pieces of information that don't require additional context in order to be /// checked. pub trait BlindCheckable: Sized { - /// Returned if `check` succeeds. - type Checked; + /// Returned if `check` succeeds. + type Checked; - /// Check self. - fn check(self) -> Result; + /// Check self. + fn check(self) -> Result; } // Every `BlindCheckable` is also a `StaticCheckable` for arbitrary `Context`. impl Checkable for T { - type Checked = ::Checked; - fn check(self, _c: &Context) -> Result { - BlindCheckable::check(self) - } + type Checked = ::Checked; + fn check(self, _c: &Context) -> Result { + BlindCheckable::check(self) + } } /// An "executable" piece of information, used by the standard Substrate Executive in order to @@ -708,41 +792,39 @@ impl Checkable for T { /// Also provides information on to whom this information is attributable and an index that allows /// each piece of attributable information to be disambiguated. pub trait Applyable: Sized + Send + Sync { - /// Id of the account that is responsible for this piece of information (sender). - type AccountId: Member + MaybeDisplay; - /// Index allowing to disambiguate other `Applyable`s from the same `AccountId`. - type Index: Member + MaybeDisplay + SimpleArithmetic; - /// Function call. - type Call: Member; - /// Returns a reference to the index if any. - fn index(&self) -> Option<&Self::Index>; - /// Returns a reference to the sender if any. 
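// Illustrative sketch, not part of the patch: a hypothetical
// `BlindCheckable`, i.e. validation that needs no context. The blanket
// impl above then provides `Checkable<Context>` for free, for any
// `Context`. The `&'static str` error type is an assumption matching the
// trait's signature in Substrate of this period.
struct EvenOnly(u64);

impl BlindCheckable for EvenOnly {
    type Checked = u64;
    fn check(self) -> Result<u64, &'static str> {
        if self.0 % 2 == 0 {
            Ok(self.0)
        } else {
            Err("odd value rejected")
        }
    }
}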
- fn sender(&self) -> Option<&Self::AccountId>; - /// Deconstructs into function call and sender. - fn deconstruct(self) -> (Self::Call, Option); + /// Id of the account that is responsible for this piece of information (sender). + type AccountId: Member + MaybeDisplay; + /// Index allowing to disambiguate other `Applyable`s from the same `AccountId`. + type Index: Member + MaybeDisplay + SimpleArithmetic; + /// Function call. + type Call: Member; + /// Returns a reference to the index if any. + fn index(&self) -> Option<&Self::Index>; + /// Returns a reference to the sender if any. + fn sender(&self) -> Option<&Self::AccountId>; + /// Deconstructs into function call and sender. + fn deconstruct(self) -> (Self::Call, Option); } /// Something that acts like a `Digest` - it can have `Log`s `push`ed onto it and these `Log`s are /// each `Codec`. pub trait Digest: Member + MaybeSerializeDebugButNotDeserialize + Default { - /// Hash of the items. - type Hash: Member; - /// Digest item type. - type Item: DigestItem; - - /// Get reference to all digest items. - fn logs(&self) -> &[Self::Item]; - /// Push new digest item. - fn push(&mut self, item: Self::Item); - /// Pop a digest item. - fn pop(&mut self) -> Option; - - /// Get reference to the first digest item that matches the passed predicate. - fn log Option<&T>>(&self, predicate: F) -> Option<&T> { - self.logs().iter() - .filter_map(predicate) - .next() - } + /// Hash of the items. + type Hash: Member; + /// Digest item type. + type Item: DigestItem; + + /// Get reference to all digest items. + fn logs(&self) -> &[Self::Item]; + /// Push new digest item. + fn push(&mut self, item: Self::Item); + /// Pop a digest item. + fn pop(&mut self) -> Option; + + /// Get reference to the first digest item that matches the passed predicate. + fn log Option<&T>>(&self, predicate: F) -> Option<&T> { + self.logs().iter().filter_map(predicate).next() + } } /// Single digest item. Could be any type that implements `Member` and provides methods @@ -750,64 +832,64 @@ pub trait Digest: Member + MaybeSerializeDebugButNotDeserialize + Default { /// /// If the runtime does not supports some 'system' items, use `()` as a stub. pub trait DigestItem: Codec + Member + MaybeSerializeDebugButNotDeserialize { - /// `ChangesTrieRoot` payload. - type Hash: Member; - /// `AuthorityChange` payload. - type AuthorityId: Member + MaybeHash + crate::codec::Encode + crate::codec::Decode; + /// `ChangesTrieRoot` payload. + type Hash: Member; + /// `AuthorityChange` payload. + type AuthorityId: Member + MaybeHash + crate::codec::Encode + crate::codec::Decode; - /// Returns Some if the entry is the `AuthoritiesChange` entry. - fn as_authorities_change(&self) -> Option<&[Self::AuthorityId]>; + /// Returns Some if the entry is the `AuthoritiesChange` entry. + fn as_authorities_change(&self) -> Option<&[Self::AuthorityId]>; - /// Returns Some if the entry is the `ChangesTrieRoot` entry. - fn as_changes_trie_root(&self) -> Option<&Self::Hash>; + /// Returns Some if the entry is the `ChangesTrieRoot` entry. + fn as_changes_trie_root(&self) -> Option<&Self::Hash>; } /// Auxiliary wrapper that holds an api instance and binds it to the given lifetime. 
pub struct ApiRef<'a, T>(T, rstd::marker::PhantomData<&'a ()>); impl<'a, T> From for ApiRef<'a, T> { - fn from(api: T) -> Self { - ApiRef(api, Default::default()) - } + fn from(api: T) -> Self { + ApiRef(api, Default::default()) + } } impl<'a, T> rstd::ops::Deref for ApiRef<'a, T> { - type Target = T; + type Target = T; - fn deref(&self) -> &Self::Target { - &self.0 - } + fn deref(&self) -> &Self::Target { + &self.0 + } } /// Something that provides a runtime api. pub trait ProvideRuntimeApi { - /// The concrete type that provides the api. - type Api; + /// The concrete type that provides the api. + type Api; - /// Returns the runtime api. - /// The returned instance will keep track of modifications to the storage. Any successful - /// call to an api function, will `commit` its changes to an internal buffer. Otherwise, - /// the modifications will be `discarded`. The modifications will not be applied to the - /// storage, even on a `commit`. - fn runtime_api<'a>(&'a self) -> ApiRef<'a, Self::Api>; + /// Returns the runtime api. + /// The returned instance will keep track of modifications to the storage. Any successful + /// call to an api function, will `commit` its changes to an internal buffer. Otherwise, + /// the modifications will be `discarded`. The modifications will not be applied to the + /// storage, even on a `commit`. + fn runtime_api<'a>(&'a self) -> ApiRef<'a, Self::Api>; } /// A marker trait for something that knows the type of the runtime block. pub trait GetRuntimeBlockType { - /// The `RuntimeBlock` type. - type RuntimeBlock: self::Block; + /// The `RuntimeBlock` type. + type RuntimeBlock: self::Block; } /// A marker trait for something that knows the type of the node block. pub trait GetNodeBlockType { - /// The `NodeBlock` type. - type NodeBlock: self::Block; + /// The `NodeBlock` type. + type NodeBlock: self::Block; } /// Something that provides information about a runtime api. pub trait RuntimeApiInfo { - /// The identifier of the runtime api. - const ID: [u8; 8]; - /// The version of the runtime api. - const VERSION: u32; + /// The identifier of the runtime api. + const ID: [u8; 8]; + /// The version of the runtime api. + const VERSION: u32; } diff --git a/core/sr-primitives/src/transaction_validity.rs b/core/sr-primitives/src/transaction_validity.rs index 7cf3aa1d6d..348824da30 100644 --- a/core/sr-primitives/src/transaction_validity.rs +++ b/core/sr-primitives/src/transaction_validity.rs @@ -16,8 +16,8 @@ //! Transaction validity interface. +use crate::codec::{Decode, Encode}; use rstd::prelude::*; -use crate::codec::{Encode, Decode}; /// Priority for a transaction. Additive. Higher is better. pub type TransactionPriority = u64; @@ -33,33 +33,33 @@ pub type TransactionTag = Vec; #[derive(Clone, PartialEq, Eq, Encode, Decode)] #[cfg_attr(feature = "std", derive(Debug))] pub enum TransactionValidity { - /// Transaction is invalid. Details are described by the error code. - Invalid(i8), - /// Transaction is valid. - Valid { - /// Priority of the transaction. - /// - /// Priority determines the ordering of two transactions that have all - /// their dependencies (required tags) satisfied. - priority: TransactionPriority, - /// Transaction dependencies - /// - /// A non-empty list signifies that some other transactions which provide - /// given tags are required to be included before that one. - requires: Vec, - /// Provided tags - /// - /// A list of tags this transaction provides. 
Successfuly importing the transaction - /// will enable other transactions that depend on (require) those tags to be included as well. - /// Provided and requried tags allow Substrate to build a dependency graph of transactions - /// and import them in the right (linear) order. - provides: Vec, - /// Transaction longevity - /// - /// Longevity describes minimum number of blocks the validity is correct. - /// After this period transaction should be removed from the pool or revalidated. - longevity: TransactionLongevity, - }, - /// Transaction validity can't be determined. - Unknown(i8), + /// Transaction is invalid. Details are described by the error code. + Invalid(i8), + /// Transaction is valid. + Valid { + /// Priority of the transaction. + /// + /// Priority determines the ordering of two transactions that have all + /// their dependencies (required tags) satisfied. + priority: TransactionPriority, + /// Transaction dependencies + /// + /// A non-empty list signifies that some other transactions which provide + /// given tags are required to be included before that one. + requires: Vec, + /// Provided tags + /// + /// A list of tags this transaction provides. Successfully importing the transaction + /// will enable other transactions that depend on (require) those tags to be included as well. + /// Provided and required tags allow Substrate to build a dependency graph of transactions + /// and import them in the right (linear) order. + provides: Vec, + /// Transaction longevity + /// + /// Longevity describes the minimum number of blocks for which the validity is correct. + /// After this period the transaction should be removed from the pool or revalidated. + longevity: TransactionLongevity, + }, + /// Transaction validity can't be determined. + Unknown(i8), } diff --git a/core/sr-sandbox/src/lib.rs b/core/sr-sandbox/src/lib.rs index e8bdd5727e..28142b2386 100755 --- a/core/sr-sandbox/src/lib.rs +++ b/core/sr-sandbox/src/lib.rs @@ -41,36 +41,36 @@ use rstd::prelude::*; -pub use primitives::sandbox::{TypedValue, ReturnValue, HostError}; +pub use primitives::sandbox::{HostError, ReturnValue, TypedValue}; mod imp { - #[cfg(feature = "std")] - include!("../with_std.rs"); + #[cfg(feature = "std")] + include!("../with_std.rs"); - #[cfg(not(feature = "std"))] - include!("../without_std.rs"); + #[cfg(not(feature = "std"))] + include!("../without_std.rs"); } /// Error that can occur while using this crate. #[cfg_attr(feature = "std", derive(Debug))] pub enum Error { - /// Module is not valid, couldn't be instantiated or it's `start` function trapped - /// when executed. - Module, + /// Module is not valid, couldn't be instantiated or its `start` function trapped + /// when executed. + Module, - /// Access to a memory or table was made with an address or an index which is out of bounds. - /// - /// Note that if wasm module makes an out-of-bounds access then trap will occur. - OutOfBounds, + /// Access to a memory or table was made with an address or an index which is out of bounds. + /// + /// Note that if the wasm module makes an out-of-bounds access then a trap will occur. + OutOfBounds, - /// Failed to invoke an exported function for some reason.
+ Execution, } impl From for HostError { - fn from(_e: Error) -> HostError { - HostError - } + fn from(_e: Error) -> HostError { + HostError + } } /// Function pointer for specifying functions by the @@ -89,39 +89,39 @@ pub type HostFuncType = fn(&mut T, &[TypedValue]) -> Result; pub struct Memory { - inner: imp::Memory, + inner: imp::Memory, } impl Memory { - /// Construct a new linear memory instance. - /// - /// The memory allocated with initial number of pages specified by `initial`. - /// Minimal possible value for `initial` is 0 and maximum possible is `65536`. - /// (Since maximum addressible memory is 2^32 = 4GiB = 65536 * 64KiB). - /// - /// It is possible to limit maximum number of pages this memory instance can have by specifying - /// `maximum`. If not specified, this memory instance would be able to allocate up to 4GiB. - /// - /// Allocated memory is always zeroed. - pub fn new(initial: u32, maximum: Option) -> Result { - Ok(Memory { - inner: imp::Memory::new(initial, maximum)?, - }) - } - - /// Read a memory area at the address `ptr` with the size of the provided slice `buf`. - /// - /// Returns `Err` if the range is out-of-bounds. - pub fn get(&self, ptr: u32, buf: &mut [u8]) -> Result<(), Error> { - self.inner.get(ptr, buf) - } - - /// Write a memory area at the address `ptr` with contents of the provided slice `buf`. - /// - /// Returns `Err` if the range is out-of-bounds. - pub fn set(&self, ptr: u32, value: &[u8]) -> Result<(), Error> { - self.inner.set(ptr, value) - } + /// Construct a new linear memory instance. + /// + /// The memory is allocated with the initial number of pages specified by `initial`. + /// Minimal possible value for `initial` is 0 and maximum possible is `65536`. + /// (Since the maximum addressable memory is 2^32 = 4GiB = 65536 * 64KiB). + /// + /// It is possible to limit the maximum number of pages this memory instance can have by specifying + /// `maximum`. If not specified, this memory instance would be able to allocate up to 4GiB. + /// + /// Allocated memory is always zeroed. + pub fn new(initial: u32, maximum: Option) -> Result { + Ok(Memory { + inner: imp::Memory::new(initial, maximum)?, + }) + } + + /// Read a memory area at the address `ptr` with the size of the provided slice `buf`. + /// + /// Returns `Err` if the range is out-of-bounds. + pub fn get(&self, ptr: u32, buf: &mut [u8]) -> Result<(), Error> { + self.inner.get(ptr, buf) + } + + /// Write a memory area at the address `ptr` with contents of the provided slice `buf`. + /// + /// Returns `Err` if the range is out-of-bounds. + pub fn set(&self, ptr: u32, value: &[u8]) -> Result<(), Error> { + self.inner.set(ptr, value) + } } /// Struct that can be used for defining an environment for a sandboxed module. @@ -129,81 +129,84 @@ impl Memory { /// The sandboxed module can access only the entities which were defined and passed /// to the module at the instantiation time. pub struct EnvironmentDefinitionBuilder { - inner: imp::EnvironmentDefinitionBuilder, + inner: imp::EnvironmentDefinitionBuilder, } impl EnvironmentDefinitionBuilder { - /// Construct a new `EnvironmentDefinitionBuilder`. - pub fn new() -> EnvironmentDefinitionBuilder { - EnvironmentDefinitionBuilder { - inner: imp::EnvironmentDefinitionBuilder::new(), - } - } - - /// Register a host function in this environment definition. - /// - /// NOTE that there is no constraints on type of this function. An instance - /// can import function passed here with any signature it wants. It can even import - /// the same function (i.e. with same `module` and `field`) several times. It's up to - /// the user code to check or constrain the types of signatures. - pub fn add_host_func(&mut self, module: N1, field: N2, f: HostFuncType) - where - N1: Into>, - N2: Into>, - { - self.inner.add_host_func(module, field, f); - } - - /// Register a memory in this environment definition.
- pub fn add_memory(&mut self, module: N1, field: N2, mem: Memory) - where - N1: Into>, - N2: Into>, - { - self.inner.add_memory(module, field, mem.inner); - } + /// Construct a new `EnvironmentDefinitionBuilder`. + pub fn new() -> EnvironmentDefinitionBuilder { + EnvironmentDefinitionBuilder { + inner: imp::EnvironmentDefinitionBuilder::new(), + } + } + + /// Register a host function in this environment definition. + /// + /// NOTE that there are no constraints on the type of this function. An instance + /// can import a function passed here with any signature it wants. It can even import + /// the same function (i.e. with the same `module` and `field`) several times. It's up to + /// the user code to check or constrain the types of signatures. + pub fn add_host_func(&mut self, module: N1, field: N2, f: HostFuncType) + where + N1: Into>, + N2: Into>, + { + self.inner.add_host_func(module, field, f); + } + + /// Register a memory in this environment definition. + pub fn add_memory(&mut self, module: N1, field: N2, mem: Memory) + where + N1: Into>, + N2: Into>, + { + self.inner.add_memory(module, field, mem.inner); + } } /// Sandboxed instance of a wasm module. /// /// This instance can be used for invoking exported functions. pub struct Instance { - inner: imp::Instance, - + inner: imp::Instance, } impl Instance { - /// Instantiate a module with the given [`EnvironmentDefinitionBuilder`]. It will - /// run the `start` function with the given `state`. - /// - /// Returns `Err(Error::Module)` if this module can't be instantiated with the given - /// environment. If execution of `start` function generated a trap, then `Err(Error::Execution)` will - /// be returned. - /// - /// [`EnvironmentDefinitionBuilder`]: struct.EnvironmentDefinitionBuilder.html - pub fn new(code: &[u8], env_def_builder: &EnvironmentDefinitionBuilder, state: &mut T) -> Result, Error> { - Ok(Instance { - inner: imp::Instance::new(code, &env_def_builder.inner, state)?, - }) - } - - /// Invoke an exported function with the given name. - /// - /// # Errors - /// - /// Returns `Err(Error::Execution)` if: - /// - /// - An export function name isn't a proper utf8 byte sequence, - /// - This module doesn't have an exported function with the given name, - /// - If types of the arguments passed to the function doesn't match function signature - /// then trap occurs (as if the exported function was called via call_indirect), - /// - Trap occured at the execution time. - pub fn invoke( - &mut self, - name: &[u8], - args: &[TypedValue], - state: &mut T, - ) -> Result { - self.inner.invoke(name, args, state) - } + /// Instantiate a module with the given [`EnvironmentDefinitionBuilder`]. It will + /// run the `start` function with the given `state`. + /// + /// Returns `Err(Error::Module)` if this module can't be instantiated with the given + /// environment. If execution of the `start` function generated a trap, then `Err(Error::Execution)` will + /// be returned. + /// + /// [`EnvironmentDefinitionBuilder`]: struct.EnvironmentDefinitionBuilder.html + pub fn new( + code: &[u8], + env_def_builder: &EnvironmentDefinitionBuilder, + state: &mut T, + ) -> Result, Error> { + Ok(Instance { + inner: imp::Instance::new(code, &env_def_builder.inner, state)?, + }) + } + + /// Invoke an exported function with the given name.
+ /// + /// # Errors + /// + /// Returns `Err(Error::Execution)` if: + /// + /// - An export function name isn't a proper UTF-8 byte sequence, + /// - This module doesn't have an exported function with the given name, + /// - If the types of the arguments passed to the function don't match the function signature + /// then a trap occurs (as if the exported function was called via call_indirect), + /// - A trap occurred at execution time. + pub fn invoke( + &mut self, + name: &[u8], + args: &[TypedValue], + state: &mut T, + ) -> Result { + self.inner.invoke(name, args, state) + } } diff --git a/core/sr-std/build.rs b/core/sr-std/build.rs index af9c91db87..5b5d06b65a 100644 --- a/core/sr-std/build.rs +++ b/core/sr-std/build.rs @@ -3,11 +3,11 @@ use rustc_version::{version, version_meta, Channel}; fn main() { - // Assert we haven't traveled back in time - assert!(version().unwrap().major >= 1); + // Assert we haven't traveled back in time + assert!(version().unwrap().major >= 1); - // Set cfg flags depending on release channel - if let Channel::Nightly = version_meta().unwrap().channel { - println!("cargo:rustc-cfg=feature=\"nightly\""); - } + // Set cfg flags depending on release channel + if let Channel::Nightly = version_meta().unwrap().channel { + println!("cargo:rustc-cfg=feature=\"nightly\""); + } } diff --git a/core/sr-std/src/lib.rs b/core/sr-std/src/lib.rs index 45857b33ed..8cb3e61c94 100644 --- a/core/sr-std/src/lib.rs +++ b/core/sr-std/src/lib.rs @@ -20,9 +20,14 @@ #![cfg_attr(not(feature = "std"), no_std)] #![cfg_attr(not(feature = "std"), feature(core_intrinsics))] #![cfg_attr(not(feature = "std"), feature(alloc))] - -#![cfg_attr(feature = "std", doc = "Substrate runtime standard library as compiled when linked with Rust's standard library.")] -#![cfg_attr(not(feature = "std"), doc = "Substrate's runtime standard library as compiled without Rust's standard library.")] +#![cfg_attr( + feature = "std", + doc = "Substrate runtime standard library as compiled when linked with Rust's standard library." +)] +#![cfg_attr( + not(feature = "std"), + doc = "Substrate's runtime standard library as compiled without Rust's standard library." +)] #[macro_export] macro_rules! map { @@ -41,13 +46,13 @@ include!("../without_std.rs"); /// /// This should include only things which are in the normal std prelude. pub mod prelude { - pub use crate::vec::Vec; - pub use crate::boxed::Box; - pub use crate::cmp::{Eq, PartialEq}; - pub use crate::clone::Clone; - - // Re-export `vec!` macro here, but not in `std` mode, since - // std's prelude already brings `vec!` into the scope. - #[cfg(not(feature = "std"))] - pub use crate::vec; + pub use crate::boxed::Box; + pub use crate::clone::Clone; + pub use crate::cmp::{Eq, PartialEq}; + pub use crate::vec::Vec; + + // Re-export `vec!` macro here, but not in `std` mode, since + // std's prelude already brings `vec!` into the scope.
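// Illustrative sketch, not part of the patch; it refers back to the
// `core/sr-sandbox` API reformatted above. Host state is threaded
// explicitly through every call; the wasm `code` bytes are elided, and the
// "env"/"inc"/"memory"/"main" names are made up for the example.
fn sandbox_demo(code: &[u8]) -> Result<ReturnValue, Error> {
    let mut calls = 0u32; // supervisor-side state

    let mut env = EnvironmentDefinitionBuilder::new();
    // Non-capturing closures coerce to the plain `fn` pointer that
    // `HostFuncType` requires.
    env.add_host_func("env", "inc", |calls: &mut u32, _args: &[TypedValue]| {
        *calls += 1;
        Ok(ReturnValue::Unit)
    });

    let memory = Memory::new(1, Some(1))?; // one fixed 64 KiB page
    env.add_memory("env", "memory", memory);

    let mut instance = Instance::new(code, &env, &mut calls)?;
    instance.invoke(b"main", &[], &mut calls)
}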
+ #[cfg(not(feature = "std"))] + pub use crate::vec; } diff --git a/core/sr-version/src/lib.rs b/core/sr-version/src/lib.rs index 3d1dfb4313..b722e71ede 100644 --- a/core/sr-version/src/lib.rs +++ b/core/sr-version/src/lib.rs @@ -19,19 +19,19 @@ #![cfg_attr(not(feature = "std"), no_std)] #[cfg(feature = "std")] -use serde_derive::Serialize; +use runtime_primitives::traits::RuntimeApiInfo; #[cfg(feature = "std")] -use std::fmt; +use serde_derive::Serialize; #[cfg(feature = "std")] use std::collections::HashSet; #[cfg(feature = "std")] -use runtime_primitives::traits::RuntimeApiInfo; +use std::fmt; -use parity_codec::Encode; #[cfg(feature = "std")] use parity_codec::Decode; -use runtime_primitives::RuntimeString; +use parity_codec::Encode; pub use runtime_primitives::create_runtime_str; +use runtime_primitives::RuntimeString; /// The identity of a particular API interface that the runtime might provide. pub type ApiId = [u8; 8]; @@ -49,12 +49,16 @@ pub type ApisVec = &'static [(ApiId, u32)]; #[macro_export] #[cfg(feature = "std")] macro_rules! create_apis_vec { - ( $y:expr ) => { ::std::borrow::Cow::Borrowed(& $y) } + ( $y:expr ) => { + ::std::borrow::Cow::Borrowed(&$y) + }; } #[macro_export] #[cfg(not(feature = "std"))] macro_rules! create_apis_vec { - ( $y:expr ) => { & $y } + ( $y:expr ) => { + &$y + }; } /// Runtime version. @@ -66,125 +70,124 @@ macro_rules! create_apis_vec { #[cfg_attr(feature = "std", derive(Debug, Serialize, Decode))] #[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] pub struct RuntimeVersion { - /// Identifies the different Substrate runtimes. There'll be at least polkadot and node. - /// A different on-chain spec_name to that of the native runtime would normally result - /// in node not attempting to sync or author blocks. - pub spec_name: RuntimeString, - - /// Name of the implementation of the spec. This is of little consequence for the node - /// and serves only to differentiate code of different implementation teams. For this - /// codebase, it will be parity-polkadot. If there were a non-Rust implementation of the - /// Polkadot runtime (e.g. C++), then it would identify itself with an accordingly different - /// `impl_name`. - pub impl_name: RuntimeString, - - /// `authoring_version` is the version of the authorship interface. An authoring node - /// will not attempt to author blocks unless this is equal to its native runtime. - pub authoring_version: u32, - - /// Version of the runtime specification. A full-node will not attempt to use its native - /// runtime in substitute for the on-chain Wasm runtime unless all of `spec_name`, - /// `spec_version` and `authoring_version` are the same between Wasm and native. - pub spec_version: u32, - - /// Version of the implementation of the specification. Nodes are free to ignore this; it - /// serves only as an indication that the code is different; as long as the other two versions - /// are the same then while the actual code may be different, it is nonetheless required to - /// do the same thing. - /// Non-consensus-breaking optimizations are about the only changes that could be made which - /// would result in only the `impl_version` changing. - pub impl_version: u32, - - /// List of supported API "features" along with their versions. - #[cfg_attr(feature = "std", serde(serialize_with = "apis_serialize::serialize"))] - pub apis: ApisVec, + /// Identifies the different Substrate runtimes. There'll be at least polkadot and node. 
+ /// A different on-chain spec_name to that of the native runtime would normally result + /// in node not attempting to sync or author blocks. + pub spec_name: RuntimeString, + + /// Name of the implementation of the spec. This is of little consequence for the node + /// and serves only to differentiate code of different implementation teams. For this + /// codebase, it will be parity-polkadot. If there were a non-Rust implementation of the + /// Polkadot runtime (e.g. C++), then it would identify itself with an accordingly different + /// `impl_name`. + pub impl_name: RuntimeString, + + /// `authoring_version` is the version of the authorship interface. An authoring node + /// will not attempt to author blocks unless this is equal to its native runtime. + pub authoring_version: u32, + + /// Version of the runtime specification. A full-node will not attempt to use its native + /// runtime in substitute for the on-chain Wasm runtime unless all of `spec_name`, + /// `spec_version` and `authoring_version` are the same between Wasm and native. + pub spec_version: u32, + + /// Version of the implementation of the specification. Nodes are free to ignore this; it + /// serves only as an indication that the code is different; as long as the other two versions + /// are the same then while the actual code may be different, it is nonetheless required to + /// do the same thing. + /// Non-consensus-breaking optimizations are about the only changes that could be made which + /// would result in only the `impl_version` changing. + pub impl_version: u32, + + /// List of supported API "features" along with their versions. + #[cfg_attr(feature = "std", serde(serialize_with = "apis_serialize::serialize"))] + pub apis: ApisVec, } #[cfg(feature = "std")] impl fmt::Display for RuntimeVersion { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}-{}:{}({}-{})", - self.spec_name, - self.spec_version, - self.authoring_version, - self.impl_name, - self.impl_version - ) - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!( + f, + "{}-{}:{}({}-{})", + self.spec_name, + self.spec_version, + self.authoring_version, + self.impl_name, + self.impl_version + ) + } } #[cfg(feature = "std")] impl RuntimeVersion { - /// Check if this version matches other version for calling into runtime. - pub fn can_call_with(&self, other: &RuntimeVersion) -> bool { - self.spec_version == other.spec_version && - self.spec_name == other.spec_name && - self.authoring_version == other.authoring_version - } - - /// Check if this version supports a particular API. - pub fn has_api(&self) -> bool { - self.apis.iter().any(|(s, v)| { - s == &A::ID && *v == A::VERSION - }) - } - - /// Check if the given api is implemented and the version passes a predicate. - pub fn has_api_with bool>( - &self, - pred: P, - ) -> bool { - self.apis.iter().any(|(s, v)| { - s == &A::ID && pred(*v) - }) - } + /// Check if this version matches other version for calling into runtime. + pub fn can_call_with(&self, other: &RuntimeVersion) -> bool { + self.spec_version == other.spec_version + && self.spec_name == other.spec_name + && self.authoring_version == other.authoring_version + } + + /// Check if this version supports a particular API. + pub fn has_api(&self) -> bool { + self.apis + .iter() + .any(|(s, v)| s == &A::ID && *v == A::VERSION) + } + + /// Check if the given api is implemented and the version passes a predicate. 
+ pub fn has_api_with bool>(&self, pred: P) -> bool { + self.apis.iter().any(|(s, v)| s == &A::ID && pred(*v)) + } } #[cfg(feature = "std")] #[cfg_attr(feature = "std", derive(Debug))] pub struct NativeVersion { - /// Basic runtime version info. - pub runtime_version: RuntimeVersion, - /// Authoring runtimes that this native runtime supports. - pub can_author_with: HashSet, + /// Basic runtime version info. + pub runtime_version: RuntimeVersion, + /// Authoring runtimes that this native runtime supports. + pub can_author_with: HashSet, } #[cfg(feature = "std")] impl NativeVersion { - /// Check if this version matches other version for authoring blocks. - pub fn can_author_with(&self, other: &RuntimeVersion) -> bool { - self.runtime_version.spec_name == other.spec_name && - (self.runtime_version.authoring_version == other.authoring_version || - self.can_author_with.contains(&other.authoring_version)) - } + /// Check if this version matches other version for authoring blocks. + pub fn can_author_with(&self, other: &RuntimeVersion) -> bool { + self.runtime_version.spec_name == other.spec_name + && (self.runtime_version.authoring_version == other.authoring_version + || self.can_author_with.contains(&other.authoring_version)) + } } #[cfg(feature = "std")] mod apis_serialize { - use super::*; - use impl_serde::serialize as bytes; - use serde::{Serializer, ser::SerializeTuple}; - - #[derive(Serialize)] - struct ApiId<'a>( - #[serde(serialize_with="serialize_bytesref")] &'a super::ApiId, - &'a u32, - ); - - pub fn serialize(apis: &ApisVec, ser: S) -> Result where - S: Serializer, - { - let len = apis.len(); - let mut seq = ser.serialize_tuple(len)?; - for (api, ver) in &**apis { - seq.serialize_element(&ApiId(api, ver))?; - } - seq.end() - } - - pub fn serialize_bytesref(apis: &&super::ApiId, ser: S) -> Result where - S: Serializer, - { - bytes::serialize(*apis, ser) - } + use super::*; + use impl_serde::serialize as bytes; + use serde::{ser::SerializeTuple, Serializer}; + + #[derive(Serialize)] + struct ApiId<'a>( + #[serde(serialize_with = "serialize_bytesref")] &'a super::ApiId, + &'a u32, + ); + + pub fn serialize(apis: &ApisVec, ser: S) -> Result + where + S: Serializer, + { + let len = apis.len(); + let mut seq = ser.serialize_tuple(len)?; + for (api, ver) in &**apis { + seq.serialize_element(&ApiId(api, ver))?; + } + seq.end() + } + + pub fn serialize_bytesref(apis: &&super::ApiId, ser: S) -> Result + where + S: Serializer, + { + bytes::serialize(*apis, ser) + } } diff --git a/core/state-db/src/lib.rs b/core/state-db/src/lib.rs index 8d9cf9c965..4b021e78db 100644 --- a/core/state-db/src/lib.rs +++ b/core/state-db/src/lib.rs @@ -31,478 +31,544 @@ mod noncanonical; mod pruning; -#[cfg(test)] mod test; +#[cfg(test)] +mod test; -use std::fmt; -use parking_lot::RwLock; -use parity_codec as codec; use codec::Codec; -use std::collections::HashSet; +use log::trace; use noncanonical::NonCanonicalOverlay; +use parity_codec as codec; +use parking_lot::RwLock; use pruning::RefWindow; -use log::trace; +use std::collections::HashSet; +use std::fmt; /// Database value type. pub type DBValue = Vec; /// Basic set of requirements for the Block hash and node key types. 
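// Illustrative sketch, not part of the patch; it refers back to
// `RuntimeVersion`/`NativeVersion` above. Note what the checks
// deliberately ignore: `can_call_with` never compares `impl_version`, so
// two builds that differ only in non-consensus code stay compatible.
// `pick_native_runtime` is a hypothetical helper.
#[cfg(feature = "std")]
fn pick_native_runtime(native: &NativeVersion, on_chain: &RuntimeVersion) -> bool {
    // Prefer the native runtime only if it may both call into and author
    // on top of the on-chain runtime, per the rules encoded above.
    native.runtime_version.can_call_with(on_chain) && native.can_author_with(on_chain)
}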
-pub trait Hash: Send + Sync + Sized + Eq + PartialEq + Clone + Default + fmt::Debug + Codec + std::hash::Hash + 'static {} -impl Hash for T {} +pub trait Hash: + Send + + Sync + + Sized + + Eq + + PartialEq + + Clone + + Default + + fmt::Debug + + Codec + + std::hash::Hash + + 'static +{ +} +impl< + T: Send + + Sync + + Sized + + Eq + + PartialEq + + Clone + + Default + + fmt::Debug + + Codec + + std::hash::Hash + + 'static, + > Hash for T +{ +} /// Backend database trait. Read-only. pub trait MetaDb { - type Error: fmt::Debug; + type Error: fmt::Debug; - /// Get meta value, such as the journal. - fn get_meta(&self, key: &[u8]) -> Result, Self::Error>; + /// Get meta value, such as the journal. + fn get_meta(&self, key: &[u8]) -> Result, Self::Error>; } /// Backend database trait. Read-only. pub trait NodeDb { - type Key: ?Sized; - type Error: fmt::Debug; + type Key: ?Sized; + type Error: fmt::Debug; - /// Get state trie node. - fn get(&self, key: &Self::Key) -> Result, Self::Error>; + /// Get state trie node. + fn get(&self, key: &Self::Key) -> Result, Self::Error>; } /// Error type. pub enum Error { - /// Database backend error. - Db(E), - /// `Codec` decoding error. - Decoding, - /// Trying to canonicalize invalid block. - InvalidBlock, - /// Trying to insert block with invalid number. - InvalidBlockNumber, - /// Trying to insert block with unknown parent. - InvalidParent, + /// Database backend error. + Db(E), + /// `Codec` decoding error. + Decoding, + /// Trying to canonicalize invalid block. + InvalidBlock, + /// Trying to insert block with invalid number. + InvalidBlockNumber, + /// Trying to insert block with unknown parent. + InvalidParent, } impl fmt::Debug for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Error::Db(e) => e.fmt(f), - Error::Decoding => write!(f, "Error decoding slicable value"), - Error::InvalidBlock => write!(f, "Trying to canonicalize invalid block"), - Error::InvalidBlockNumber => write!(f, "Trying to insert block with invalid number"), - Error::InvalidParent => write!(f, "Trying to insert block with unknown parent"), - } - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Error::Db(e) => e.fmt(f), + Error::Decoding => write!(f, "Error decoding slicable value"), + Error::InvalidBlock => write!(f, "Trying to canonicalize invalid block"), + Error::InvalidBlockNumber => write!(f, "Trying to insert block with invalid number"), + Error::InvalidParent => write!(f, "Trying to insert block with unknown parent"), + } + } } /// A set of state node changes. #[derive(Default, Debug, Clone)] pub struct ChangeSet { - /// Inserted nodes. - pub inserted: Vec<(H, DBValue)>, - /// Deleted nodes. - pub deleted: Vec, + /// Inserted nodes. + pub inserted: Vec<(H, DBValue)>, + /// Deleted nodes. + pub deleted: Vec, } - /// A set of changes to the backing database. #[derive(Default, Debug, Clone)] pub struct CommitSet { - /// State node changes. - pub data: ChangeSet, - /// Metadata changes. - pub meta: ChangeSet>, + /// State node changes. + pub data: ChangeSet, + /// Metadata changes. + pub meta: ChangeSet>, } /// Pruning constraints. If none are specified pruning is #[derive(Default, Debug, Clone)] pub struct Constraints { - /// Maximum blocks. Defaults to 0 when unspecified, effectively keeping only non-canonical states. - pub max_blocks: Option, - /// Maximum memory in the pruning overlay. - pub max_mem: Option, + /// Maximum blocks. Defaults to 0 when unspecified, effectively keeping only non-canonical states. 

 /// Pruning mode.
 #[derive(Debug, Clone)]
 pub enum PruningMode {
-	/// Maintain a pruning window.
-	Constrained(Constraints),
-	/// No pruning. Canonicalization is a no-op.
-	ArchiveAll,
-	/// Canonicalization discards non-canonical nodes. All the canonical nodes are kept in the DB.
-	ArchiveCanonical,
+    /// Maintain a pruning window.
+    Constrained(Constraints),
+    /// No pruning. Canonicalization is a no-op.
+    ArchiveAll,
+    /// Canonicalization discards non-canonical nodes. All the canonical nodes are kept in the DB.
+    ArchiveCanonical,
 }

 impl PruningMode {
-	/// Create a mode that keeps given number of blocks.
-	pub fn keep_blocks(n: u32) -> PruningMode {
-		PruningMode::Constrained(Constraints {
-			max_blocks: Some(n),
-			max_mem: None,
-		})
-	}
-
-	/// Is this an archive (either ArchiveAll or ArchiveCanonical) pruning mode?
-	pub fn is_archive(&self) -> bool {
-		match *self {
-			PruningMode::ArchiveAll | PruningMode::ArchiveCanonical => true,
-			PruningMode::Constrained(_) => false
-		}
-	}
-
+    /// Create a mode that keeps given number of blocks.
+    pub fn keep_blocks(n: u32) -> PruningMode {
+        PruningMode::Constrained(Constraints {
+            max_blocks: Some(n),
+            max_mem: None,
+        })
+    }
+
+    /// Is this an archive (either ArchiveAll or ArchiveCanonical) pruning mode?
+    pub fn is_archive(&self) -> bool {
+        match *self {
+            PruningMode::ArchiveAll | PruningMode::ArchiveCanonical => true,
+            PruningMode::Constrained(_) => false,
+        }
+    }
 }

 impl Default for PruningMode {
-	fn default() -> Self {
-		PruningMode::keep_blocks(256)
-	}
+    fn default() -> Self {
+        PruningMode::keep_blocks(256)
+    }
 }
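// [Editorial sketch, not part of the patch] The three pruning modes defined above
// in use; `keep_blocks(256)` is also what the `Default` impl resolves to.
fn example_pruning_modes() {
    let archive = PruningMode::ArchiveAll; // never prune anything
    let canonical = PruningMode::ArchiveCanonical; // drop only non-canonical forks
    let windowed = PruningMode::keep_blocks(256); // prune beyond a 256-block window
    assert!(archive.is_archive());
    assert!(canonical.is_archive());
    assert!(!windowed.is_archive());
    // An unconstrained `Constrained` mode is not an archive mode either.
    assert!(!PruningMode::Constrained(Constraints::default()).is_archive());
}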
 fn to_meta_key<S: Codec>(suffix: &[u8], data: &S) -> Vec<u8> {
-	let mut buffer = data.encode();
-	buffer.extend(suffix);
-	buffer
+    let mut buffer = data.encode();
+    buffer.extend(suffix);
+    buffer
 }

 struct StateDbSync<BlockHash: Hash, Key: Hash> {
-	mode: PruningMode,
-	non_canonical: NonCanonicalOverlay<BlockHash, Key>,
-	pruning: Option<RefWindow<BlockHash, Key>>,
-	pinned: HashSet<BlockHash>,
+    mode: PruningMode,
+    non_canonical: NonCanonicalOverlay<BlockHash, Key>,
+    pruning: Option<RefWindow<BlockHash, Key>>,
+    pinned: HashSet<BlockHash>,
 }

 impl<BlockHash: Hash, Key: Hash> StateDbSync<BlockHash, Key> {
-	pub fn new<D: MetaDb>(mode: PruningMode, db: &D) -> Result<StateDbSync<BlockHash, Key>, Error<D::Error>> {
-		trace!("StateDb settings: {:?}", mode);
-		let non_canonical: NonCanonicalOverlay<BlockHash, Key> = NonCanonicalOverlay::new(db)?;
-		let pruning: Option<RefWindow<BlockHash, Key>> = match mode {
-			PruningMode::Constrained(Constraints {
-				max_mem: Some(_),
-				..
-			}) => unimplemented!(),
-			PruningMode::Constrained(_) => Some(RefWindow::new(db)?),
-			PruningMode::ArchiveAll | PruningMode::ArchiveCanonical => None,
-		};
-		Ok(StateDbSync {
-			mode,
-			non_canonical,
-			pruning,
-			pinned: Default::default(),
-		})
-	}
-
-	pub fn insert_block<E: fmt::Debug>(&mut self, hash: &BlockHash, number: u64, parent_hash: &BlockHash, mut changeset: ChangeSet<Key>) -> Result<CommitSet<Key>, Error<E>> {
-		match self.mode {
-			PruningMode::ArchiveAll => {
-				changeset.deleted.clear();
-				// write changes immediately
-				Ok(CommitSet {
-					data: changeset,
-					meta: Default::default(),
-				})
-			},
-			PruningMode::Constrained(_) | PruningMode::ArchiveCanonical => {
-				self.non_canonical.insert(hash, number, parent_hash, changeset)
-			}
-		}
-	}
-
-	pub fn canonicalize_block<E: fmt::Debug>(&mut self, hash: &BlockHash) -> Result<CommitSet<Key>, Error<E>> {
-		let mut commit = match self.mode {
-			PruningMode::ArchiveAll => {
-				CommitSet::default()
-			},
-			PruningMode::ArchiveCanonical => {
-				let mut commit = self.non_canonical.canonicalize(hash)?;
-				commit.data.deleted.clear();
-				commit
-			},
-			PruningMode::Constrained(_) => {
-				self.non_canonical.canonicalize(hash)?
-			},
-		};
-		if let Some(ref mut pruning) = self.pruning {
-			pruning.note_canonical(hash, &mut commit);
-		}
-		self.prune(&mut commit);
-		Ok(commit)
-	}
-
-	pub fn best_canonical(&self) -> Option<u64> {
-		return self.non_canonical.last_canonicalized_block_number()
-	}
-
-	pub fn is_pruned(&self, hash: &BlockHash, number: u64) -> bool {
-		match self.mode {
-			PruningMode::ArchiveAll => false,
-			PruningMode::ArchiveCanonical | PruningMode::Constrained(_) => {
-				if self.best_canonical().map(|c| number > c).unwrap_or(true) {
-					!self.non_canonical.have_block(hash)
-				} else {
-					self.pruning.as_ref().map_or(false, |pruning| number < pruning.pending() || !pruning.have_block(hash))
-				}
-			}
-		}
-	}
-
-	fn prune(&mut self, commit: &mut CommitSet<Key>) {
-		if let (&mut Some(ref mut pruning), &PruningMode::Constrained(ref constraints)) = (&mut self.pruning, &self.mode) {
-			loop {
-				if pruning.window_size() <= constraints.max_blocks.unwrap_or(0) as u64 {
-					break;
-				}
-
-				if constraints.max_mem.map_or(false, |m| pruning.mem_used() > m) {
-					break;
-				}
-
-				let pinned = &self.pinned;
-				if pruning.next_hash().map_or(false, |h| pinned.contains(&h)) {
-					break;
-				}
-				pruning.prune_one(commit);
-			}
-		}
-	}
-
-	/// Revert all non-canonical blocks with the best block number.
-	/// Returns a database commit or `None` if not possible.
-	/// For archive an empty commit set is returned.
-	pub fn revert_one(&mut self) -> Option<CommitSet<Key>> {
-		match self.mode {
-			PruningMode::ArchiveAll => {
-				Some(CommitSet::default())
-			},
-			PruningMode::ArchiveCanonical | PruningMode::Constrained(_) => {
-				self.non_canonical.revert_one()
-			},
-		}
-	}
-
-	pub fn pin(&mut self, hash: &BlockHash) {
-		self.pinned.insert(hash.clone());
-	}
-
-	pub fn unpin(&mut self, hash: &BlockHash) {
-		self.pinned.remove(hash);
-	}
-
-	pub fn get<D: NodeDb>(&self, key: &Key, db: &D) -> Result<Option<DBValue>, Error<D::Error>>
-		where Key: AsRef<D::Key>
-	{
-		if let Some(value) = self.non_canonical.get(key) {
-			return Ok(Some(value));
-		}
-		db.get(key.as_ref()).map_err(|e| Error::Db(e))
-	}
-
-	pub fn apply_pending(&mut self) {
-		self.non_canonical.apply_pending();
-		if let Some(pruning) = &mut self.pruning {
-			pruning.apply_pending();
-		}
-		trace!(target: "forks", "First available: {:?} ({}), Last canon: {:?} ({}), Best forks: {:?}",
-			self.pruning.as_ref().and_then(|p| p.next_hash()),
-			self.pruning.as_ref().map(|p| p.pending()).unwrap_or(0),
-			self.non_canonical.last_canonicalized_hash(),
-			self.non_canonical.last_canonicalized_block_number().unwrap_or(0),
-			self.non_canonical.top_level(),
-		);
-	}
-
-	pub fn revert_pending(&mut self) {
-		if let Some(pruning) = &mut self.pruning {
-			pruning.revert_pending();
-		}
-		self.non_canonical.revert_pending();
-	}
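// [Editorial sketch, not part of the patch] The intended call sequence on the
// methods above, assuming a hypothetical backend `db` implementing `MetaDb` and
// `NodeDb` with `Error = std::io::Error`, as the tests later in this file do:
//
//     let state_db = StateDb::new(PruningMode::keep_blocks(256), &db)?;
//     let commit = state_db.insert_block::<std::io::Error>(&hash, number, &parent, changeset)?;
//     // write `commit` to the backend database, then finalize the block:
//     let commit = state_db.canonicalize_block::<std::io::Error>(&hash)?;
//     // write that commit as well, and only then make both steps visible:
//     state_db.apply_pending();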
+    pub fn new<D: MetaDb>(
+        mode: PruningMode,
+        db: &D,
+    ) -> Result<StateDbSync<BlockHash, Key>, Error<D::Error>> {
+        trace!("StateDb settings: {:?}", mode);
+        let non_canonical: NonCanonicalOverlay<BlockHash, Key> = NonCanonicalOverlay::new(db)?;
+        let pruning: Option<RefWindow<BlockHash, Key>> = match mode {
+            PruningMode::Constrained(Constraints {
+                max_mem: Some(_), ..
+            }) => unimplemented!(),
+            PruningMode::Constrained(_) => Some(RefWindow::new(db)?),
+            PruningMode::ArchiveAll | PruningMode::ArchiveCanonical => None,
+        };
+        Ok(StateDbSync {
+            mode,
+            non_canonical,
+            pruning,
+            pinned: Default::default(),
+        })
+    }
+
+    pub fn insert_block<E: fmt::Debug>(
+        &mut self,
+        hash: &BlockHash,
+        number: u64,
+        parent_hash: &BlockHash,
+        mut changeset: ChangeSet<Key>,
+    ) -> Result<CommitSet<Key>, Error<E>> {
+        match self.mode {
+            PruningMode::ArchiveAll => {
+                changeset.deleted.clear();
+                // write changes immediately
+                Ok(CommitSet {
+                    data: changeset,
+                    meta: Default::default(),
+                })
+            }
+            PruningMode::Constrained(_) | PruningMode::ArchiveCanonical => self
+                .non_canonical
+                .insert(hash, number, parent_hash, changeset),
+        }
+    }
+
+    pub fn canonicalize_block<E: fmt::Debug>(
+        &mut self,
+        hash: &BlockHash,
+    ) -> Result<CommitSet<Key>, Error<E>> {
+        let mut commit = match self.mode {
+            PruningMode::ArchiveAll => CommitSet::default(),
+            PruningMode::ArchiveCanonical => {
+                let mut commit = self.non_canonical.canonicalize(hash)?;
+                commit.data.deleted.clear();
+                commit
+            }
+            PruningMode::Constrained(_) => self.non_canonical.canonicalize(hash)?,
+        };
+        if let Some(ref mut pruning) = self.pruning {
+            pruning.note_canonical(hash, &mut commit);
+        }
+        self.prune(&mut commit);
+        Ok(commit)
+    }
+
+    pub fn best_canonical(&self) -> Option<u64> {
+        return self.non_canonical.last_canonicalized_block_number();
+    }
+
+    pub fn is_pruned(&self, hash: &BlockHash, number: u64) -> bool {
+        match self.mode {
+            PruningMode::ArchiveAll => false,
+            PruningMode::ArchiveCanonical | PruningMode::Constrained(_) => {
+                if self.best_canonical().map(|c| number > c).unwrap_or(true) {
+                    !self.non_canonical.have_block(hash)
+                } else {
+                    self.pruning.as_ref().map_or(false, |pruning| {
+                        number < pruning.pending() || !pruning.have_block(hash)
+                    })
+                }
+            }
+        }
+    }
+
+    fn prune(&mut self, commit: &mut CommitSet<Key>) {
+        if let (&mut Some(ref mut pruning), &PruningMode::Constrained(ref constraints)) =
+            (&mut self.pruning, &self.mode)
+        {
+            loop {
+                if pruning.window_size() <= constraints.max_blocks.unwrap_or(0) as u64 {
+                    break;
+                }
+
+                if constraints
+                    .max_mem
+                    .map_or(false, |m| pruning.mem_used() > m)
+                {
+                    break;
+                }
+
+                let pinned = &self.pinned;
+                if pruning.next_hash().map_or(false, |h| pinned.contains(&h)) {
+                    break;
+                }
+                pruning.prune_one(commit);
+            }
+        }
+    }
+
+    /// Revert all non-canonical blocks with the best block number.
+    /// Returns a database commit or `None` if not possible.
+    /// For archive an empty commit set is returned.
+ pub fn revert_one(&mut self) -> Option> { + match self.mode { + PruningMode::ArchiveAll => Some(CommitSet::default()), + PruningMode::ArchiveCanonical | PruningMode::Constrained(_) => { + self.non_canonical.revert_one() + } + } + } + + pub fn pin(&mut self, hash: &BlockHash) { + self.pinned.insert(hash.clone()); + } + + pub fn unpin(&mut self, hash: &BlockHash) { + self.pinned.remove(hash); + } + + pub fn get(&self, key: &Key, db: &D) -> Result, Error> + where + Key: AsRef, + { + if let Some(value) = self.non_canonical.get(key) { + return Ok(Some(value)); + } + db.get(key.as_ref()).map_err(|e| Error::Db(e)) + } + + pub fn apply_pending(&mut self) { + self.non_canonical.apply_pending(); + if let Some(pruning) = &mut self.pruning { + pruning.apply_pending(); + } + trace!(target: "forks", "First available: {:?} ({}), Last canon: {:?} ({}), Best forks: {:?}", + self.pruning.as_ref().and_then(|p| p.next_hash()), + self.pruning.as_ref().map(|p| p.pending()).unwrap_or(0), + self.non_canonical.last_canonicalized_hash(), + self.non_canonical.last_canonicalized_block_number().unwrap_or(0), + self.non_canonical.top_level(), + ); + } + + pub fn revert_pending(&mut self) { + if let Some(pruning) = &mut self.pruning { + pruning.revert_pending(); + } + self.non_canonical.revert_pending(); + } } /// State DB maintenance. See module description. /// Can be shared across threads. pub struct StateDb { - db: RwLock>, + db: RwLock>, } impl StateDb { - /// Creates a new instance. Does not expect any metadata in the database. - pub fn new(mode: PruningMode, db: &D) -> Result, Error> { - Ok(StateDb { - db: RwLock::new(StateDbSync::new(mode, db)?) - }) - } - - /// Add a new non-canonical block. - pub fn insert_block(&self, hash: &BlockHash, number: u64, parent_hash: &BlockHash, changeset: ChangeSet) -> Result, Error> { - self.db.write().insert_block(hash, number, parent_hash, changeset) - } - - /// Finalize a previously inserted block. - pub fn canonicalize_block(&self, hash: &BlockHash) -> Result, Error> { - self.db.write().canonicalize_block(hash) - } - - /// Prevents pruning of specified block and its descendants. - pub fn pin(&self, hash: &BlockHash) { - self.db.write().pin(hash) - } - - /// Allows pruning of specified block. - pub fn unpin(&self, hash: &BlockHash) { - self.db.write().unpin(hash) - } - - /// Get a value from non-canonical/pruning overlay or the backing DB. - pub fn get(&self, key: &Key, db: &D) -> Result, Error> - where Key: AsRef - { - self.db.read().get(key, db) - } - - /// Revert all non-canonical blocks with the best block number. - /// Returns a database commit or `None` if not possible. - /// For archive an empty commit set is returned. - pub fn revert_one(&self) -> Option> { - self.db.write().revert_one() - } - - /// Returns last finalized block number. - pub fn best_canonical(&self) -> Option { - return self.db.read().best_canonical() - } - - /// Check if block is pruned away. - pub fn is_pruned(&self, hash: &BlockHash, number: u64) -> bool { - return self.db.read().is_pruned(hash, number) - } - - /// Apply all pending changes - pub fn apply_pending(&self) { - self.db.write().apply_pending(); - } - - /// Revert all pending changes - pub fn revert_pending(&self) { - self.db.write().revert_pending(); - } + /// Creates a new instance. Does not expect any metadata in the database. + pub fn new( + mode: PruningMode, + db: &D, + ) -> Result, Error> { + Ok(StateDb { + db: RwLock::new(StateDbSync::new(mode, db)?), + }) + } + + /// Add a new non-canonical block. 
+ pub fn insert_block( + &self, + hash: &BlockHash, + number: u64, + parent_hash: &BlockHash, + changeset: ChangeSet, + ) -> Result, Error> { + self.db + .write() + .insert_block(hash, number, parent_hash, changeset) + } + + /// Finalize a previously inserted block. + pub fn canonicalize_block( + &self, + hash: &BlockHash, + ) -> Result, Error> { + self.db.write().canonicalize_block(hash) + } + + /// Prevents pruning of specified block and its descendants. + pub fn pin(&self, hash: &BlockHash) { + self.db.write().pin(hash) + } + + /// Allows pruning of specified block. + pub fn unpin(&self, hash: &BlockHash) { + self.db.write().unpin(hash) + } + + /// Get a value from non-canonical/pruning overlay or the backing DB. + pub fn get(&self, key: &Key, db: &D) -> Result, Error> + where + Key: AsRef, + { + self.db.read().get(key, db) + } + + /// Revert all non-canonical blocks with the best block number. + /// Returns a database commit or `None` if not possible. + /// For archive an empty commit set is returned. + pub fn revert_one(&self) -> Option> { + self.db.write().revert_one() + } + + /// Returns last finalized block number. + pub fn best_canonical(&self) -> Option { + return self.db.read().best_canonical(); + } + + /// Check if block is pruned away. + pub fn is_pruned(&self, hash: &BlockHash, number: u64) -> bool { + return self.db.read().is_pruned(hash, number); + } + + /// Apply all pending changes + pub fn apply_pending(&self) { + self.db.write().apply_pending(); + } + + /// Revert all pending changes + pub fn revert_pending(&self) { + self.db.write().revert_pending(); + } } #[cfg(test)] mod tests { - use std::io; - use primitives::H256; - use crate::{StateDb, PruningMode, Constraints}; - use crate::test::{make_db, make_changeset, TestDb}; - - fn make_test_db(settings: PruningMode) -> (TestDb, StateDb) { - let mut db = make_db(&[91, 921, 922, 93, 94]); - let state_db = StateDb::new(settings, &db).unwrap(); - - db.commit( - &state_db - .insert_block::( - &H256::from_low_u64_be(1), - 1, - &H256::from_low_u64_be(0), - make_changeset(&[1], &[91]), - ) - .unwrap(), - ); - db.commit( - &state_db - .insert_block::( - &H256::from_low_u64_be(21), - 2, - &H256::from_low_u64_be(1), - make_changeset(&[21], &[921, 1]), - ) - .unwrap(), - ); - db.commit( - &state_db - .insert_block::( - &H256::from_low_u64_be(22), - 2, - &H256::from_low_u64_be(1), - make_changeset(&[22], &[922]), - ) - .unwrap(), - ); - db.commit( - &state_db - .insert_block::( - &H256::from_low_u64_be(3), - 3, - &H256::from_low_u64_be(21), - make_changeset(&[3], &[93]), - ) - .unwrap(), - ); - state_db.apply_pending(); - db.commit(&state_db.canonicalize_block::(&H256::from_low_u64_be(1)).unwrap()); - state_db.apply_pending(); - db.commit( - &state_db - .insert_block::( - &H256::from_low_u64_be(4), - 4, - &H256::from_low_u64_be(3), - make_changeset(&[4], &[94]), - ) - .unwrap(), - ); - state_db.apply_pending(); - db.commit(&state_db.canonicalize_block::(&H256::from_low_u64_be(21)).unwrap()); - state_db.apply_pending(); - db.commit(&state_db.canonicalize_block::(&H256::from_low_u64_be(3)).unwrap()); - state_db.apply_pending(); - - (db, state_db) - } - - #[test] - fn full_archive_keeps_everything() { - let (db, sdb) = make_test_db(PruningMode::ArchiveAll); - assert!(db.data_eq(&make_db(&[1, 21, 22, 3, 4, 91, 921, 922, 93, 94]))); - assert!(!sdb.is_pruned(&H256::from_low_u64_be(0), 0)); - } - - #[test] - fn canonical_archive_keeps_canonical() { - let (db, _) = make_test_db(PruningMode::ArchiveCanonical); - 
assert!(db.data_eq(&make_db(&[1, 21, 3, 91, 921, 922, 93, 94]))); - } - - #[test] - fn prune_window_0() { - let (db, _) = make_test_db(PruningMode::Constrained(Constraints { - max_blocks: Some(0), - max_mem: None, - })); - assert!(db.data_eq(&make_db(&[21, 3, 922, 94]))); - } - - #[test] - fn prune_window_1() { - let (db, sdb) = make_test_db(PruningMode::Constrained(Constraints { - max_blocks: Some(1), - max_mem: None, - })); - assert!(sdb.is_pruned(&H256::from_low_u64_be(0), 0)); - assert!(sdb.is_pruned(&H256::from_low_u64_be(1), 1)); - assert!(sdb.is_pruned(&H256::from_low_u64_be(21), 2)); - assert!(sdb.is_pruned(&H256::from_low_u64_be(22), 2)); - assert!(db.data_eq(&make_db(&[21, 3, 922, 93, 94]))); - } - - #[test] - fn prune_window_2() { - let (db, sdb) = make_test_db(PruningMode::Constrained(Constraints { - max_blocks: Some(2), - max_mem: None, - })); - assert!(sdb.is_pruned(&H256::from_low_u64_be(0), 0)); - assert!(sdb.is_pruned(&H256::from_low_u64_be(1), 1)); - assert!(!sdb.is_pruned(&H256::from_low_u64_be(21), 2)); - assert!(sdb.is_pruned(&H256::from_low_u64_be(22), 2)); - assert!(db.data_eq(&make_db(&[1, 21, 3, 921, 922, 93, 94]))); - } + use crate::test::{make_changeset, make_db, TestDb}; + use crate::{Constraints, PruningMode, StateDb}; + use primitives::H256; + use std::io; + + fn make_test_db(settings: PruningMode) -> (TestDb, StateDb) { + let mut db = make_db(&[91, 921, 922, 93, 94]); + let state_db = StateDb::new(settings, &db).unwrap(); + + db.commit( + &state_db + .insert_block::( + &H256::from_low_u64_be(1), + 1, + &H256::from_low_u64_be(0), + make_changeset(&[1], &[91]), + ) + .unwrap(), + ); + db.commit( + &state_db + .insert_block::( + &H256::from_low_u64_be(21), + 2, + &H256::from_low_u64_be(1), + make_changeset(&[21], &[921, 1]), + ) + .unwrap(), + ); + db.commit( + &state_db + .insert_block::( + &H256::from_low_u64_be(22), + 2, + &H256::from_low_u64_be(1), + make_changeset(&[22], &[922]), + ) + .unwrap(), + ); + db.commit( + &state_db + .insert_block::( + &H256::from_low_u64_be(3), + 3, + &H256::from_low_u64_be(21), + make_changeset(&[3], &[93]), + ) + .unwrap(), + ); + state_db.apply_pending(); + db.commit( + &state_db + .canonicalize_block::(&H256::from_low_u64_be(1)) + .unwrap(), + ); + state_db.apply_pending(); + db.commit( + &state_db + .insert_block::( + &H256::from_low_u64_be(4), + 4, + &H256::from_low_u64_be(3), + make_changeset(&[4], &[94]), + ) + .unwrap(), + ); + state_db.apply_pending(); + db.commit( + &state_db + .canonicalize_block::(&H256::from_low_u64_be(21)) + .unwrap(), + ); + state_db.apply_pending(); + db.commit( + &state_db + .canonicalize_block::(&H256::from_low_u64_be(3)) + .unwrap(), + ); + state_db.apply_pending(); + + (db, state_db) + } + + #[test] + fn full_archive_keeps_everything() { + let (db, sdb) = make_test_db(PruningMode::ArchiveAll); + assert!(db.data_eq(&make_db(&[1, 21, 22, 3, 4, 91, 921, 922, 93, 94]))); + assert!(!sdb.is_pruned(&H256::from_low_u64_be(0), 0)); + } + + #[test] + fn canonical_archive_keeps_canonical() { + let (db, _) = make_test_db(PruningMode::ArchiveCanonical); + assert!(db.data_eq(&make_db(&[1, 21, 3, 91, 921, 922, 93, 94]))); + } + + #[test] + fn prune_window_0() { + let (db, _) = make_test_db(PruningMode::Constrained(Constraints { + max_blocks: Some(0), + max_mem: None, + })); + assert!(db.data_eq(&make_db(&[21, 3, 922, 94]))); + } + + #[test] + fn prune_window_1() { + let (db, sdb) = make_test_db(PruningMode::Constrained(Constraints { + max_blocks: Some(1), + max_mem: None, + })); + 
assert!(sdb.is_pruned(&H256::from_low_u64_be(0), 0)); + assert!(sdb.is_pruned(&H256::from_low_u64_be(1), 1)); + assert!(sdb.is_pruned(&H256::from_low_u64_be(21), 2)); + assert!(sdb.is_pruned(&H256::from_low_u64_be(22), 2)); + assert!(db.data_eq(&make_db(&[21, 3, 922, 93, 94]))); + } + + #[test] + fn prune_window_2() { + let (db, sdb) = make_test_db(PruningMode::Constrained(Constraints { + max_blocks: Some(2), + max_mem: None, + })); + assert!(sdb.is_pruned(&H256::from_low_u64_be(0), 0)); + assert!(sdb.is_pruned(&H256::from_low_u64_be(1), 1)); + assert!(!sdb.is_pruned(&H256::from_low_u64_be(21), 2)); + assert!(sdb.is_pruned(&H256::from_low_u64_be(22), 2)); + assert!(db.data_eq(&make_db(&[1, 21, 3, 921, 922, 93, 94]))); + } } diff --git a/core/state-db/src/noncanonical.rs b/core/state-db/src/noncanonical.rs index da957335ba..a8228ae7f9 100644 --- a/core/state-db/src/noncanonical.rs +++ b/core/state-db/src/noncanonical.rs @@ -20,712 +20,916 @@ //! All pending changes are kept in memory until next call to `apply_pending` or //! `revert_pending` -use std::fmt; -use std::collections::{HashMap, VecDeque, hash_map::Entry}; -use super::{Error, DBValue, ChangeSet, CommitSet, MetaDb, Hash, to_meta_key}; -use crate::codec::{Encode, Decode}; +use super::{to_meta_key, ChangeSet, CommitSet, DBValue, Error, Hash, MetaDb}; +use crate::codec::{Decode, Encode}; use log::trace; +use std::collections::{hash_map::Entry, HashMap, VecDeque}; +use std::fmt; const NON_CANONICAL_JOURNAL: &[u8] = b"noncanonical_journal"; const LAST_CANONICAL: &[u8] = b"last_canonical"; /// See module documentation. pub struct NonCanonicalOverlay { - last_canonicalized: Option<(BlockHash, u64)>, - levels: VecDeque>>, - parents: HashMap, - pending_canonicalizations: Vec, - pending_insertions: Vec, - values: HashMap, //ref counted + last_canonicalized: Option<(BlockHash, u64)>, + levels: VecDeque>>, + parents: HashMap, + pending_canonicalizations: Vec, + pending_insertions: Vec, + values: HashMap, //ref counted } #[derive(Encode, Decode)] struct JournalRecord { - hash: BlockHash, - parent_hash: BlockHash, - inserted: Vec<(Key, DBValue)>, - deleted: Vec, + hash: BlockHash, + parent_hash: BlockHash, + inserted: Vec<(Key, DBValue)>, + deleted: Vec, } fn to_journal_key(block: u64, index: u64) -> Vec { - to_meta_key(NON_CANONICAL_JOURNAL, &(block, index)) + to_meta_key(NON_CANONICAL_JOURNAL, &(block, index)) } #[cfg_attr(test, derive(PartialEq, Debug))] struct BlockOverlay { - hash: BlockHash, - journal_key: Vec, - inserted: Vec, - deleted: Vec, + hash: BlockHash, + journal_key: Vec, + inserted: Vec, + deleted: Vec, } -fn insert_values(values: &mut HashMap, inserted: Vec<(Key, DBValue)>) { - for (k, v) in inserted { - debug_assert!(values.get(&k).map_or(true, |(_, value)| *value == v)); - let (ref mut counter, _) = values.entry(k).or_insert_with(|| (0, v)); - *counter += 1; - } +fn insert_values( + values: &mut HashMap, + inserted: Vec<(Key, DBValue)>, +) { + for (k, v) in inserted { + debug_assert!(values.get(&k).map_or(true, |(_, value)| *value == v)); + let (ref mut counter, _) = values.entry(k).or_insert_with(|| (0, v)); + *counter += 1; + } } fn discard_values(values: &mut HashMap, inserted: Vec) { - for k in inserted { - match values.entry(k) { - Entry::Occupied(mut e) => { - let (ref mut counter, _) = e.get_mut(); - *counter -= 1; - if *counter == 0 { - e.remove(); - } - }, - Entry::Vacant(_) => { - debug_assert!(false, "Trying to discard missing value"); - } - } - } + for k in inserted { + match values.entry(k) { + 
Entry::Occupied(mut e) => { + let (ref mut counter, _) = e.get_mut(); + *counter -= 1; + if *counter == 0 { + e.remove(); + } + } + Entry::Vacant(_) => { + debug_assert!(false, "Trying to discard missing value"); + } + } + } } fn discard_descendants( - levels: &mut VecDeque>>, - mut values: &mut HashMap, - index: usize, - parents: &mut HashMap, - hash: &BlockHash, - ) { - let mut discarded = Vec::new(); - if let Some(level) = levels.get_mut(index) { - *level = level.drain(..).filter_map(|overlay| { - let parent = parents.get(&overlay.hash).expect("there is a parent entry for each entry in levels; qed").clone(); - if parent == *hash { - parents.remove(&overlay.hash); - discarded.push(overlay.hash); - discard_values(&mut values, overlay.inserted); - None - } else { - Some(overlay) - } - }).collect(); - } - for hash in discarded { - discard_descendants(levels, values, index + 1, parents, &hash); - } + levels: &mut VecDeque>>, + mut values: &mut HashMap, + index: usize, + parents: &mut HashMap, + hash: &BlockHash, +) { + let mut discarded = Vec::new(); + if let Some(level) = levels.get_mut(index) { + *level = level + .drain(..) + .filter_map(|overlay| { + let parent = parents + .get(&overlay.hash) + .expect("there is a parent entry for each entry in levels; qed") + .clone(); + if parent == *hash { + parents.remove(&overlay.hash); + discarded.push(overlay.hash); + discard_values(&mut values, overlay.inserted); + None + } else { + Some(overlay) + } + }) + .collect(); + } + for hash in discarded { + discard_descendants(levels, values, index + 1, parents, &hash); + } } impl NonCanonicalOverlay { - /// Creates a new instance. Does not expect any metadata to be present in the DB. - pub fn new(db: &D) -> Result, Error> { - let last_canonicalized = db.get_meta(&to_meta_key(LAST_CANONICAL, &())) - .map_err(|e| Error::Db(e))?; - let last_canonicalized = match last_canonicalized { - Some(buffer) => Some(<(BlockHash, u64)>::decode(&mut buffer.as_slice()).ok_or(Error::Decoding)?), - None => None, - }; - let mut levels = VecDeque::new(); - let mut parents = HashMap::new(); - let mut values = HashMap::new(); - if let Some((ref hash, mut block)) = last_canonicalized { - // read the journal - trace!(target: "state-db", "Reading uncanonicalized journal. Last canonicalized #{} ({:?})", block, hash); - let mut total: u64 = 0; - block += 1; - loop { - let mut index: u64 = 0; - let mut level = Vec::new(); - loop { - let journal_key = to_journal_key(block, index); - match db.get_meta(&journal_key).map_err(|e| Error::Db(e))? 
{ - Some(record) => { - let record: JournalRecord = Decode::decode(&mut record.as_slice()).ok_or(Error::Decoding)?; - let inserted = record.inserted.iter().map(|(k, _)| k.clone()).collect(); - let overlay = BlockOverlay { - hash: record.hash.clone(), - journal_key, - inserted: inserted, - deleted: record.deleted, - }; - insert_values(&mut values, record.inserted); - trace!(target: "state-db", "Uncanonicalized journal entry {}.{} ({} inserted, {} deleted)", block, index, overlay.inserted.len(), overlay.deleted.len()); - level.push(overlay); - parents.insert(record.hash, record.parent_hash); - index += 1; - total += 1; - }, - None => break, - } - } - if level.is_empty() { - break; - } - levels.push_back(level); - block += 1; - } - trace!(target: "state-db", "Finished reading uncanonicalized journal, {} entries", total); - } - Ok(NonCanonicalOverlay { - last_canonicalized, - levels, - parents, - pending_canonicalizations: Default::default(), - pending_insertions: Default::default(), - values: values, - }) - } - - /// Insert a new block into the overlay. If inserted on the second level or lover expects parent to be present in the window. - pub fn insert(&mut self, hash: &BlockHash, number: u64, parent_hash: &BlockHash, changeset: ChangeSet) -> Result, Error> { - let mut commit = CommitSet::default(); - let front_block_number = self.front_block_number(); - if self.levels.is_empty() && self.last_canonicalized.is_none() && number > 0 { - // assume that parent was canonicalized - let last_canonicalized = (parent_hash.clone(), number - 1); - commit.meta.inserted.push((to_meta_key(LAST_CANONICAL, &()), last_canonicalized.encode())); - self.last_canonicalized = Some(last_canonicalized); - } else if self.last_canonicalized.is_some() { - if number < front_block_number || number >= front_block_number + self.levels.len() as u64 + 1 { - trace!(target: "state-db", "Failed to insert block {}, current is {} .. {})", - number, - front_block_number, - front_block_number + self.levels.len() as u64, - ); - return Err(Error::InvalidBlockNumber); - } - // check for valid parent if inserting on second level or higher - if number == front_block_number { - if !self.last_canonicalized.as_ref().map_or(false, |&(ref h, n)| h == parent_hash && n == number - 1) { - return Err(Error::InvalidParent); - } - } else if !self.parents.contains_key(&parent_hash) { - return Err(Error::InvalidParent); - } - } - let level = if self.levels.is_empty() || number == front_block_number + self.levels.len() as u64 { - self.levels.push_back(Vec::new()); - self.levels.back_mut().expect("can't be empty after insertion; qed") - } else { - self.levels.get_mut((number - front_block_number) as usize) + /// Creates a new instance. Does not expect any metadata to be present in the DB. + pub fn new(db: &D) -> Result, Error> { + let last_canonicalized = db + .get_meta(&to_meta_key(LAST_CANONICAL, &())) + .map_err(|e| Error::Db(e))?; + let last_canonicalized = match last_canonicalized { + Some(buffer) => { + Some(<(BlockHash, u64)>::decode(&mut buffer.as_slice()).ok_or(Error::Decoding)?) + } + None => None, + }; + let mut levels = VecDeque::new(); + let mut parents = HashMap::new(); + let mut values = HashMap::new(); + if let Some((ref hash, mut block)) = last_canonicalized { + // read the journal + trace!(target: "state-db", "Reading uncanonicalized journal. 
Last canonicalized #{} ({:?})", block, hash); + let mut total: u64 = 0; + block += 1; + loop { + let mut index: u64 = 0; + let mut level = Vec::new(); + loop { + let journal_key = to_journal_key(block, index); + match db.get_meta(&journal_key).map_err(|e| Error::Db(e))? { + Some(record) => { + let record: JournalRecord = + Decode::decode(&mut record.as_slice()).ok_or(Error::Decoding)?; + let inserted = record.inserted.iter().map(|(k, _)| k.clone()).collect(); + let overlay = BlockOverlay { + hash: record.hash.clone(), + journal_key, + inserted: inserted, + deleted: record.deleted, + }; + insert_values(&mut values, record.inserted); + trace!(target: "state-db", "Uncanonicalized journal entry {}.{} ({} inserted, {} deleted)", block, index, overlay.inserted.len(), overlay.deleted.len()); + level.push(overlay); + parents.insert(record.hash, record.parent_hash); + index += 1; + total += 1; + } + None => break, + } + } + if level.is_empty() { + break; + } + levels.push_back(level); + block += 1; + } + trace!(target: "state-db", "Finished reading uncanonicalized journal, {} entries", total); + } + Ok(NonCanonicalOverlay { + last_canonicalized, + levels, + parents, + pending_canonicalizations: Default::default(), + pending_insertions: Default::default(), + values: values, + }) + } + + /// Insert a new block into the overlay. If inserted on the second level or lover expects parent to be present in the window. + pub fn insert( + &mut self, + hash: &BlockHash, + number: u64, + parent_hash: &BlockHash, + changeset: ChangeSet, + ) -> Result, Error> { + let mut commit = CommitSet::default(); + let front_block_number = self.front_block_number(); + if self.levels.is_empty() && self.last_canonicalized.is_none() && number > 0 { + // assume that parent was canonicalized + let last_canonicalized = (parent_hash.clone(), number - 1); + commit.meta.inserted.push(( + to_meta_key(LAST_CANONICAL, &()), + last_canonicalized.encode(), + )); + self.last_canonicalized = Some(last_canonicalized); + } else if self.last_canonicalized.is_some() { + if number < front_block_number + || number >= front_block_number + self.levels.len() as u64 + 1 + { + trace!(target: "state-db", "Failed to insert block {}, current is {} .. {})", + number, + front_block_number, + front_block_number + self.levels.len() as u64, + ); + return Err(Error::InvalidBlockNumber); + } + // check for valid parent if inserting on second level or higher + if number == front_block_number { + if !self + .last_canonicalized + .as_ref() + .map_or(false, |&(ref h, n)| h == parent_hash && n == number - 1) + { + return Err(Error::InvalidParent); + } + } else if !self.parents.contains_key(&parent_hash) { + return Err(Error::InvalidParent); + } + } + let level = if self.levels.is_empty() + || number == front_block_number + self.levels.len() as u64 + { + self.levels.push_back(Vec::new()); + self.levels + .back_mut() + .expect("can't be empty after insertion; qed") + } else { + self.levels.get_mut((number - front_block_number) as usize) .expect("number is [front_block_number .. 
front_block_number + levels.len()) is asserted in precondition; qed") - }; - - let index = level.len() as u64; - let journal_key = to_journal_key(number, index); - - let inserted = changeset.inserted.iter().map(|(k, _)| k.clone()).collect(); - let overlay = BlockOverlay { - hash: hash.clone(), - journal_key: journal_key.clone(), - inserted: inserted, - deleted: changeset.deleted.clone(), - }; - level.push(overlay); - self.parents.insert(hash.clone(), parent_hash.clone()); - let journal_record = JournalRecord { - hash: hash.clone(), - parent_hash: parent_hash.clone(), - inserted: changeset.inserted, - deleted: changeset.deleted, - }; - commit.meta.inserted.push((journal_key, journal_record.encode())); - trace!(target: "state-db", "Inserted uncanonicalized changeset {}.{} ({} inserted, {} deleted)", number, index, journal_record.inserted.len(), journal_record.deleted.len()); - insert_values(&mut self.values, journal_record.inserted); - self.pending_insertions.push(hash.clone()); - Ok(commit) - } - - fn discard_journals(&self, level_index: usize, discarded_journals: &mut Vec>, hash: &BlockHash) { - if let Some(level) = self.levels.get(level_index) { - level.iter().for_each(|overlay| { - let parent = self.parents.get(&overlay.hash).expect("there is a parent entry for each entry in levels; qed").clone(); - if parent == *hash { - discarded_journals.push(overlay.journal_key.clone()); - self.discard_journals(level_index + 1, discarded_journals, &overlay.hash); - } - }); - } - } - - fn front_block_number(&self) -> u64 { - self.last_canonicalized.as_ref().map(|&(_, n)| n + 1).unwrap_or(0) - } - - pub fn last_canonicalized_block_number(&self) -> Option { - match self.last_canonicalized.as_ref().map(|&(_, n)| n) { - Some(n) => Some(n + self.pending_canonicalizations.len() as u64), - None if !self.pending_canonicalizations.is_empty() => Some(self.pending_canonicalizations.len() as u64), - _ => None, - } - } - - pub fn last_canonicalized_hash(&self) -> Option { - self.last_canonicalized.as_ref().map(|&(ref h, _)| h.clone()) - } - - pub fn top_level(&self) -> Vec<(BlockHash, u64)> { - let start = self.last_canonicalized_block_number().unwrap_or(0); - self.levels - .get(self.pending_canonicalizations.len()) - .map(|level| level.iter().map(|r| (r.hash.clone(), start)).collect()) - .unwrap_or_default() - } - - /// Select a top-level root and canonicalized it. Discards all sibling subtrees and the root. - /// Returns a set of changes that need to be added to the DB. 
- pub fn canonicalize(&mut self, hash: &BlockHash) -> Result, Error> { - trace!(target: "state-db", "Canonicalizing {:?}", hash); - let level = self.levels.get(self.pending_canonicalizations.len()).ok_or_else(|| Error::InvalidBlock)?; - let index = level - .iter() - .position(|overlay| overlay.hash == *hash) - .ok_or_else(|| Error::InvalidBlock)?; - - let mut commit = CommitSet::default(); - let mut discarded_journals = Vec::new(); - for (i, overlay) in level.into_iter().enumerate() { - if i == index { - // that's the one we need to canonicalize - commit.data.inserted = overlay.inserted.iter() - .map(|k| (k.clone(), self.values.get(k).expect("For each key in verlays there's a value in values").1.clone())) - .collect(); - commit.data.deleted = overlay.deleted.clone(); - } else { - self.discard_journals(self.pending_canonicalizations.len() + 1, &mut discarded_journals, &overlay.hash); - } - discarded_journals.push(overlay.journal_key.clone()); - } - commit.meta.deleted.append(&mut discarded_journals); - let canonicalized = (hash.clone(), self.front_block_number() + self.pending_canonicalizations.len() as u64); - commit.meta.inserted.push((to_meta_key(LAST_CANONICAL, &()), canonicalized.encode())); - trace!(target: "state-db", "Discarding {} records", commit.meta.deleted.len()); - self.pending_canonicalizations.push(hash.clone()); - Ok(commit) - } - - fn apply_canonicalizations(&mut self) { - let last = self.pending_canonicalizations.last().cloned(); - let count = self.pending_canonicalizations.len() as u64; - for hash in self.pending_canonicalizations.drain(..) { - trace!(target: "state-db", "Post canonicalizing {:?}", hash); - let level = self.levels.pop_front().expect("Hash validity is checked in `canonicalize`"); - let index = level - .iter() - .position(|overlay| overlay.hash == hash) - .expect("Hash validity is checked in `canonicalize`"); - - // discard unfinalized overlays and values - for (i, overlay) in level.into_iter().enumerate() { - self.parents.remove(&overlay.hash); - if i != index { - discard_descendants(&mut self.levels, &mut self.values, 0, &mut self.parents, &overlay.hash); - } - discard_values(&mut self.values, overlay.inserted); - } - } - if let Some(hash) = last { - let last_canonicalized = (hash, self.last_canonicalized.as_ref().map(|(_, n)| n + count).unwrap_or(count - 1)); - self.last_canonicalized = Some(last_canonicalized); - } - } - - /// Get a value from the node overlay. This searches in every existing changeset. - pub fn get(&self, key: &Key) -> Option { - if let Some((_, value)) = self.values.get(&key) { - return Some(value.clone()); - } - None - } - - /// Check if the block is in the canonicalization queue. - pub fn have_block(&self, hash: &BlockHash) -> bool { - (self.parents.contains_key(hash) || self.pending_insertions.contains(hash)) - && !self.pending_canonicalizations.contains(hash) - } - - /// Revert a single level. Returns commit set that deletes the journal or `None` if not possible. - pub fn revert_one(&mut self) -> Option> { - self.levels.pop_back().map(|level| { - let mut commit = CommitSet::default(); - for overlay in level.into_iter() { - commit.meta.deleted.push(overlay.journal_key); - self.parents.remove(&overlay.hash); - discard_values(&mut self.values, overlay.inserted); - } - commit - }) - } - - fn revert_insertions(&mut self) { - self.pending_insertions.reverse(); - for hash in self.pending_insertions.drain(..) { - self.parents.remove(&hash); - // find a level. When iterating insertions backwards the hash is always last in the level. 
- let level_index = - self.levels.iter().position(|level| - level.last().expect("Hash is added in `insert` in reverse order").hash == hash) - .expect("Hash is added in insert"); - - let overlay = self.levels[level_index].pop().expect("Empty levels are not allowed in self.levels"); - discard_values(&mut self.values, overlay.inserted); - if self.levels[level_index].is_empty() { - debug_assert_eq!(level_index, self.levels.len() - 1); - self.levels.pop_back(); - } - } - } - - /// Apply all pending changes - pub fn apply_pending(&mut self) { - self.apply_canonicalizations(); - self.pending_insertions.clear(); - } - - /// Revert all pending changes - pub fn revert_pending(&mut self) { - self.pending_canonicalizations.clear(); - self.revert_insertions(); - } + }; + + let index = level.len() as u64; + let journal_key = to_journal_key(number, index); + + let inserted = changeset.inserted.iter().map(|(k, _)| k.clone()).collect(); + let overlay = BlockOverlay { + hash: hash.clone(), + journal_key: journal_key.clone(), + inserted: inserted, + deleted: changeset.deleted.clone(), + }; + level.push(overlay); + self.parents.insert(hash.clone(), parent_hash.clone()); + let journal_record = JournalRecord { + hash: hash.clone(), + parent_hash: parent_hash.clone(), + inserted: changeset.inserted, + deleted: changeset.deleted, + }; + commit + .meta + .inserted + .push((journal_key, journal_record.encode())); + trace!(target: "state-db", "Inserted uncanonicalized changeset {}.{} ({} inserted, {} deleted)", number, index, journal_record.inserted.len(), journal_record.deleted.len()); + insert_values(&mut self.values, journal_record.inserted); + self.pending_insertions.push(hash.clone()); + Ok(commit) + } + + fn discard_journals( + &self, + level_index: usize, + discarded_journals: &mut Vec>, + hash: &BlockHash, + ) { + if let Some(level) = self.levels.get(level_index) { + level.iter().for_each(|overlay| { + let parent = self + .parents + .get(&overlay.hash) + .expect("there is a parent entry for each entry in levels; qed") + .clone(); + if parent == *hash { + discarded_journals.push(overlay.journal_key.clone()); + self.discard_journals(level_index + 1, discarded_journals, &overlay.hash); + } + }); + } + } + + fn front_block_number(&self) -> u64 { + self.last_canonicalized + .as_ref() + .map(|&(_, n)| n + 1) + .unwrap_or(0) + } + + pub fn last_canonicalized_block_number(&self) -> Option { + match self.last_canonicalized.as_ref().map(|&(_, n)| n) { + Some(n) => Some(n + self.pending_canonicalizations.len() as u64), + None if !self.pending_canonicalizations.is_empty() => { + Some(self.pending_canonicalizations.len() as u64) + } + _ => None, + } + } + + pub fn last_canonicalized_hash(&self) -> Option { + self.last_canonicalized + .as_ref() + .map(|&(ref h, _)| h.clone()) + } + + pub fn top_level(&self) -> Vec<(BlockHash, u64)> { + let start = self.last_canonicalized_block_number().unwrap_or(0); + self.levels + .get(self.pending_canonicalizations.len()) + .map(|level| level.iter().map(|r| (r.hash.clone(), start)).collect()) + .unwrap_or_default() + } + + /// Select a top-level root and canonicalized it. Discards all sibling subtrees and the root. + /// Returns a set of changes that need to be added to the DB. 
+ pub fn canonicalize( + &mut self, + hash: &BlockHash, + ) -> Result, Error> { + trace!(target: "state-db", "Canonicalizing {:?}", hash); + let level = self + .levels + .get(self.pending_canonicalizations.len()) + .ok_or_else(|| Error::InvalidBlock)?; + let index = level + .iter() + .position(|overlay| overlay.hash == *hash) + .ok_or_else(|| Error::InvalidBlock)?; + + let mut commit = CommitSet::default(); + let mut discarded_journals = Vec::new(); + for (i, overlay) in level.into_iter().enumerate() { + if i == index { + // that's the one we need to canonicalize + commit.data.inserted = overlay + .inserted + .iter() + .map(|k| { + ( + k.clone(), + self.values + .get(k) + .expect("For each key in verlays there's a value in values") + .1 + .clone(), + ) + }) + .collect(); + commit.data.deleted = overlay.deleted.clone(); + } else { + self.discard_journals( + self.pending_canonicalizations.len() + 1, + &mut discarded_journals, + &overlay.hash, + ); + } + discarded_journals.push(overlay.journal_key.clone()); + } + commit.meta.deleted.append(&mut discarded_journals); + let canonicalized = ( + hash.clone(), + self.front_block_number() + self.pending_canonicalizations.len() as u64, + ); + commit + .meta + .inserted + .push((to_meta_key(LAST_CANONICAL, &()), canonicalized.encode())); + trace!(target: "state-db", "Discarding {} records", commit.meta.deleted.len()); + self.pending_canonicalizations.push(hash.clone()); + Ok(commit) + } + + fn apply_canonicalizations(&mut self) { + let last = self.pending_canonicalizations.last().cloned(); + let count = self.pending_canonicalizations.len() as u64; + for hash in self.pending_canonicalizations.drain(..) { + trace!(target: "state-db", "Post canonicalizing {:?}", hash); + let level = self + .levels + .pop_front() + .expect("Hash validity is checked in `canonicalize`"); + let index = level + .iter() + .position(|overlay| overlay.hash == hash) + .expect("Hash validity is checked in `canonicalize`"); + + // discard unfinalized overlays and values + for (i, overlay) in level.into_iter().enumerate() { + self.parents.remove(&overlay.hash); + if i != index { + discard_descendants( + &mut self.levels, + &mut self.values, + 0, + &mut self.parents, + &overlay.hash, + ); + } + discard_values(&mut self.values, overlay.inserted); + } + } + if let Some(hash) = last { + let last_canonicalized = ( + hash, + self.last_canonicalized + .as_ref() + .map(|(_, n)| n + count) + .unwrap_or(count - 1), + ); + self.last_canonicalized = Some(last_canonicalized); + } + } + + /// Get a value from the node overlay. This searches in every existing changeset. + pub fn get(&self, key: &Key) -> Option { + if let Some((_, value)) = self.values.get(&key) { + return Some(value.clone()); + } + None + } + + /// Check if the block is in the canonicalization queue. + pub fn have_block(&self, hash: &BlockHash) -> bool { + (self.parents.contains_key(hash) || self.pending_insertions.contains(hash)) + && !self.pending_canonicalizations.contains(hash) + } + + /// Revert a single level. Returns commit set that deletes the journal or `None` if not possible. + pub fn revert_one(&mut self) -> Option> { + self.levels.pop_back().map(|level| { + let mut commit = CommitSet::default(); + for overlay in level.into_iter() { + commit.meta.deleted.push(overlay.journal_key); + self.parents.remove(&overlay.hash); + discard_values(&mut self.values, overlay.inserted); + } + commit + }) + } + + fn revert_insertions(&mut self) { + self.pending_insertions.reverse(); + for hash in self.pending_insertions.drain(..) 
{ + self.parents.remove(&hash); + // find a level. When iterating insertions backwards the hash is always last in the level. + let level_index = self + .levels + .iter() + .position(|level| { + level + .last() + .expect("Hash is added in `insert` in reverse order") + .hash + == hash + }) + .expect("Hash is added in insert"); + + let overlay = self.levels[level_index] + .pop() + .expect("Empty levels are not allowed in self.levels"); + discard_values(&mut self.values, overlay.inserted); + if self.levels[level_index].is_empty() { + debug_assert_eq!(level_index, self.levels.len() - 1); + self.levels.pop_back(); + } + } + } + + /// Apply all pending changes + pub fn apply_pending(&mut self) { + self.apply_canonicalizations(); + self.pending_insertions.clear(); + } + + /// Revert all pending changes + pub fn revert_pending(&mut self) { + self.pending_canonicalizations.clear(); + self.revert_insertions(); + } } #[cfg(test)] mod tests { - use std::io; - use primitives::H256; - use super::{NonCanonicalOverlay, to_journal_key}; - use crate::ChangeSet; - use crate::test::{make_db, make_changeset}; - - fn contains(overlay: &NonCanonicalOverlay, key: u64) -> bool { - overlay.get(&H256::from_low_u64_be(key)) == Some(H256::from_low_u64_be(key).as_bytes().to_vec()) - } - - #[test] - fn created_from_empty_db() { - let db = make_db(&[]); - let overlay: NonCanonicalOverlay = NonCanonicalOverlay::new(&db).unwrap(); - assert_eq!(overlay.last_canonicalized, None); - assert!(overlay.levels.is_empty()); - assert!(overlay.parents.is_empty()); - } - - #[test] - #[should_panic] - fn canonicalize_empty_panics() { - let db = make_db(&[]); - let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - overlay.canonicalize::(&H256::default()).unwrap(); - } - - #[test] - #[should_panic] - fn insert_ahead_panics() { - let db = make_db(&[]); - let h1 = H256::random(); - let h2 = H256::random(); - let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - overlay.insert::(&h1, 2, &H256::default(), ChangeSet::default()).unwrap(); - overlay.insert::(&h2, 1, &h1, ChangeSet::default()).unwrap(); - } - - #[test] - #[should_panic] - fn insert_behind_panics() { - let h1 = H256::random(); - let h2 = H256::random(); - let db = make_db(&[]); - let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - overlay.insert::(&h1, 1, &H256::default(), ChangeSet::default()).unwrap(); - overlay.insert::(&h2, 3, &h1, ChangeSet::default()).unwrap(); - } - - #[test] - #[should_panic] - fn insert_unknown_parent_panics() { - let db = make_db(&[]); - let h1 = H256::random(); - let h2 = H256::random(); - let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - overlay.insert::(&h1, 1, &H256::default(), ChangeSet::default()).unwrap(); - overlay.insert::(&h2, 2, &H256::default(), ChangeSet::default()).unwrap(); - } - - #[test] - #[should_panic] - fn canonicalize_unknown_panics() { - let h1 = H256::random(); - let h2 = H256::random(); - let db = make_db(&[]); - let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - overlay.insert::(&h1, 1, &H256::default(), ChangeSet::default()).unwrap(); - overlay.canonicalize::(&h2).unwrap(); - } - - #[test] - fn insert_canonicalize_one() { - let h1 = H256::random(); - let mut db = make_db(&[1, 2]); - let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - let changeset = make_changeset(&[3, 4], &[2]); - let insertion = overlay.insert::(&h1, 1, &H256::default(), changeset.clone()).unwrap(); - assert_eq!(insertion.data.inserted.len(), 0); - assert_eq!(insertion.data.deleted.len(), 0); - 
assert_eq!(insertion.meta.inserted.len(), 2); - assert_eq!(insertion.meta.deleted.len(), 0); - db.commit(&insertion); - let finalization = overlay.canonicalize::(&h1).unwrap(); - assert_eq!(finalization.data.inserted.len(), changeset.inserted.len()); - assert_eq!(finalization.data.deleted.len(), changeset.deleted.len()); - assert_eq!(finalization.meta.inserted.len(), 1); - assert_eq!(finalization.meta.deleted.len(), 1); - db.commit(&finalization); - assert!(db.data_eq(&make_db(&[1, 3, 4]))); - } - - #[test] - fn restore_from_journal() { - let h1 = H256::random(); - let h2 = H256::random(); - let mut db = make_db(&[1, 2]); - let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - db.commit(&overlay.insert::(&h1, 10, &H256::default(), make_changeset(&[3, 4], &[2])).unwrap()); - db.commit(&overlay.insert::(&h2, 11, &h1, make_changeset(&[5], &[3])).unwrap()); - assert_eq!(db.meta.len(), 3); - - let overlay2 = NonCanonicalOverlay::::new(&db).unwrap(); - assert_eq!(overlay.levels, overlay2.levels); - assert_eq!(overlay.parents, overlay2.parents); - assert_eq!(overlay.last_canonicalized, overlay2.last_canonicalized); - } - - #[test] - fn restore_from_journal_after_canonicalize() { - let h1 = H256::random(); - let h2 = H256::random(); - let mut db = make_db(&[1, 2]); - let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - db.commit(&overlay.insert::(&h1, 10, &H256::default(), make_changeset(&[3, 4], &[2])).unwrap()); - db.commit(&overlay.insert::(&h2, 11, &h1, make_changeset(&[5], &[3])).unwrap()); - db.commit(&overlay.canonicalize::(&h1).unwrap()); - overlay.apply_pending(); - assert_eq!(overlay.levels.len(), 1); - - let overlay2 = NonCanonicalOverlay::::new(&db).unwrap(); - assert_eq!(overlay.levels, overlay2.levels); - assert_eq!(overlay.parents, overlay2.parents); - assert_eq!(overlay.last_canonicalized, overlay2.last_canonicalized); - } - - #[test] - fn insert_canonicalize_two() { - let h1 = H256::random(); - let h2 = H256::random(); - let mut db = make_db(&[1, 2, 3, 4]); - let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - let changeset1 = make_changeset(&[5, 6], &[2]); - let changeset2 = make_changeset(&[7, 8], &[5, 3]); - db.commit(&overlay.insert::(&h1, 1, &H256::default(), changeset1).unwrap()); - assert!(contains(&overlay, 5)); - db.commit(&overlay.insert::(&h2, 2, &h1, changeset2).unwrap()); - assert!(contains(&overlay, 7)); - assert!(contains(&overlay, 5)); - assert_eq!(overlay.levels.len(), 2); - assert_eq!(overlay.parents.len(), 2); - db.commit(&overlay.canonicalize::(&h1).unwrap()); - assert!(contains(&overlay, 5)); - assert_eq!(overlay.levels.len(), 2); - assert_eq!(overlay.parents.len(), 2); - overlay.apply_pending(); - assert_eq!(overlay.levels.len(), 1); - assert_eq!(overlay.parents.len(), 1); - assert!(!contains(&overlay, 5)); - assert!(contains(&overlay, 7)); - db.commit(&overlay.canonicalize::(&h2).unwrap()); - overlay.apply_pending(); - assert_eq!(overlay.levels.len(), 0); - assert_eq!(overlay.parents.len(), 0); - assert!(db.data_eq(&make_db(&[1, 4, 6, 7, 8]))); - } - - #[test] - fn insert_same_key() { - let mut db = make_db(&[]); - let (h_1, c_1) = (H256::random(), make_changeset(&[1], &[])); - let (h_2, c_2) = (H256::random(), make_changeset(&[1], &[])); - - let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - db.commit(&overlay.insert::(&h_1, 1, &H256::default(), c_1).unwrap()); - db.commit(&overlay.insert::(&h_2, 1, &H256::default(), c_2).unwrap()); - assert!(contains(&overlay, 1)); - db.commit(&overlay.canonicalize::(&h_1).unwrap()); - 
assert!(contains(&overlay, 1)); - overlay.apply_pending(); - assert!(!contains(&overlay, 1)); - } - - #[test] - fn insert_with_pending_canonicalization() { - let h1 = H256::random(); - let h2 = H256::random(); - let h3 = H256::random(); - let mut db = make_db(&[]); - let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - let changeset = make_changeset(&[], &[]); - db.commit(&overlay.insert::(&h1, 1, &H256::default(), changeset.clone()).unwrap()); - db.commit(&overlay.insert::(&h2, 2, &h1, changeset.clone()).unwrap()); - overlay.apply_pending(); - db.commit(&overlay.canonicalize::(&h1).unwrap()); - db.commit(&overlay.canonicalize::(&h2).unwrap()); - db.commit(&overlay.insert::(&h3, 3, &h2, changeset.clone()).unwrap()); - overlay.apply_pending(); - assert_eq!(overlay.levels.len(), 1); - } - - #[test] - fn complex_tree() { - use crate::MetaDb; - let mut db = make_db(&[]); - - // - 1 - 1_1 - 1_1_1 - // \ 1_2 - 1_2_1 - // \ 1_2_2 - // \ 1_2_3 - // - // - 2 - 2_1 - 2_1_1 - // \ 2_2 - // - // 1_2_2 is the winner - - let (h_1, c_1) = (H256::random(), make_changeset(&[1], &[])); - let (h_2, c_2) = (H256::random(), make_changeset(&[2], &[])); - - let (h_1_1, c_1_1) = (H256::random(), make_changeset(&[11], &[])); - let (h_1_2, c_1_2) = (H256::random(), make_changeset(&[12], &[])); - let (h_2_1, c_2_1) = (H256::random(), make_changeset(&[21], &[])); - let (h_2_2, c_2_2) = (H256::random(), make_changeset(&[22], &[])); - - let (h_1_1_1, c_1_1_1) = (H256::random(), make_changeset(&[111], &[])); - let (h_1_2_1, c_1_2_1) = (H256::random(), make_changeset(&[121], &[])); - let (h_1_2_2, c_1_2_2) = (H256::random(), make_changeset(&[122], &[])); - let (h_1_2_3, c_1_2_3) = (H256::random(), make_changeset(&[123], &[])); - let (h_2_1_1, c_2_1_1) = (H256::random(), make_changeset(&[211], &[])); - - let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - db.commit(&overlay.insert::(&h_1, 1, &H256::default(), c_1).unwrap()); - - db.commit(&overlay.insert::(&h_1_1, 2, &h_1, c_1_1).unwrap()); - db.commit(&overlay.insert::(&h_1_2, 2, &h_1, c_1_2).unwrap()); - - db.commit(&overlay.insert::(&h_2, 1, &H256::default(), c_2).unwrap()); - - db.commit(&overlay.insert::(&h_2_1, 2, &h_2, c_2_1).unwrap()); - db.commit(&overlay.insert::(&h_2_2, 2, &h_2, c_2_2).unwrap()); - - db.commit(&overlay.insert::(&h_1_1_1, 3, &h_1_1, c_1_1_1).unwrap()); - db.commit(&overlay.insert::(&h_1_2_1, 3, &h_1_2, c_1_2_1).unwrap()); - db.commit(&overlay.insert::(&h_1_2_2, 3, &h_1_2, c_1_2_2).unwrap()); - db.commit(&overlay.insert::(&h_1_2_3, 3, &h_1_2, c_1_2_3).unwrap()); - db.commit(&overlay.insert::(&h_2_1_1, 3, &h_2_1, c_2_1_1).unwrap()); - - assert!(contains(&overlay, 2)); - assert!(contains(&overlay, 11)); - assert!(contains(&overlay, 21)); - assert!(contains(&overlay, 111)); - assert!(contains(&overlay, 122)); - assert!(contains(&overlay, 211)); - assert_eq!(overlay.levels.len(), 3); - assert_eq!(overlay.parents.len(), 11); - assert_eq!(overlay.last_canonicalized, Some((H256::default(), 0))); - - // check if restoration from journal results in the same tree - let overlay2 = NonCanonicalOverlay::::new(&db).unwrap(); - assert_eq!(overlay.levels, overlay2.levels); - assert_eq!(overlay.parents, overlay2.parents); - assert_eq!(overlay.last_canonicalized, overlay2.last_canonicalized); - - // canonicalize 1. 
2 and all its children should be discarded - db.commit(&overlay.canonicalize::(&h_1).unwrap()); - overlay.apply_pending(); - assert_eq!(overlay.levels.len(), 2); - assert_eq!(overlay.parents.len(), 6); - assert!(!contains(&overlay, 1)); - assert!(!contains(&overlay, 2)); - assert!(!contains(&overlay, 21)); - assert!(!contains(&overlay, 22)); - assert!(!contains(&overlay, 211)); - assert!(contains(&overlay, 111)); - assert!(!contains(&overlay, 211)); - // check that journals are deleted - assert!(db.get_meta(&to_journal_key(1, 0)).unwrap().is_none()); - assert!(db.get_meta(&to_journal_key(1, 1)).unwrap().is_none()); - assert!(db.get_meta(&to_journal_key(2, 1)).unwrap().is_some()); - assert!(db.get_meta(&to_journal_key(2, 2)).unwrap().is_none()); - assert!(db.get_meta(&to_journal_key(2, 3)).unwrap().is_none()); - - // canonicalize 1_2. 1_1 and all its children should be discarded - db.commit(&overlay.canonicalize::(&h_1_2).unwrap()); - overlay.apply_pending(); - assert_eq!(overlay.levels.len(), 1); - assert_eq!(overlay.parents.len(), 3); - assert!(!contains(&overlay, 11)); - assert!(!contains(&overlay, 111)); - assert!(contains(&overlay, 121)); - assert!(contains(&overlay, 122)); - assert!(contains(&overlay, 123)); - assert!(overlay.have_block(&h_1_2_1)); - assert!(!overlay.have_block(&h_1_2)); - assert!(!overlay.have_block(&h_1_1)); - assert!(!overlay.have_block(&h_1_1_1)); - - // canonicalize 1_2_2 - db.commit(&overlay.canonicalize::(&h_1_2_2).unwrap()); - overlay.apply_pending(); - assert_eq!(overlay.levels.len(), 0); - assert_eq!(overlay.parents.len(), 0); - assert!(db.data_eq(&make_db(&[1, 12, 122]))); - assert_eq!(overlay.last_canonicalized, Some((h_1_2_2, 3))); - } - - #[test] - fn insert_revert() { - let h1 = H256::random(); - let h2 = H256::random(); - let mut db = make_db(&[1, 2, 3, 4]); - let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - assert!(overlay.revert_one().is_none()); - let changeset1 = make_changeset(&[5, 6], &[2]); - let changeset2 = make_changeset(&[7, 8], &[5, 3]); - db.commit(&overlay.insert::(&h1, 1, &H256::default(), changeset1).unwrap()); - db.commit(&overlay.insert::(&h2, 2, &h1, changeset2).unwrap()); - assert!(contains(&overlay, 7)); - db.commit(&overlay.revert_one().unwrap()); - assert_eq!(overlay.parents.len(), 1); - assert!(contains(&overlay, 5)); - assert!(!contains(&overlay, 7)); - db.commit(&overlay.revert_one().unwrap()); - assert_eq!(overlay.levels.len(), 0); - assert_eq!(overlay.parents.len(), 0); - assert!(overlay.revert_one().is_none()); - } - - #[test] - fn revert_pending_insertion() { - let h1 = H256::random(); - let h2_1 = H256::random(); - let h2_2 = H256::random(); - let db = make_db(&[]); - let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - let changeset1 = make_changeset(&[5, 6], &[2]); - let changeset2 = make_changeset(&[7, 8], &[5, 3]); - let changeset3 = make_changeset(&[9], &[]); - overlay.insert::(&h1, 1, &H256::default(), changeset1).unwrap(); - assert!(contains(&overlay, 5)); - overlay.insert::(&h2_1, 2, &h1, changeset2).unwrap(); - overlay.insert::(&h2_2, 2, &h1, changeset3).unwrap(); - assert!(contains(&overlay, 7)); - assert!(contains(&overlay, 5)); - assert!(contains(&overlay, 9)); - assert_eq!(overlay.levels.len(), 2); - assert_eq!(overlay.parents.len(), 3); - overlay.revert_pending(); - assert!(!contains(&overlay, 5)); - assert_eq!(overlay.levels.len(), 0); - assert_eq!(overlay.parents.len(), 0); - } + use super::{to_journal_key, NonCanonicalOverlay}; + use crate::test::{make_changeset, make_db}; + use 
crate::ChangeSet; + use primitives::H256; + use std::io; + + fn contains(overlay: &NonCanonicalOverlay, key: u64) -> bool { + overlay.get(&H256::from_low_u64_be(key)) + == Some(H256::from_low_u64_be(key).as_bytes().to_vec()) + } + + #[test] + fn created_from_empty_db() { + let db = make_db(&[]); + let overlay: NonCanonicalOverlay = NonCanonicalOverlay::new(&db).unwrap(); + assert_eq!(overlay.last_canonicalized, None); + assert!(overlay.levels.is_empty()); + assert!(overlay.parents.is_empty()); + } + + #[test] + #[should_panic] + fn canonicalize_empty_panics() { + let db = make_db(&[]); + let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); + overlay.canonicalize::(&H256::default()).unwrap(); + } + + #[test] + #[should_panic] + fn insert_ahead_panics() { + let db = make_db(&[]); + let h1 = H256::random(); + let h2 = H256::random(); + let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); + overlay + .insert::(&h1, 2, &H256::default(), ChangeSet::default()) + .unwrap(); + overlay + .insert::(&h2, 1, &h1, ChangeSet::default()) + .unwrap(); + } + + #[test] + #[should_panic] + fn insert_behind_panics() { + let h1 = H256::random(); + let h2 = H256::random(); + let db = make_db(&[]); + let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); + overlay + .insert::(&h1, 1, &H256::default(), ChangeSet::default()) + .unwrap(); + overlay + .insert::(&h2, 3, &h1, ChangeSet::default()) + .unwrap(); + } + + #[test] + #[should_panic] + fn insert_unknown_parent_panics() { + let db = make_db(&[]); + let h1 = H256::random(); + let h2 = H256::random(); + let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); + overlay + .insert::(&h1, 1, &H256::default(), ChangeSet::default()) + .unwrap(); + overlay + .insert::(&h2, 2, &H256::default(), ChangeSet::default()) + .unwrap(); + } + + #[test] + #[should_panic] + fn canonicalize_unknown_panics() { + let h1 = H256::random(); + let h2 = H256::random(); + let db = make_db(&[]); + let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); + overlay + .insert::(&h1, 1, &H256::default(), ChangeSet::default()) + .unwrap(); + overlay.canonicalize::(&h2).unwrap(); + } + + #[test] + fn insert_canonicalize_one() { + let h1 = H256::random(); + let mut db = make_db(&[1, 2]); + let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); + let changeset = make_changeset(&[3, 4], &[2]); + let insertion = overlay + .insert::(&h1, 1, &H256::default(), changeset.clone()) + .unwrap(); + assert_eq!(insertion.data.inserted.len(), 0); + assert_eq!(insertion.data.deleted.len(), 0); + assert_eq!(insertion.meta.inserted.len(), 2); + assert_eq!(insertion.meta.deleted.len(), 0); + db.commit(&insertion); + let finalization = overlay.canonicalize::(&h1).unwrap(); + assert_eq!(finalization.data.inserted.len(), changeset.inserted.len()); + assert_eq!(finalization.data.deleted.len(), changeset.deleted.len()); + assert_eq!(finalization.meta.inserted.len(), 1); + assert_eq!(finalization.meta.deleted.len(), 1); + db.commit(&finalization); + assert!(db.data_eq(&make_db(&[1, 3, 4]))); + } + + #[test] + fn restore_from_journal() { + let h1 = H256::random(); + let h2 = H256::random(); + let mut db = make_db(&[1, 2]); + let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); + db.commit( + &overlay + .insert::(&h1, 10, &H256::default(), make_changeset(&[3, 4], &[2])) + .unwrap(), + ); + db.commit( + &overlay + .insert::(&h2, 11, &h1, make_changeset(&[5], &[3])) + .unwrap(), + ); + assert_eq!(db.meta.len(), 3); + + let overlay2 = NonCanonicalOverlay::::new(&db).unwrap(); + 
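+        // A second overlay built over the same database sees only the journal
+        // records committed above, so it must reconstruct an identical tree: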
assert_eq!(overlay.levels, overlay2.levels); + assert_eq!(overlay.parents, overlay2.parents); + assert_eq!(overlay.last_canonicalized, overlay2.last_canonicalized); + } + + #[test] + fn restore_from_journal_after_canonicalize() { + let h1 = H256::random(); + let h2 = H256::random(); + let mut db = make_db(&[1, 2]); + let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); + db.commit( + &overlay + .insert::(&h1, 10, &H256::default(), make_changeset(&[3, 4], &[2])) + .unwrap(), + ); + db.commit( + &overlay + .insert::(&h2, 11, &h1, make_changeset(&[5], &[3])) + .unwrap(), + ); + db.commit(&overlay.canonicalize::(&h1).unwrap()); + overlay.apply_pending(); + assert_eq!(overlay.levels.len(), 1); + + let overlay2 = NonCanonicalOverlay::::new(&db).unwrap(); + assert_eq!(overlay.levels, overlay2.levels); + assert_eq!(overlay.parents, overlay2.parents); + assert_eq!(overlay.last_canonicalized, overlay2.last_canonicalized); + } + + #[test] + fn insert_canonicalize_two() { + let h1 = H256::random(); + let h2 = H256::random(); + let mut db = make_db(&[1, 2, 3, 4]); + let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); + let changeset1 = make_changeset(&[5, 6], &[2]); + let changeset2 = make_changeset(&[7, 8], &[5, 3]); + db.commit( + &overlay + .insert::(&h1, 1, &H256::default(), changeset1) + .unwrap(), + ); + assert!(contains(&overlay, 5)); + db.commit( + &overlay + .insert::(&h2, 2, &h1, changeset2) + .unwrap(), + ); + assert!(contains(&overlay, 7)); + assert!(contains(&overlay, 5)); + assert_eq!(overlay.levels.len(), 2); + assert_eq!(overlay.parents.len(), 2); + db.commit(&overlay.canonicalize::(&h1).unwrap()); + assert!(contains(&overlay, 5)); + assert_eq!(overlay.levels.len(), 2); + assert_eq!(overlay.parents.len(), 2); + overlay.apply_pending(); + assert_eq!(overlay.levels.len(), 1); + assert_eq!(overlay.parents.len(), 1); + assert!(!contains(&overlay, 5)); + assert!(contains(&overlay, 7)); + db.commit(&overlay.canonicalize::(&h2).unwrap()); + overlay.apply_pending(); + assert_eq!(overlay.levels.len(), 0); + assert_eq!(overlay.parents.len(), 0); + assert!(db.data_eq(&make_db(&[1, 4, 6, 7, 8]))); + } + + #[test] + fn insert_same_key() { + let mut db = make_db(&[]); + let (h_1, c_1) = (H256::random(), make_changeset(&[1], &[])); + let (h_2, c_2) = (H256::random(), make_changeset(&[1], &[])); + + let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); + db.commit( + &overlay + .insert::(&h_1, 1, &H256::default(), c_1) + .unwrap(), + ); + db.commit( + &overlay + .insert::(&h_2, 1, &H256::default(), c_2) + .unwrap(), + ); + assert!(contains(&overlay, 1)); + db.commit(&overlay.canonicalize::(&h_1).unwrap()); + assert!(contains(&overlay, 1)); + overlay.apply_pending(); + assert!(!contains(&overlay, 1)); + } + + #[test] + fn insert_with_pending_canonicalization() { + let h1 = H256::random(); + let h2 = H256::random(); + let h3 = H256::random(); + let mut db = make_db(&[]); + let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); + let changeset = make_changeset(&[], &[]); + db.commit( + &overlay + .insert::(&h1, 1, &H256::default(), changeset.clone()) + .unwrap(), + ); + db.commit( + &overlay + .insert::(&h2, 2, &h1, changeset.clone()) + .unwrap(), + ); + overlay.apply_pending(); + db.commit(&overlay.canonicalize::(&h1).unwrap()); + db.commit(&overlay.canonicalize::(&h2).unwrap()); + db.commit( + &overlay + .insert::(&h3, 3, &h2, changeset.clone()) + .unwrap(), + ); + overlay.apply_pending(); + assert_eq!(overlay.levels.len(), 1); + } + + #[test] + fn complex_tree() { + use 
crate::MetaDb; + let mut db = make_db(&[]); + + // - 1 - 1_1 - 1_1_1 + // \ 1_2 - 1_2_1 + // \ 1_2_2 + // \ 1_2_3 + // + // - 2 - 2_1 - 2_1_1 + // \ 2_2 + // + // 1_2_2 is the winner + + let (h_1, c_1) = (H256::random(), make_changeset(&[1], &[])); + let (h_2, c_2) = (H256::random(), make_changeset(&[2], &[])); + + let (h_1_1, c_1_1) = (H256::random(), make_changeset(&[11], &[])); + let (h_1_2, c_1_2) = (H256::random(), make_changeset(&[12], &[])); + let (h_2_1, c_2_1) = (H256::random(), make_changeset(&[21], &[])); + let (h_2_2, c_2_2) = (H256::random(), make_changeset(&[22], &[])); + + let (h_1_1_1, c_1_1_1) = (H256::random(), make_changeset(&[111], &[])); + let (h_1_2_1, c_1_2_1) = (H256::random(), make_changeset(&[121], &[])); + let (h_1_2_2, c_1_2_2) = (H256::random(), make_changeset(&[122], &[])); + let (h_1_2_3, c_1_2_3) = (H256::random(), make_changeset(&[123], &[])); + let (h_2_1_1, c_2_1_1) = (H256::random(), make_changeset(&[211], &[])); + + let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); + db.commit( + &overlay + .insert::(&h_1, 1, &H256::default(), c_1) + .unwrap(), + ); + + db.commit(&overlay.insert::(&h_1_1, 2, &h_1, c_1_1).unwrap()); + db.commit(&overlay.insert::(&h_1_2, 2, &h_1, c_1_2).unwrap()); + + db.commit( + &overlay + .insert::(&h_2, 1, &H256::default(), c_2) + .unwrap(), + ); + + db.commit(&overlay.insert::(&h_2_1, 2, &h_2, c_2_1).unwrap()); + db.commit(&overlay.insert::(&h_2_2, 2, &h_2, c_2_2).unwrap()); + + db.commit( + &overlay + .insert::(&h_1_1_1, 3, &h_1_1, c_1_1_1) + .unwrap(), + ); + db.commit( + &overlay + .insert::(&h_1_2_1, 3, &h_1_2, c_1_2_1) + .unwrap(), + ); + db.commit( + &overlay + .insert::(&h_1_2_2, 3, &h_1_2, c_1_2_2) + .unwrap(), + ); + db.commit( + &overlay + .insert::(&h_1_2_3, 3, &h_1_2, c_1_2_3) + .unwrap(), + ); + db.commit( + &overlay + .insert::(&h_2_1_1, 3, &h_2_1, c_2_1_1) + .unwrap(), + ); + + assert!(contains(&overlay, 2)); + assert!(contains(&overlay, 11)); + assert!(contains(&overlay, 21)); + assert!(contains(&overlay, 111)); + assert!(contains(&overlay, 122)); + assert!(contains(&overlay, 211)); + assert_eq!(overlay.levels.len(), 3); + assert_eq!(overlay.parents.len(), 11); + assert_eq!(overlay.last_canonicalized, Some((H256::default(), 0))); + + // check if restoration from journal results in the same tree + let overlay2 = NonCanonicalOverlay::::new(&db).unwrap(); + assert_eq!(overlay.levels, overlay2.levels); + assert_eq!(overlay.parents, overlay2.parents); + assert_eq!(overlay.last_canonicalized, overlay2.last_canonicalized); + + // canonicalize 1. 2 and all its children should be discarded + db.commit(&overlay.canonicalize::(&h_1).unwrap()); + overlay.apply_pending(); + assert_eq!(overlay.levels.len(), 2); + assert_eq!(overlay.parents.len(), 6); + assert!(!contains(&overlay, 1)); + assert!(!contains(&overlay, 2)); + assert!(!contains(&overlay, 21)); + assert!(!contains(&overlay, 22)); + assert!(!contains(&overlay, 211)); + assert!(contains(&overlay, 111)); + assert!(!contains(&overlay, 211)); + // check that journals are deleted + assert!(db.get_meta(&to_journal_key(1, 0)).unwrap().is_none()); + assert!(db.get_meta(&to_journal_key(1, 1)).unwrap().is_none()); + assert!(db.get_meta(&to_journal_key(2, 1)).unwrap().is_some()); + assert!(db.get_meta(&to_journal_key(2, 2)).unwrap().is_none()); + assert!(db.get_meta(&to_journal_key(2, 3)).unwrap().is_none()); + + // canonicalize 1_2. 
1_1 and all its children should be discarded + db.commit(&overlay.canonicalize::(&h_1_2).unwrap()); + overlay.apply_pending(); + assert_eq!(overlay.levels.len(), 1); + assert_eq!(overlay.parents.len(), 3); + assert!(!contains(&overlay, 11)); + assert!(!contains(&overlay, 111)); + assert!(contains(&overlay, 121)); + assert!(contains(&overlay, 122)); + assert!(contains(&overlay, 123)); + assert!(overlay.have_block(&h_1_2_1)); + assert!(!overlay.have_block(&h_1_2)); + assert!(!overlay.have_block(&h_1_1)); + assert!(!overlay.have_block(&h_1_1_1)); + + // canonicalize 1_2_2 + db.commit(&overlay.canonicalize::(&h_1_2_2).unwrap()); + overlay.apply_pending(); + assert_eq!(overlay.levels.len(), 0); + assert_eq!(overlay.parents.len(), 0); + assert!(db.data_eq(&make_db(&[1, 12, 122]))); + assert_eq!(overlay.last_canonicalized, Some((h_1_2_2, 3))); + } + + #[test] + fn insert_revert() { + let h1 = H256::random(); + let h2 = H256::random(); + let mut db = make_db(&[1, 2, 3, 4]); + let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); + assert!(overlay.revert_one().is_none()); + let changeset1 = make_changeset(&[5, 6], &[2]); + let changeset2 = make_changeset(&[7, 8], &[5, 3]); + db.commit( + &overlay + .insert::(&h1, 1, &H256::default(), changeset1) + .unwrap(), + ); + db.commit( + &overlay + .insert::(&h2, 2, &h1, changeset2) + .unwrap(), + ); + assert!(contains(&overlay, 7)); + db.commit(&overlay.revert_one().unwrap()); + assert_eq!(overlay.parents.len(), 1); + assert!(contains(&overlay, 5)); + assert!(!contains(&overlay, 7)); + db.commit(&overlay.revert_one().unwrap()); + assert_eq!(overlay.levels.len(), 0); + assert_eq!(overlay.parents.len(), 0); + assert!(overlay.revert_one().is_none()); + } + + #[test] + fn revert_pending_insertion() { + let h1 = H256::random(); + let h2_1 = H256::random(); + let h2_2 = H256::random(); + let db = make_db(&[]); + let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); + let changeset1 = make_changeset(&[5, 6], &[2]); + let changeset2 = make_changeset(&[7, 8], &[5, 3]); + let changeset3 = make_changeset(&[9], &[]); + overlay + .insert::(&h1, 1, &H256::default(), changeset1) + .unwrap(); + assert!(contains(&overlay, 5)); + overlay + .insert::(&h2_1, 2, &h1, changeset2) + .unwrap(); + overlay + .insert::(&h2_2, 2, &h1, changeset3) + .unwrap(); + assert!(contains(&overlay, 7)); + assert!(contains(&overlay, 5)); + assert!(contains(&overlay, 9)); + assert_eq!(overlay.levels.len(), 2); + assert_eq!(overlay.parents.len(), 3); + overlay.revert_pending(); + assert!(!contains(&overlay, 5)); + assert_eq!(overlay.levels.len(), 0); + assert_eq!(overlay.parents.len(), 0); + } } - diff --git a/core/state-db/src/pruning.rs b/core/state-db/src/pruning.rs index 078745c7a2..dc327a42c2 100644 --- a/core/state-db/src/pruning.rs +++ b/core/state-db/src/pruning.rs @@ -22,358 +22,396 @@ //! the death list. //! The changes are journaled in the DB. -use std::collections::{HashMap, HashSet, VecDeque}; -use crate::codec::{Encode, Decode}; -use crate::{CommitSet, Error, MetaDb, to_meta_key, Hash}; +use crate::codec::{Decode, Encode}; +use crate::{to_meta_key, CommitSet, Error, Hash, MetaDb}; use log::{trace, warn}; +use std::collections::{HashMap, HashSet, VecDeque}; const LAST_PRUNED: &[u8] = b"last_pruned"; const PRUNING_JOURNAL: &[u8] = b"pruning_journal"; /// See module documentation. pub struct RefWindow { - /// A queue of keys that should be deleted for each block in the pruning window. 
-	death_rows: VecDeque<DeathRow<BlockHash, Key>>,
-	/// An index that maps each key from `death_rows` to block number.
-	death_index: HashMap<Key, u64>,
-	/// Block number that corresponts to the front of `death_rows`
-	pending_number: u64,
-	/// Number of call of `note_canonical` after
-	/// last call `apply_pending` or `revert_pending`
-	pending_canonicalizations: usize,
-	/// Number of calls of `prune_one` after
-	/// last call `apply_pending` or `revert_pending`
-	pending_prunings: usize,
+    /// A queue of keys that should be deleted for each block in the pruning window.
+    death_rows: VecDeque<DeathRow<BlockHash, Key>>,
+    /// An index that maps each key from `death_rows` to block number.
+    death_index: HashMap<Key, u64>,
+    /// Block number that corresponds to the front of `death_rows`.
+    pending_number: u64,
+    /// Number of calls of `note_canonical` after the
+    /// last call of `apply_pending` or `revert_pending`.
+    pending_canonicalizations: usize,
+    /// Number of calls of `prune_one` after the
+    /// last call of `apply_pending` or `revert_pending`.
+    pending_prunings: usize,
 }

 #[derive(Debug, PartialEq, Eq)]
 struct DeathRow<BlockHash: Hash, Key: Hash> {
-	hash: BlockHash,
-	journal_key: Vec<u8>,
-	deleted: HashSet<Key>,
+    hash: BlockHash,
+    journal_key: Vec<u8>,
+    deleted: HashSet<Key>,
 }

 #[derive(Encode, Decode)]
 struct JournalRecord<BlockHash: Hash, Key: Hash> {
-	hash: BlockHash,
-	inserted: Vec<Key>,
-	deleted: Vec<Key>,
+    hash: BlockHash,
+    inserted: Vec<Key>,
+    deleted: Vec<Key>,
 }

 fn to_journal_key(block: u64) -> Vec<u8> {
-	to_meta_key(PRUNING_JOURNAL, &block)
+    to_meta_key(PRUNING_JOURNAL, &block)
 }

 impl<BlockHash: Hash, Key: Hash> RefWindow<BlockHash, Key> {
-	pub fn new<D: MetaDb>(db: &D) -> Result<RefWindow<BlockHash, Key>, Error<D::Error>> {
-		let last_pruned = db.get_meta(&to_meta_key(LAST_PRUNED, &()))
-			.map_err(|e| Error::Db(e))?;
-		let pending_number: u64 = match last_pruned {
-			Some(buffer) => u64::decode(&mut buffer.as_slice()).ok_or(Error::Decoding)? + 1,
-			None => 0,
-		};
-		let mut block = pending_number;
-		let mut pruning = RefWindow {
-			death_rows: Default::default(),
-			death_index: Default::default(),
-			pending_number: pending_number,
-			pending_canonicalizations: 0,
-			pending_prunings: 0,
-		};
-		// read the journal
-		trace!(target: "state-db", "Reading pruning journal. Pending #{}", pending_number);
-		loop {
-			let journal_key = to_journal_key(block);
-			match db.get_meta(&journal_key).map_err(|e| Error::Db(e))?
{ - Some(record) => { - let record: JournalRecord = Decode::decode(&mut record.as_slice()).ok_or(Error::Decoding)?; - trace!(target: "state-db", "Pruning journal entry {} ({} inserted, {} deleted)", block, record.inserted.len(), record.deleted.len()); - pruning.import(&record.hash, journal_key, record.inserted.into_iter(), record.deleted); - }, - None => break, - } - block += 1; - } - Ok(pruning) - } - - fn import>(&mut self, hash: &BlockHash, journal_key: Vec, inserted: I, deleted: Vec) { - // remove all re-inserted keys from death rows - for k in inserted { - if let Some(block) = self.death_index.remove(&k) { - self.death_rows[(block - self.pending_number) as usize].deleted.remove(&k); - } - } - - // add new keys - let imported_block = self.pending_number + self.death_rows.len() as u64; - for k in deleted.iter() { - self.death_index.insert(k.clone(), imported_block); - } - self.death_rows.push_back( - DeathRow { - hash: hash.clone(), - deleted: deleted.into_iter().collect(), - journal_key: journal_key, - } - ); - } - - pub fn window_size(&self) -> u64 { - (self.death_rows.len() - self.pending_prunings) as u64 - } - - pub fn next_hash(&self) -> Option { - self.death_rows.get(self.pending_prunings).map(|r| r.hash.clone()) - } - - pub fn mem_used(&self) -> usize { - 0 - } - - pub fn pending(&self) -> u64 { - self.pending_number + self.pending_prunings as u64 - } - - pub fn have_block(&self, hash: &BlockHash) -> bool { - self.death_rows.iter().skip(self.pending_prunings).any(|r| r.hash == *hash) - } - - /// Prune next block. Expects at least one block in the window. Adds changes to `commit`. - pub fn prune_one(&mut self, commit: &mut CommitSet) { - if let Some(pruned) = self.death_rows.get(self.pending_prunings) { - trace!(target: "state-db", "Pruning {:?} ({} deleted)", pruned.hash, pruned.deleted.len()); - let index = self.pending_number + self.pending_prunings as u64; - commit.data.deleted.extend(pruned.deleted.iter().cloned()); - commit.meta.inserted.push((to_meta_key(LAST_PRUNED, &()), index.encode())); - commit.meta.deleted.push(pruned.journal_key.clone()); - self.pending_prunings += 1; - } else { - warn!(target: "state-db", "Trying to prune when there's nothing to prune"); - } - } - - /// Add a change set to the window. Creates a journal record and pushes it to `commit` - pub fn note_canonical(&mut self, hash: &BlockHash, commit: &mut CommitSet) { - trace!(target: "state-db", "Adding to pruning window: {:?} ({} inserted, {} deleted)", hash, commit.data.inserted.len(), commit.data.deleted.len()); - let inserted = commit.data.inserted.iter().map(|(k, _)| k.clone()).collect(); - let deleted = ::std::mem::replace(&mut commit.data.deleted, Vec::new()); - let journal_record = JournalRecord { - hash: hash.clone(), - inserted, - deleted, - }; - let block = self.pending_number + self.death_rows.len() as u64; - let journal_key = to_journal_key(block); - commit.meta.inserted.push((journal_key.clone(), journal_record.encode())); - self.import(&journal_record.hash, journal_key, journal_record.inserted.into_iter(), journal_record.deleted); - self.pending_canonicalizations += 1; - } - - /// Apply all pending changes - pub fn apply_pending(&mut self) { - self.pending_canonicalizations = 0; - for _ in 0 .. 
self.pending_prunings { - let pruned = self.death_rows.pop_front().expect("pending_prunings is always < death_rows.len()"); - trace!(target: "state-db", "Applying pruning {:?} ({} deleted)", pruned.hash, pruned.deleted.len()); - for k in pruned.deleted.iter() { - self.death_index.remove(&k); - } - self.pending_number += 1; - } - self.pending_prunings = 0; - } - - /// Revert all pending changes - pub fn revert_pending(&mut self) { - // Revert pending deletions. - // Note that pending insertions might cause some existing deletions to be removed from `death_index` - // We don't bother to track and revert that for now. This means that a few nodes might end up no being - // deleted in case transaction fails and `revert_pending` is called. - self.death_rows.truncate(self.death_rows.len() - self.pending_canonicalizations); - let new_max_block = self.death_rows.len() as u64 + self.pending_number; - self.death_index.retain(|_, block| *block < new_max_block); - self.pending_canonicalizations = 0; - self.pending_prunings = 0; - } + pub fn new(db: &D) -> Result, Error> { + let last_pruned = db + .get_meta(&to_meta_key(LAST_PRUNED, &())) + .map_err(|e| Error::Db(e))?; + let pending_number: u64 = match last_pruned { + Some(buffer) => u64::decode(&mut buffer.as_slice()).ok_or(Error::Decoding)? + 1, + None => 0, + }; + let mut block = pending_number; + let mut pruning = RefWindow { + death_rows: Default::default(), + death_index: Default::default(), + pending_number: pending_number, + pending_canonicalizations: 0, + pending_prunings: 0, + }; + // read the journal + trace!(target: "state-db", "Reading pruning journal. Pending #{}", pending_number); + loop { + let journal_key = to_journal_key(block); + match db.get_meta(&journal_key).map_err(|e| Error::Db(e))? { + Some(record) => { + let record: JournalRecord = + Decode::decode(&mut record.as_slice()).ok_or(Error::Decoding)?; + trace!(target: "state-db", "Pruning journal entry {} ({} inserted, {} deleted)", block, record.inserted.len(), record.deleted.len()); + pruning.import( + &record.hash, + journal_key, + record.inserted.into_iter(), + record.deleted, + ); + } + None => break, + } + block += 1; + } + Ok(pruning) + } + + fn import>( + &mut self, + hash: &BlockHash, + journal_key: Vec, + inserted: I, + deleted: Vec, + ) { + // remove all re-inserted keys from death rows + for k in inserted { + if let Some(block) = self.death_index.remove(&k) { + self.death_rows[(block - self.pending_number) as usize] + .deleted + .remove(&k); + } + } + + // add new keys + let imported_block = self.pending_number + self.death_rows.len() as u64; + for k in deleted.iter() { + self.death_index.insert(k.clone(), imported_block); + } + self.death_rows.push_back(DeathRow { + hash: hash.clone(), + deleted: deleted.into_iter().collect(), + journal_key: journal_key, + }); + } + + pub fn window_size(&self) -> u64 { + (self.death_rows.len() - self.pending_prunings) as u64 + } + + pub fn next_hash(&self) -> Option { + self.death_rows + .get(self.pending_prunings) + .map(|r| r.hash.clone()) + } + + pub fn mem_used(&self) -> usize { + 0 + } + + pub fn pending(&self) -> u64 { + self.pending_number + self.pending_prunings as u64 + } + + pub fn have_block(&self, hash: &BlockHash) -> bool { + self.death_rows + .iter() + .skip(self.pending_prunings) + .any(|r| r.hash == *hash) + } + + /// Prune next block. Expects at least one block in the window. Adds changes to `commit`. 
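+    /// The deletions and the journal-key removal are only staged into `commit`;
+    /// the caller is expected to commit them to the database and then call
+    /// `apply_pending` to advance the window.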
+    pub fn prune_one(&mut self, commit: &mut CommitSet<BlockHash, Key>) {
+        if let Some(pruned) = self.death_rows.get(self.pending_prunings) {
+            trace!(target: "state-db", "Pruning {:?} ({} deleted)", pruned.hash, pruned.deleted.len());
+            let index = self.pending_number + self.pending_prunings as u64;
+            commit.data.deleted.extend(pruned.deleted.iter().cloned());
+            commit
+                .meta
+                .inserted
+                .push((to_meta_key(LAST_PRUNED, &()), index.encode()));
+            commit.meta.deleted.push(pruned.journal_key.clone());
+            self.pending_prunings += 1;
+        } else {
+            warn!(target: "state-db", "Trying to prune when there's nothing to prune");
+        }
+    }
+
+    /// Add a change set to the window. Creates a journal record and pushes it to `commit`.
+    pub fn note_canonical(&mut self, hash: &BlockHash, commit: &mut CommitSet<BlockHash, Key>) {
+        trace!(target: "state-db", "Adding to pruning window: {:?} ({} inserted, {} deleted)", hash, commit.data.inserted.len(), commit.data.deleted.len());
+        let inserted = commit
+            .data
+            .inserted
+            .iter()
+            .map(|(k, _)| k.clone())
+            .collect();
+        let deleted = ::std::mem::replace(&mut commit.data.deleted, Vec::new());
+        let journal_record = JournalRecord {
+            hash: hash.clone(),
+            inserted,
+            deleted,
+        };
+        let block = self.pending_number + self.death_rows.len() as u64;
+        let journal_key = to_journal_key(block);
+        commit
+            .meta
+            .inserted
+            .push((journal_key.clone(), journal_record.encode()));
+        self.import(
+            &journal_record.hash,
+            journal_key,
+            journal_record.inserted.into_iter(),
+            journal_record.deleted,
+        );
+        self.pending_canonicalizations += 1;
+    }
+
+    /// Apply all pending changes
+    pub fn apply_pending(&mut self) {
+        self.pending_canonicalizations = 0;
+        for _ in 0..self.pending_prunings {
+            let pruned = self
+                .death_rows
+                .pop_front()
+                .expect("pending_prunings is always < death_rows.len()");
+            trace!(target: "state-db", "Applying pruning {:?} ({} deleted)", pruned.hash, pruned.deleted.len());
+            for k in pruned.deleted.iter() {
+                self.death_index.remove(&k);
+            }
+            self.pending_number += 1;
+        }
+        self.pending_prunings = 0;
+    }
+
+    /// Revert all pending changes
+    pub fn revert_pending(&mut self) {
+        // Revert pending deletions.
+        // Note that pending insertions might cause some existing deletions to be removed from `death_index`.
+        // We don't bother to track and revert that for now. This means that a few nodes might end up not being
+        // deleted in case the transaction fails and `revert_pending` is called.
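+        //
+        // Worked example (illustrative numbers): with `pending_number` = 10, five
+        // death rows (blocks 10..15) and `pending_canonicalizations` = 2, the
+        // truncate below drops the rows for blocks 13 and 14; `new_max_block`
+        // becomes 13, so the retain removes exactly the `death_index` entries
+        // that pointed at the dropped rows.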
+ self.death_rows + .truncate(self.death_rows.len() - self.pending_canonicalizations); + let new_max_block = self.death_rows.len() as u64 + self.pending_number; + self.death_index.retain(|_, block| *block < new_max_block); + self.pending_canonicalizations = 0; + self.pending_prunings = 0; + } } #[cfg(test)] mod tests { - use super::RefWindow; - use primitives::H256; - use crate::CommitSet; - use crate::test::{make_db, make_commit, TestDb}; - - fn check_journal(pruning: &RefWindow, db: &TestDb) { - let restored: RefWindow = RefWindow::new(db).unwrap(); - assert_eq!(pruning.pending_number, restored.pending_number); - assert_eq!(pruning.death_rows, restored.death_rows); - assert_eq!(pruning.death_index, restored.death_index); - } - - #[test] - fn created_from_empty_db() { - let db = make_db(&[]); - let pruning: RefWindow = RefWindow::new(&db).unwrap(); - assert_eq!(pruning.pending_number, 0); - assert!(pruning.death_rows.is_empty()); - assert!(pruning.death_index.is_empty()); - } - - #[test] - fn prune_empty() { - let db = make_db(&[]); - let mut pruning: RefWindow = RefWindow::new(&db).unwrap(); - let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit); - assert_eq!(pruning.pending_number, 0); - assert!(pruning.death_rows.is_empty()); - assert!(pruning.death_index.is_empty()); - assert!(pruning.pending_prunings == 0); - assert!(pruning.pending_canonicalizations == 0); - } - - #[test] - fn prune_one() { - let mut db = make_db(&[1, 2, 3]); - let mut pruning: RefWindow = RefWindow::new(&db).unwrap(); - let mut commit = make_commit(&[4, 5], &[1, 3]); - let h = H256::random(); - pruning.note_canonical(&h, &mut commit); - db.commit(&commit); - assert!(pruning.have_block(&h)); - pruning.apply_pending(); - assert!(pruning.have_block(&h)); - assert!(commit.data.deleted.is_empty()); - assert_eq!(pruning.death_rows.len(), 1); - assert_eq!(pruning.death_index.len(), 2); - assert!(db.data_eq(&make_db(&[1, 2, 3, 4, 5]))); - check_journal(&pruning, &db); - - let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit); - assert!(!pruning.have_block(&h)); - db.commit(&commit); - pruning.apply_pending(); - assert!(!pruning.have_block(&h)); - assert!(db.data_eq(&make_db(&[2, 4, 5]))); - assert!(pruning.death_rows.is_empty()); - assert!(pruning.death_index.is_empty()); - assert_eq!(pruning.pending_number, 1); - } - - #[test] - fn prune_two() { - let mut db = make_db(&[1, 2, 3]); - let mut pruning: RefWindow = RefWindow::new(&db).unwrap(); - let mut commit = make_commit(&[4], &[1]); - pruning.note_canonical(&H256::random(), &mut commit); - db.commit(&commit); - let mut commit = make_commit(&[5], &[2]); - pruning.note_canonical(&H256::random(), &mut commit); - db.commit(&commit); - pruning.apply_pending(); - assert!(db.data_eq(&make_db(&[1, 2, 3, 4, 5]))); - - check_journal(&pruning, &db); - - let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit); - db.commit(&commit); - pruning.apply_pending(); - assert!(db.data_eq(&make_db(&[2, 3, 4, 5]))); - let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit); - db.commit(&commit); - pruning.apply_pending(); - assert!(db.data_eq(&make_db(&[3, 4, 5]))); - assert_eq!(pruning.pending_number, 2); - } - - #[test] - fn prune_two_pending() { - let mut db = make_db(&[1, 2, 3]); - let mut pruning: RefWindow = RefWindow::new(&db).unwrap(); - let mut commit = make_commit(&[4], &[1]); - pruning.note_canonical(&H256::random(), &mut commit); - db.commit(&commit); - let mut commit = make_commit(&[5], &[2]); - 
pruning.note_canonical(&H256::random(), &mut commit); - db.commit(&commit); - assert!(db.data_eq(&make_db(&[1, 2, 3, 4, 5]))); - let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit); - db.commit(&commit); - assert!(db.data_eq(&make_db(&[2, 3, 4, 5]))); - let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit); - db.commit(&commit); - pruning.apply_pending(); - assert!(db.data_eq(&make_db(&[3, 4, 5]))); - assert_eq!(pruning.pending_number, 2); - } - - #[test] - fn reinserted_survives() { - let mut db = make_db(&[1, 2, 3]); - let mut pruning: RefWindow = RefWindow::new(&db).unwrap(); - let mut commit = make_commit(&[], &[2]); - pruning.note_canonical(&H256::random(), &mut commit); - db.commit(&commit); - let mut commit = make_commit(&[2], &[]); - pruning.note_canonical(&H256::random(), &mut commit); - db.commit(&commit); - let mut commit = make_commit(&[], &[2]); - pruning.note_canonical(&H256::random(), &mut commit); - db.commit(&commit); - assert!(db.data_eq(&make_db(&[1, 2, 3]))); - pruning.apply_pending(); - - check_journal(&pruning, &db); - - let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit); - db.commit(&commit); - assert!(db.data_eq(&make_db(&[1, 2, 3]))); - let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit); - db.commit(&commit); - assert!(db.data_eq(&make_db(&[1, 2, 3]))); - pruning.prune_one(&mut commit); - db.commit(&commit); - assert!(db.data_eq(&make_db(&[1, 3]))); - pruning.apply_pending(); - assert_eq!(pruning.pending_number, 3); - } - - #[test] - fn reinserted_survivew_pending() { - let mut db = make_db(&[1, 2, 3]); - let mut pruning: RefWindow = RefWindow::new(&db).unwrap(); - let mut commit = make_commit(&[], &[2]); - pruning.note_canonical(&H256::random(), &mut commit); - db.commit(&commit); - let mut commit = make_commit(&[2], &[]); - pruning.note_canonical(&H256::random(), &mut commit); - db.commit(&commit); - let mut commit = make_commit(&[], &[2]); - pruning.note_canonical(&H256::random(), &mut commit); - db.commit(&commit); - assert!(db.data_eq(&make_db(&[1, 2, 3]))); - - let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit); - db.commit(&commit); - assert!(db.data_eq(&make_db(&[1, 2, 3]))); - let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit); - db.commit(&commit); - assert!(db.data_eq(&make_db(&[1, 2, 3]))); - pruning.prune_one(&mut commit); - db.commit(&commit); - assert!(db.data_eq(&make_db(&[1, 3]))); - pruning.apply_pending(); - assert_eq!(pruning.pending_number, 3); - } + use super::RefWindow; + use crate::test::{make_commit, make_db, TestDb}; + use crate::CommitSet; + use primitives::H256; + + fn check_journal(pruning: &RefWindow, db: &TestDb) { + let restored: RefWindow = RefWindow::new(db).unwrap(); + assert_eq!(pruning.pending_number, restored.pending_number); + assert_eq!(pruning.death_rows, restored.death_rows); + assert_eq!(pruning.death_index, restored.death_index); + } + + #[test] + fn created_from_empty_db() { + let db = make_db(&[]); + let pruning: RefWindow = RefWindow::new(&db).unwrap(); + assert_eq!(pruning.pending_number, 0); + assert!(pruning.death_rows.is_empty()); + assert!(pruning.death_index.is_empty()); + } + + #[test] + fn prune_empty() { + let db = make_db(&[]); + let mut pruning: RefWindow = RefWindow::new(&db).unwrap(); + let mut commit = CommitSet::default(); + pruning.prune_one(&mut commit); + assert_eq!(pruning.pending_number, 0); + assert!(pruning.death_rows.is_empty()); + assert!(pruning.death_index.is_empty()); + 
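+        // Pruning an empty window only logs a warning; neither pending counter
+        // may advance: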
assert!(pruning.pending_prunings == 0); + assert!(pruning.pending_canonicalizations == 0); + } + + #[test] + fn prune_one() { + let mut db = make_db(&[1, 2, 3]); + let mut pruning: RefWindow = RefWindow::new(&db).unwrap(); + let mut commit = make_commit(&[4, 5], &[1, 3]); + let h = H256::random(); + pruning.note_canonical(&h, &mut commit); + db.commit(&commit); + assert!(pruning.have_block(&h)); + pruning.apply_pending(); + assert!(pruning.have_block(&h)); + assert!(commit.data.deleted.is_empty()); + assert_eq!(pruning.death_rows.len(), 1); + assert_eq!(pruning.death_index.len(), 2); + assert!(db.data_eq(&make_db(&[1, 2, 3, 4, 5]))); + check_journal(&pruning, &db); + + let mut commit = CommitSet::default(); + pruning.prune_one(&mut commit); + assert!(!pruning.have_block(&h)); + db.commit(&commit); + pruning.apply_pending(); + assert!(!pruning.have_block(&h)); + assert!(db.data_eq(&make_db(&[2, 4, 5]))); + assert!(pruning.death_rows.is_empty()); + assert!(pruning.death_index.is_empty()); + assert_eq!(pruning.pending_number, 1); + } + + #[test] + fn prune_two() { + let mut db = make_db(&[1, 2, 3]); + let mut pruning: RefWindow = RefWindow::new(&db).unwrap(); + let mut commit = make_commit(&[4], &[1]); + pruning.note_canonical(&H256::random(), &mut commit); + db.commit(&commit); + let mut commit = make_commit(&[5], &[2]); + pruning.note_canonical(&H256::random(), &mut commit); + db.commit(&commit); + pruning.apply_pending(); + assert!(db.data_eq(&make_db(&[1, 2, 3, 4, 5]))); + + check_journal(&pruning, &db); + + let mut commit = CommitSet::default(); + pruning.prune_one(&mut commit); + db.commit(&commit); + pruning.apply_pending(); + assert!(db.data_eq(&make_db(&[2, 3, 4, 5]))); + let mut commit = CommitSet::default(); + pruning.prune_one(&mut commit); + db.commit(&commit); + pruning.apply_pending(); + assert!(db.data_eq(&make_db(&[3, 4, 5]))); + assert_eq!(pruning.pending_number, 2); + } + + #[test] + fn prune_two_pending() { + let mut db = make_db(&[1, 2, 3]); + let mut pruning: RefWindow = RefWindow::new(&db).unwrap(); + let mut commit = make_commit(&[4], &[1]); + pruning.note_canonical(&H256::random(), &mut commit); + db.commit(&commit); + let mut commit = make_commit(&[5], &[2]); + pruning.note_canonical(&H256::random(), &mut commit); + db.commit(&commit); + assert!(db.data_eq(&make_db(&[1, 2, 3, 4, 5]))); + let mut commit = CommitSet::default(); + pruning.prune_one(&mut commit); + db.commit(&commit); + assert!(db.data_eq(&make_db(&[2, 3, 4, 5]))); + let mut commit = CommitSet::default(); + pruning.prune_one(&mut commit); + db.commit(&commit); + pruning.apply_pending(); + assert!(db.data_eq(&make_db(&[3, 4, 5]))); + assert_eq!(pruning.pending_number, 2); + } + + #[test] + fn reinserted_survives() { + let mut db = make_db(&[1, 2, 3]); + let mut pruning: RefWindow = RefWindow::new(&db).unwrap(); + let mut commit = make_commit(&[], &[2]); + pruning.note_canonical(&H256::random(), &mut commit); + db.commit(&commit); + let mut commit = make_commit(&[2], &[]); + pruning.note_canonical(&H256::random(), &mut commit); + db.commit(&commit); + let mut commit = make_commit(&[], &[2]); + pruning.note_canonical(&H256::random(), &mut commit); + db.commit(&commit); + assert!(db.data_eq(&make_db(&[1, 2, 3]))); + pruning.apply_pending(); + + check_journal(&pruning, &db); + + let mut commit = CommitSet::default(); + pruning.prune_one(&mut commit); + db.commit(&commit); + assert!(db.data_eq(&make_db(&[1, 2, 3]))); + let mut commit = CommitSet::default(); + pruning.prune_one(&mut commit); + 
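+        // Block 1 originally deleted key 2, but block 2 re-inserted it, so `import`
+        // removed key 2 from block 1's death row and this prune deletes nothing: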
db.commit(&commit); + assert!(db.data_eq(&make_db(&[1, 2, 3]))); + pruning.prune_one(&mut commit); + db.commit(&commit); + assert!(db.data_eq(&make_db(&[1, 3]))); + pruning.apply_pending(); + assert_eq!(pruning.pending_number, 3); + } + + #[test] + fn reinserted_survivew_pending() { + let mut db = make_db(&[1, 2, 3]); + let mut pruning: RefWindow = RefWindow::new(&db).unwrap(); + let mut commit = make_commit(&[], &[2]); + pruning.note_canonical(&H256::random(), &mut commit); + db.commit(&commit); + let mut commit = make_commit(&[2], &[]); + pruning.note_canonical(&H256::random(), &mut commit); + db.commit(&commit); + let mut commit = make_commit(&[], &[2]); + pruning.note_canonical(&H256::random(), &mut commit); + db.commit(&commit); + assert!(db.data_eq(&make_db(&[1, 2, 3]))); + + let mut commit = CommitSet::default(); + pruning.prune_one(&mut commit); + db.commit(&commit); + assert!(db.data_eq(&make_db(&[1, 2, 3]))); + let mut commit = CommitSet::default(); + pruning.prune_one(&mut commit); + db.commit(&commit); + assert!(db.data_eq(&make_db(&[1, 2, 3]))); + pruning.prune_one(&mut commit); + db.commit(&commit); + assert!(db.data_eq(&make_db(&[1, 3]))); + pruning.apply_pending(); + assert_eq!(pruning.pending_number, 3); + } } diff --git a/core/state-db/src/test.rs b/core/state-db/src/test.rs index d90c369906..8e02910461 100644 --- a/core/state-db/src/test.rs +++ b/core/state-db/src/test.rs @@ -16,79 +16,84 @@ //! Test utils -use std::collections::HashMap; +use crate::{ChangeSet, CommitSet, DBValue, MetaDb, NodeDb}; use primitives::H256; -use crate::{DBValue, ChangeSet, CommitSet, MetaDb, NodeDb}; +use std::collections::HashMap; #[derive(Default, Debug, Clone, PartialEq, Eq)] pub struct TestDb { - pub data: HashMap, - pub meta: HashMap, DBValue>, + pub data: HashMap, + pub meta: HashMap, DBValue>, } impl MetaDb for TestDb { - type Error = (); + type Error = (); - fn get_meta(&self, key: &[u8]) -> Result, ()> { - Ok(self.meta.get(key).cloned()) - } + fn get_meta(&self, key: &[u8]) -> Result, ()> { + Ok(self.meta.get(key).cloned()) + } } impl NodeDb for TestDb { - type Error = (); - type Key = H256; + type Error = (); + type Key = H256; - fn get(&self, key: &H256) -> Result, ()> { - Ok(self.data.get(key).cloned()) - } + fn get(&self, key: &H256) -> Result, ()> { + Ok(self.data.get(key).cloned()) + } } impl TestDb { - pub fn commit(&mut self, commit: &CommitSet) { - self.data.extend(commit.data.inserted.iter().cloned()); - self.meta.extend(commit.meta.inserted.iter().cloned()); - for k in commit.data.deleted.iter() { - self.data.remove(k); - } - self.meta.extend(commit.meta.inserted.iter().cloned()); - for k in commit.meta.deleted.iter() { - self.meta.remove(k); - } - } - - pub fn data_eq(&self, other: &TestDb) -> bool { - self.data == other.data - } + pub fn commit(&mut self, commit: &CommitSet) { + self.data.extend(commit.data.inserted.iter().cloned()); + self.meta.extend(commit.meta.inserted.iter().cloned()); + for k in commit.data.deleted.iter() { + self.data.remove(k); + } + self.meta.extend(commit.meta.inserted.iter().cloned()); + for k in commit.meta.deleted.iter() { + self.meta.remove(k); + } + } + + pub fn data_eq(&self, other: &TestDb) -> bool { + self.data == other.data + } } pub fn make_changeset(inserted: &[u64], deleted: &[u64]) -> ChangeSet { - ChangeSet { - inserted: inserted - .iter() - .map(|v| { - (H256::from_low_u64_be(*v), H256::from_low_u64_be(*v).as_bytes().to_vec()) - }) - .collect(), - deleted: deleted.iter().map(|v| H256::from_low_u64_be(*v)).collect(), - } + 
ChangeSet { + inserted: inserted + .iter() + .map(|v| { + ( + H256::from_low_u64_be(*v), + H256::from_low_u64_be(*v).as_bytes().to_vec(), + ) + }) + .collect(), + deleted: deleted.iter().map(|v| H256::from_low_u64_be(*v)).collect(), + } } pub fn make_commit(inserted: &[u64], deleted: &[u64]) -> CommitSet { - CommitSet { - data: make_changeset(inserted, deleted), - meta: ChangeSet::default(), - } + CommitSet { + data: make_changeset(inserted, deleted), + meta: ChangeSet::default(), + } } pub fn make_db(inserted: &[u64]) -> TestDb { - TestDb { - data: inserted - .iter() - .map(|v| { - (H256::from_low_u64_be(*v), H256::from_low_u64_be(*v).as_bytes().to_vec()) - }) - .collect(), - meta: Default::default(), - } + TestDb { + data: inserted + .iter() + .map(|v| { + ( + H256::from_low_u64_be(*v), + H256::from_low_u64_be(*v).as_bytes().to_vec(), + ) + }) + .collect(), + meta: Default::default(), + } } - diff --git a/core/state-machine/src/backend.rs b/core/state-machine/src/backend.rs index 14e60b140a..323acd3a46 100644 --- a/core/state-machine/src/backend.rs +++ b/core/state-machine/src/backend.rs @@ -16,106 +16,111 @@ //! State machine backends. These manage the code and storage of contracts. -use std::{error, fmt}; -use std::cmp::Ord; -use std::collections::HashMap; -use std::marker::PhantomData; -use log::warn; -use hash_db::Hasher; use crate::trie_backend::TrieBackend; use crate::trie_backend_essence::TrieBackendStorage; -use trie::{TrieDBMut, TrieMut, MemoryDB, trie_root, child_trie_root, default_child_trie_root}; +use hash_db::Hasher; use heapsize::HeapSizeOf; +use log::warn; +use std::cmp::Ord; +use std::collections::HashMap; +use std::marker::PhantomData; +use std::{error, fmt}; +use trie::{child_trie_root, default_child_trie_root, trie_root, MemoryDB, TrieDBMut, TrieMut}; /// A state backend is used to read state data and can have changes committed /// to it. /// /// The clone operation (if implemented) should be cheap. pub trait Backend { - /// An error type when fetching data is not possible. - type Error: super::Error; - - /// Storage changes to be applied if committing - type Transaction: Consolidate + Default; - - /// Type of trie backend storage. - type TrieBackendStorage: TrieBackendStorage; - - /// Get keyed storage or None if there is nothing associated. - fn storage(&self, key: &[u8]) -> Result>, Self::Error>; - - /// Get keyed storage value hash or None if there is nothing associated. - fn storage_hash(&self, key: &[u8]) -> Result, Self::Error> { - self.storage(key).map(|v| v.map(|v| H::hash(&v))) - } - - /// Get keyed child storage or None if there is nothing associated. - fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result>, Self::Error>; - - /// true if a key exists in storage. - fn exists_storage(&self, key: &[u8]) -> Result { - Ok(self.storage(key)?.is_some()) - } - - /// true if a key exists in child storage. - fn exists_child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result { - Ok(self.child_storage(storage_key, key)?.is_some()) - } - - /// Retrieve all entries keys of child storage and call `f` for each of those keys. - fn for_keys_in_child_storage(&self, storage_key: &[u8], f: F); - - /// Retrieve all entries keys of which start with the given prefix and - /// call `f` for each of those keys. - fn for_keys_with_prefix(&self, prefix: &[u8], f: F); - - /// Calculate the storage root, with given delta over what is already stored in - /// the backend, and produce a "transaction" that can be used to commit. 
- fn storage_root(&self, delta: I) -> (H::Out, Self::Transaction) - where - I: IntoIterator, Option>)>, - H::Out: Ord; - - /// Calculate the child storage root, with given delta over what is already stored in - /// the backend, and produce a "transaction" that can be used to commit. The second argument - /// is true if child storage root equals default storage root. - fn child_storage_root(&self, storage_key: &[u8], delta: I) -> (Vec, bool, Self::Transaction) - where - I: IntoIterator, Option>)>, - H::Out: Ord; - - /// Get all key/value pairs into a Vec. - fn pairs(&self) -> Vec<(Vec, Vec)>; - - /// Get all keys with given prefix - fn keys(&self, prefix: &Vec) -> Vec>; - - /// Try convert into trie backend. - fn try_into_trie_backend(self) -> Option>; + /// An error type when fetching data is not possible. + type Error: super::Error; + + /// Storage changes to be applied if committing + type Transaction: Consolidate + Default; + + /// Type of trie backend storage. + type TrieBackendStorage: TrieBackendStorage; + + /// Get keyed storage or None if there is nothing associated. + fn storage(&self, key: &[u8]) -> Result>, Self::Error>; + + /// Get keyed storage value hash or None if there is nothing associated. + fn storage_hash(&self, key: &[u8]) -> Result, Self::Error> { + self.storage(key).map(|v| v.map(|v| H::hash(&v))) + } + + /// Get keyed child storage or None if there is nothing associated. + fn child_storage(&self, storage_key: &[u8], key: &[u8]) + -> Result>, Self::Error>; + + /// true if a key exists in storage. + fn exists_storage(&self, key: &[u8]) -> Result { + Ok(self.storage(key)?.is_some()) + } + + /// true if a key exists in child storage. + fn exists_child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result { + Ok(self.child_storage(storage_key, key)?.is_some()) + } + + /// Retrieve all entries keys of child storage and call `f` for each of those keys. + fn for_keys_in_child_storage(&self, storage_key: &[u8], f: F); + + /// Retrieve all entries keys of which start with the given prefix and + /// call `f` for each of those keys. + fn for_keys_with_prefix(&self, prefix: &[u8], f: F); + + /// Calculate the storage root, with given delta over what is already stored in + /// the backend, and produce a "transaction" that can be used to commit. + fn storage_root(&self, delta: I) -> (H::Out, Self::Transaction) + where + I: IntoIterator, Option>)>, + H::Out: Ord; + + /// Calculate the child storage root, with given delta over what is already stored in + /// the backend, and produce a "transaction" that can be used to commit. The second argument + /// is true if child storage root equals default storage root. + fn child_storage_root( + &self, + storage_key: &[u8], + delta: I, + ) -> (Vec, bool, Self::Transaction) + where + I: IntoIterator, Option>)>, + H::Out: Ord; + + /// Get all key/value pairs into a Vec. + fn pairs(&self) -> Vec<(Vec, Vec)>; + + /// Get all keys with given prefix + fn keys(&self, prefix: &Vec) -> Vec>; + + /// Try convert into trie backend. + fn try_into_trie_backend(self) -> Option>; } /// Trait that allows consolidate two transactions together. pub trait Consolidate { - /// Consolidate two transactions into one. - fn consolidate(&mut self, other: Self); + /// Consolidate two transactions into one. 
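+    /// The `Vec`-based transaction below consolidates by appending `other`,
+    /// while the memory-DB transaction delegates to
+    /// `trie::GenericMemoryDB::consolidate`.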
+ fn consolidate(&mut self, other: Self); } impl Consolidate for () { - fn consolidate(&mut self, _: Self) { - () - } + fn consolidate(&mut self, _: Self) { + () + } } impl Consolidate for Vec<(Option>, Vec, Option>)> { - fn consolidate(&mut self, mut other: Self) { - self.append(&mut other); - } + fn consolidate(&mut self, mut other: Self) { + self.append(&mut other); + } } impl> Consolidate for trie::GenericMemoryDB { - fn consolidate(&mut self, other: Self) { - trie::GenericMemoryDB::consolidate(self, other) - } + fn consolidate(&mut self, other: Self) { + trie::GenericMemoryDB::consolidate(self, other) + } } /// Error impossible. @@ -124,208 +129,267 @@ impl> Consolidate for trie::GenericMemoryDB< pub enum Void {} impl fmt::Display for Void { - fn fmt(&self, _: &mut fmt::Formatter) -> fmt::Result { - match *self {} - } + fn fmt(&self, _: &mut fmt::Formatter) -> fmt::Result { + match *self {} + } } impl error::Error for Void { - fn description(&self) -> &str { "unreachable error" } + fn description(&self) -> &str { + "unreachable error" + } } /// In-memory backend. Fully recomputes tries on each commit but useful for /// tests. #[derive(Eq)] pub struct InMemory { - inner: HashMap>, HashMap, Vec>>, - _hasher: PhantomData, + inner: HashMap>, HashMap, Vec>>, + _hasher: PhantomData, } impl Default for InMemory { - fn default() -> Self { - InMemory { - inner: Default::default(), - _hasher: PhantomData, - } - } + fn default() -> Self { + InMemory { + inner: Default::default(), + _hasher: PhantomData, + } + } } impl Clone for InMemory { - fn clone(&self) -> Self { - InMemory { - inner: self.inner.clone(), - _hasher: PhantomData, - } - } + fn clone(&self) -> Self { + InMemory { + inner: self.inner.clone(), + _hasher: PhantomData, + } + } } impl PartialEq for InMemory { - fn eq(&self, other: &Self) -> bool { - self.inner.eq(&other.inner) - } + fn eq(&self, other: &Self) -> bool { + self.inner.eq(&other.inner) + } } -impl InMemory where H::Out: HeapSizeOf { - /// Copy the state, with applied updates - pub fn update(&self, changes: >::Transaction) -> Self { - let mut inner: HashMap<_, _> = self.inner.clone(); - for (storage_key, key, val) in changes { - match val { - Some(v) => { inner.entry(storage_key).or_default().insert(key, v); }, - None => { inner.entry(storage_key).or_default().remove(&key); }, - } - } - - inner.into() - } +impl InMemory +where + H::Out: HeapSizeOf, +{ + /// Copy the state, with applied updates + pub fn update(&self, changes: >::Transaction) -> Self { + let mut inner: HashMap<_, _> = self.inner.clone(); + for (storage_key, key, val) in changes { + match val { + Some(v) => { + inner.entry(storage_key).or_default().insert(key, v); + } + None => { + inner.entry(storage_key).or_default().remove(&key); + } + } + } + + inner.into() + } } impl From>, HashMap, Vec>>> for InMemory { - fn from(inner: HashMap>, HashMap, Vec>>) -> Self { - InMemory { - inner: inner, - _hasher: PhantomData, - } - } + fn from(inner: HashMap>, HashMap, Vec>>) -> Self { + InMemory { + inner: inner, + _hasher: PhantomData, + } + } } impl From, Vec>> for InMemory { - fn from(inner: HashMap, Vec>) -> Self { - let mut expanded = HashMap::new(); - expanded.insert(None, inner); - InMemory { - inner: expanded, - _hasher: PhantomData, - } - } + fn from(inner: HashMap, Vec>) -> Self { + let mut expanded = HashMap::new(); + expanded.insert(None, inner); + InMemory { + inner: expanded, + _hasher: PhantomData, + } + } } impl From>, Vec, Option>)>> for InMemory { - fn from(inner: Vec<(Option>, Vec, Option>)>) -> Self { 
- let mut expanded: HashMap>, HashMap, Vec>> = HashMap::new(); - for (child_key, key, value) in inner { - if let Some(value) = value { - expanded.entry(child_key).or_default().insert(key, value); - } - } - expanded.into() - } + fn from(inner: Vec<(Option>, Vec, Option>)>) -> Self { + let mut expanded: HashMap>, HashMap, Vec>> = HashMap::new(); + for (child_key, key, value) in inner { + if let Some(value) = value { + expanded.entry(child_key).or_default().insert(key, value); + } + } + expanded.into() + } } impl super::Error for Void {} -impl Backend for InMemory where H::Out: HeapSizeOf { - type Error = Void; - type Transaction = Vec<(Option>, Vec, Option>)>; - type TrieBackendStorage = MemoryDB; - - fn storage(&self, key: &[u8]) -> Result>, Self::Error> { - Ok(self.inner.get(&None).and_then(|map| map.get(key).map(Clone::clone))) - } - - fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result>, Self::Error> { - Ok(self.inner.get(&Some(storage_key.to_vec())).and_then(|map| map.get(key).map(Clone::clone))) - } - - fn exists_storage(&self, key: &[u8]) -> Result { - Ok(self.inner.get(&None).map(|map| map.get(key).is_some()).unwrap_or(false)) - } - - fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { - self.inner.get(&None).map(|map| map.keys().filter(|key| key.starts_with(prefix)).map(|k| &**k).for_each(f)); - } - - fn for_keys_in_child_storage(&self, storage_key: &[u8], mut f: F) { - self.inner.get(&Some(storage_key.to_vec())).map(|map| map.keys().for_each(|k| f(&k))); - } - - fn storage_root(&self, delta: I) -> (H::Out, Self::Transaction) - where - I: IntoIterator, Option>)>, - ::Out: Ord, - { - let existing_pairs = self.inner.get(&None).into_iter().flat_map(|map| map.iter().map(|(k, v)| (k.clone(), Some(v.clone())))); - - let transaction: Vec<_> = delta.into_iter().collect(); - let root = trie_root::(existing_pairs.chain(transaction.iter().cloned()) - .collect::>() - .into_iter() - .filter_map(|(k, maybe_val)| maybe_val.map(|val| (k, val))) - ); - - let full_transaction = transaction.into_iter().map(|(k, v)| (None, k, v)).collect(); - - (root, full_transaction) - } - - fn child_storage_root(&self, storage_key: &[u8], delta: I) -> (Vec, bool, Self::Transaction) - where - I: IntoIterator, Option>)>, - H::Out: Ord - { - let storage_key = storage_key.to_vec(); - - let existing_pairs = self.inner.get(&Some(storage_key.clone())).into_iter().flat_map(|map| map.iter().map(|(k, v)| (k.clone(), Some(v.clone())))); - - let transaction: Vec<_> = delta.into_iter().collect(); - let root = child_trie_root::( - &storage_key, - existing_pairs.chain(transaction.iter().cloned()) - .collect::>() - .into_iter() - .filter_map(|(k, maybe_val)| maybe_val.map(|val| (k, val))) - ); - - let full_transaction = transaction.into_iter().map(|(k, v)| (Some(storage_key.clone()), k, v)).collect(); - - let is_default = root == default_child_trie_root::(&storage_key); - - (root, is_default, full_transaction) - } - - fn pairs(&self) -> Vec<(Vec, Vec)> { - self.inner.get(&None).into_iter().flat_map(|map| map.iter().map(|(k, v)| (k.clone(), v.clone()))).collect() - } - - fn keys(&self, prefix: &Vec) -> Vec> { - self.inner.get(&None).into_iter().flat_map(|map| map.keys().filter(|k| k.starts_with(prefix)).cloned()).collect() - } - - fn try_into_trie_backend(self) -> Option> { - let mut mdb = MemoryDB::default(); - let mut root = None; - for (storage_key, map) in self.inner { - if storage_key != None { - let _ = insert_into_memory_db::(&mut mdb, map.into_iter())?; - } else { - root = Some(insert_into_memory_db::(&mut 
mdb, map.into_iter())?); - } - } - let root = match root { - Some(root) => root, - None => insert_into_memory_db::(&mut mdb, ::std::iter::empty())?, - }; - Some(TrieBackend::new(mdb, root)) - } +impl Backend for InMemory +where + H::Out: HeapSizeOf, +{ + type Error = Void; + type Transaction = Vec<(Option>, Vec, Option>)>; + type TrieBackendStorage = MemoryDB; + + fn storage(&self, key: &[u8]) -> Result>, Self::Error> { + Ok(self + .inner + .get(&None) + .and_then(|map| map.get(key).map(Clone::clone))) + } + + fn child_storage( + &self, + storage_key: &[u8], + key: &[u8], + ) -> Result>, Self::Error> { + Ok(self + .inner + .get(&Some(storage_key.to_vec())) + .and_then(|map| map.get(key).map(Clone::clone))) + } + + fn exists_storage(&self, key: &[u8]) -> Result { + Ok(self + .inner + .get(&None) + .map(|map| map.get(key).is_some()) + .unwrap_or(false)) + } + + fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { + self.inner.get(&None).map(|map| { + map.keys() + .filter(|key| key.starts_with(prefix)) + .map(|k| &**k) + .for_each(f) + }); + } + + fn for_keys_in_child_storage(&self, storage_key: &[u8], mut f: F) { + self.inner + .get(&Some(storage_key.to_vec())) + .map(|map| map.keys().for_each(|k| f(&k))); + } + + fn storage_root(&self, delta: I) -> (H::Out, Self::Transaction) + where + I: IntoIterator, Option>)>, + ::Out: Ord, + { + let existing_pairs = self + .inner + .get(&None) + .into_iter() + .flat_map(|map| map.iter().map(|(k, v)| (k.clone(), Some(v.clone())))); + + let transaction: Vec<_> = delta.into_iter().collect(); + let root = trie_root::( + existing_pairs + .chain(transaction.iter().cloned()) + .collect::>() + .into_iter() + .filter_map(|(k, maybe_val)| maybe_val.map(|val| (k, val))), + ); + + let full_transaction = transaction.into_iter().map(|(k, v)| (None, k, v)).collect(); + + (root, full_transaction) + } + + fn child_storage_root( + &self, + storage_key: &[u8], + delta: I, + ) -> (Vec, bool, Self::Transaction) + where + I: IntoIterator, Option>)>, + H::Out: Ord, + { + let storage_key = storage_key.to_vec(); + + let existing_pairs = self + .inner + .get(&Some(storage_key.clone())) + .into_iter() + .flat_map(|map| map.iter().map(|(k, v)| (k.clone(), Some(v.clone())))); + + let transaction: Vec<_> = delta.into_iter().collect(); + let root = child_trie_root::( + &storage_key, + existing_pairs + .chain(transaction.iter().cloned()) + .collect::>() + .into_iter() + .filter_map(|(k, maybe_val)| maybe_val.map(|val| (k, val))), + ); + + let full_transaction = transaction + .into_iter() + .map(|(k, v)| (Some(storage_key.clone()), k, v)) + .collect(); + + let is_default = root == default_child_trie_root::(&storage_key); + + (root, is_default, full_transaction) + } + + fn pairs(&self) -> Vec<(Vec, Vec)> { + self.inner + .get(&None) + .into_iter() + .flat_map(|map| map.iter().map(|(k, v)| (k.clone(), v.clone()))) + .collect() + } + + fn keys(&self, prefix: &Vec) -> Vec> { + self.inner + .get(&None) + .into_iter() + .flat_map(|map| map.keys().filter(|k| k.starts_with(prefix)).cloned()) + .collect() + } + + fn try_into_trie_backend(self) -> Option> { + let mut mdb = MemoryDB::default(); + let mut root = None; + for (storage_key, map) in self.inner { + if storage_key != None { + let _ = insert_into_memory_db::(&mut mdb, map.into_iter())?; + } else { + root = Some(insert_into_memory_db::(&mut mdb, map.into_iter())?); + } + } + let root = match root { + Some(root) => root, + None => insert_into_memory_db::(&mut mdb, ::std::iter::empty())?, + }; + Some(TrieBackend::new(mdb, root)) + } } /// 
Insert input pairs into memory db. pub(crate) fn insert_into_memory_db(mdb: &mut MemoryDB, input: I) -> Option - where - H: Hasher, - H::Out: HeapSizeOf, - I: IntoIterator, Vec)>, +where + H: Hasher, + H::Out: HeapSizeOf, + I: IntoIterator, Vec)>, { - let mut root = ::Out::default(); - { - let mut trie = TrieDBMut::::new(mdb, &mut root); - for (key, value) in input { - if let Err(e) = trie.insert(&key, &value) { - warn!(target: "trie", "Failed to write to trie: {}", e); - return None; - } - } - } - - Some(root) + let mut root = ::Out::default(); + { + let mut trie = TrieDBMut::::new(mdb, &mut root); + for (key, value) in input { + if let Err(e) = trie.insert(&key, &value) { + warn!(target: "trie", "Failed to write to trie: {}", e); + return None; + } + } + } + + Some(root) } diff --git a/core/state-machine/src/basic.rs b/core/state-machine/src/basic.rs index 7b2a95464e..37ff4617b3 100644 --- a/core/state-machine/src/basic.rs +++ b/core/state-machine/src/basic.rs @@ -16,174 +16,190 @@ //! Basic implementation for Externalities. -use std::collections::HashMap; -use std::iter::FromIterator; +use super::{Externalities, OverlayedChanges}; use hash_db::Hasher; use heapsize::HeapSizeOf; -use trie::trie_root; -use primitives::storage::well_known_keys::{CHANGES_TRIE_CONFIG, CODE, HEAP_PAGES}; -use parity_codec::Encode; -use super::{Externalities, OverlayedChanges}; use log::warn; +use parity_codec::Encode; +use primitives::storage::well_known_keys::{CHANGES_TRIE_CONFIG, CODE, HEAP_PAGES}; +use std::collections::HashMap; +use std::iter::FromIterator; +use trie::trie_root; /// Simple HashMap-based Externalities impl. pub struct BasicExternalities { - inner: HashMap, Vec>, - changes: OverlayedChanges, - code: Option>, + inner: HashMap, Vec>, + changes: OverlayedChanges, + code: Option>, } impl BasicExternalities { - /// Create a new instance of `BasicExternalities` - pub fn new(inner: HashMap, Vec>) -> Self { - Self::new_with_code(&[], inner) - } - - /// Create a new instance of `BasicExternalities` - pub fn new_with_code(code: &[u8], mut inner: HashMap, Vec>) -> Self { - let mut overlay = OverlayedChanges::default(); - super::set_changes_trie_config( - &mut overlay, - inner.get(&CHANGES_TRIE_CONFIG.to_vec()).cloned(), - false, - ).expect("changes trie configuration is correct in test env; qed"); - - inner.insert(HEAP_PAGES.to_vec(), 8u64.encode()); - - BasicExternalities { - inner, - changes: overlay, - code: Some(code.to_vec()), - } - } - - /// Insert key/value - pub fn insert(&mut self, k: Vec, v: Vec) -> Option> { - self.inner.insert(k, v) - } + /// Create a new instance of `BasicExternalities` + pub fn new(inner: HashMap, Vec>) -> Self { + Self::new_with_code(&[], inner) + } + + /// Create a new instance of `BasicExternalities` + pub fn new_with_code(code: &[u8], mut inner: HashMap, Vec>) -> Self { + let mut overlay = OverlayedChanges::default(); + super::set_changes_trie_config( + &mut overlay, + inner.get(&CHANGES_TRIE_CONFIG.to_vec()).cloned(), + false, + ) + .expect("changes trie configuration is correct in test env; qed"); + + inner.insert(HEAP_PAGES.to_vec(), 8u64.encode()); + + BasicExternalities { + inner, + changes: overlay, + code: Some(code.to_vec()), + } + } + + /// Insert key/value + pub fn insert(&mut self, k: Vec, v: Vec) -> Option> { + self.inner.insert(k, v) + } } impl ::std::fmt::Debug for BasicExternalities { - fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { - write!(f, "{:?}", self.inner) - } + fn fmt(&self, f: &mut ::std::fmt::Formatter) -> 
::std::fmt::Result { + write!(f, "{:?}", self.inner) + } } impl PartialEq for BasicExternalities { - fn eq(&self, other: &BasicExternalities) -> bool { - self.inner.eq(&other.inner) - } + fn eq(&self, other: &BasicExternalities) -> bool { + self.inner.eq(&other.inner) + } } impl FromIterator<(Vec, Vec)> for BasicExternalities { - fn from_iter, Vec)>>(iter: I) -> Self { - let mut t = Self::new(Default::default()); - t.inner.extend(iter); - t - } + fn from_iter, Vec)>>(iter: I) -> Self { + let mut t = Self::new(Default::default()); + t.inner.extend(iter); + t + } } impl Default for BasicExternalities { - fn default() -> Self { Self::new(Default::default()) } + fn default() -> Self { + Self::new(Default::default()) + } } impl From for HashMap, Vec> { - fn from(tex: BasicExternalities) -> Self { - tex.inner.into() - } + fn from(tex: BasicExternalities) -> Self { + tex.inner.into() + } } -impl From< HashMap, Vec> > for BasicExternalities { - fn from(hashmap: HashMap, Vec>) -> Self { - BasicExternalities { - inner: hashmap, - changes: Default::default(), - code: None, - } - } +impl From, Vec>> for BasicExternalities { + fn from(hashmap: HashMap, Vec>) -> Self { + BasicExternalities { + inner: hashmap, + changes: Default::default(), + code: None, + } + } } -impl Externalities for BasicExternalities where H::Out: Ord + HeapSizeOf { - fn storage(&self, key: &[u8]) -> Option> { - match key { - CODE => self.code.clone(), - _ => self.inner.get(key).cloned(), - } - } - - fn child_storage(&self, _storage_key: &[u8], _key: &[u8]) -> Option> { - None - } - - fn place_storage(&mut self, key: Vec, maybe_value: Option>) { - self.changes.set_storage(key.clone(), maybe_value.clone()); - match key.as_ref() { - CODE => self.code = maybe_value, - _ => { - match maybe_value { - Some(value) => { self.inner.insert(key, value); } - None => { self.inner.remove(&key); } - } - } - } - } - - fn place_child_storage(&mut self, _storage_key: Vec, _key: Vec, _value: Option>) -> bool { - false - } - - fn kill_child_storage(&mut self, _storage_key: &[u8]) { } - - fn clear_prefix(&mut self, prefix: &[u8]) { - self.changes.clear_prefix(prefix); - self.inner.retain(|key, _| !key.starts_with(prefix)); - } - - fn chain_id(&self) -> u64 { 42 } - - fn storage_root(&mut self) -> H::Out { - trie_root::(self.inner.clone()) - } - - fn child_storage_root(&mut self, _storage_key: &[u8]) -> Option> { - None - } - - fn storage_changes_root(&mut self, _parent: H::Out, _parent_num: u64) -> Option { - None - } - - fn submit_extrinsic(&mut self, _extrinsic: Vec) -> Result<(), ()> { - warn!("Call to submit_extrinsic without offchain externalities set."); - Err(()) - } +impl Externalities for BasicExternalities +where + H::Out: Ord + HeapSizeOf, +{ + fn storage(&self, key: &[u8]) -> Option> { + match key { + CODE => self.code.clone(), + _ => self.inner.get(key).cloned(), + } + } + + fn child_storage(&self, _storage_key: &[u8], _key: &[u8]) -> Option> { + None + } + + fn place_storage(&mut self, key: Vec, maybe_value: Option>) { + self.changes.set_storage(key.clone(), maybe_value.clone()); + match key.as_ref() { + CODE => self.code = maybe_value, + _ => match maybe_value { + Some(value) => { + self.inner.insert(key, value); + } + None => { + self.inner.remove(&key); + } + }, + } + } + + fn place_child_storage( + &mut self, + _storage_key: Vec, + _key: Vec, + _value: Option>, + ) -> bool { + false + } + + fn kill_child_storage(&mut self, _storage_key: &[u8]) {} + + fn clear_prefix(&mut self, prefix: &[u8]) { + self.changes.clear_prefix(prefix); + 
self.inner.retain(|key, _| !key.starts_with(prefix)); + } + + fn chain_id(&self) -> u64 { + 42 + } + + fn storage_root(&mut self) -> H::Out { + trie_root::(self.inner.clone()) + } + + fn child_storage_root(&mut self, _storage_key: &[u8]) -> Option> { + None + } + + fn storage_changes_root(&mut self, _parent: H::Out, _parent_num: u64) -> Option { + None + } + + fn submit_extrinsic(&mut self, _extrinsic: Vec) -> Result<(), ()> { + warn!("Call to submit_extrinsic without offchain externalities set."); + Err(()) + } } #[cfg(test)] mod tests { - use super::*; - use primitives::{Blake2Hasher, H256}; - use hex_literal::{hex, hex_impl}; - - #[test] - fn commit_should_work() { - let mut ext = BasicExternalities::default(); - let ext = &mut ext as &mut Externalities; - ext.set_storage(b"doe".to_vec(), b"reindeer".to_vec()); - ext.set_storage(b"dog".to_vec(), b"puppy".to_vec()); - ext.set_storage(b"dogglesworth".to_vec(), b"cat".to_vec()); - const ROOT: [u8; 32] = hex!("0b33ed94e74e0f8e92a55923bece1ed02d16cf424e124613ddebc53ac3eeeabe"); - assert_eq!(ext.storage_root(), H256::from(ROOT)); - } - - #[test] - fn set_and_retrieve_code() { - let mut ext = BasicExternalities::default(); - let ext = &mut ext as &mut Externalities; - - let code = vec![1, 2, 3]; - ext.set_storage(CODE.to_vec(), code.clone()); - - assert_eq!(&ext.storage(CODE).unwrap(), &code); - } + use super::*; + use hex_literal::{hex, hex_impl}; + use primitives::{Blake2Hasher, H256}; + + #[test] + fn commit_should_work() { + let mut ext = BasicExternalities::default(); + let ext = &mut ext as &mut Externalities; + ext.set_storage(b"doe".to_vec(), b"reindeer".to_vec()); + ext.set_storage(b"dog".to_vec(), b"puppy".to_vec()); + ext.set_storage(b"dogglesworth".to_vec(), b"cat".to_vec()); + const ROOT: [u8; 32] = + hex!("0b33ed94e74e0f8e92a55923bece1ed02d16cf424e124613ddebc53ac3eeeabe"); + assert_eq!(ext.storage_root(), H256::from(ROOT)); + } + + #[test] + fn set_and_retrieve_code() { + let mut ext = BasicExternalities::default(); + let ext = &mut ext as &mut Externalities; + + let code = vec![1, 2, 3]; + ext.set_storage(CODE.to_vec(), code.clone()); + + assert_eq!(&ext.storage(CODE).unwrap(), &code); + } } diff --git a/core/state-machine/src/changes_trie/build.rs b/core/state-machine/src/changes_trie/build.rs index 9cb766874d..2fbec16f9e 100644 --- a/core/state-machine/src/changes_trie/build.rs +++ b/core/state-machine/src/changes_trie/build.rs @@ -16,16 +16,16 @@ //! Structures and functions required to build changes trie for given block. -use std::collections::{BTreeMap, BTreeSet}; -use parity_codec::Decode; -use hash_db::Hasher; -use heapsize::HeapSizeOf; use crate::backend::Backend; -use crate::overlayed_changes::OverlayedChanges; -use crate::trie_backend_essence::{TrieBackendStorage, TrieBackendEssence}; use crate::changes_trie::build_iterator::digest_build_iterator; -use crate::changes_trie::input::{InputKey, InputPair, DigestIndex, ExtrinsicIndex}; +use crate::changes_trie::input::{DigestIndex, ExtrinsicIndex, InputKey, InputPair}; use crate::changes_trie::{AnchorBlockId, Configuration, Storage}; +use crate::overlayed_changes::OverlayedChanges; +use crate::trie_backend_essence::{TrieBackendEssence, TrieBackendStorage}; +use hash_db::Hasher; +use heapsize::HeapSizeOf; +use parity_codec::Decode; +use std::collections::{BTreeMap, BTreeSet}; /// Prepare input pairs for building a changes trie of given block. 
 ///
@@ -34,260 +34,614 @@ use crate::changes_trie::{AnchorBlockId, Configuration, Storage};
 /// Returns Ok(None) if data required to prepare input pairs is not collected
 /// or storage is not provided.
 pub fn prepare_input<'a, B, S, H>(
-	backend: &B,
-	storage: Option<&'a S>,
-	changes: &OverlayedChanges,
-	parent: &'a AnchorBlockId<H::Out>,
+    backend: &B,
+    storage: Option<&'a S>,
+    changes: &OverlayedChanges,
+    parent: &'a AnchorBlockId<H::Out>,
 ) -> Result<Option<Vec<InputPair>>, String>
-	where
-		B: Backend<H>,
-		S: Storage<H>,
-		&'a S: TrieBackendStorage<H>,
-		H: Hasher,
-		H::Out: HeapSizeOf,
+where
+    B: Backend<H>,
+    S: Storage<H>,
+    &'a S: TrieBackendStorage<H>,
+    H: Hasher,
+    H::Out: HeapSizeOf,
 {
-	let (storage, config) = match (storage, changes.changes_trie_config.as_ref()) {
-		(Some(storage), Some(config)) => (storage, config),
-		_ => return Ok(None),
-	};
+    let (storage, config) = match (storage, changes.changes_trie_config.as_ref()) {
+        (Some(storage), Some(config)) => (storage, config),
+        _ => return Ok(None),
+    };
 
-	let mut input = Vec::new();
-	input.extend(prepare_extrinsics_input(
-		backend,
-		parent.number + 1,
-		changes)?);
-	input.extend(prepare_digest_input::<_, H>(
-		parent,
-		config,
-		storage)?);
+    let mut input = Vec::new();
+    input.extend(prepare_extrinsics_input(
+        backend,
+        parent.number + 1,
+        changes,
+    )?);
+    input.extend(prepare_digest_input::<_, H>(parent, config, storage)?);
 
-	Ok(Some(input))
+    Ok(Some(input))
 }
 
 /// Prepare ExtrinsicIndex input pairs.
 fn prepare_extrinsics_input<B, H>(
-	backend: &B,
-	block: u64,
-	changes: &OverlayedChanges,
-) -> Result<impl Iterator<Item=InputPair>, String>
-	where
-		B: Backend<H>,
-		H: Hasher,
+    backend: &B,
+    block: u64,
+    changes: &OverlayedChanges,
+) -> Result<impl Iterator<Item = InputPair>, String>
+where
+    B: Backend<H>,
+    H: Hasher,
 {
-	let mut extrinsic_map = BTreeMap::<Vec<u8>, BTreeSet<u32>>::new();
-	for (key, val) in changes.prospective.top.iter().chain(changes.committed.top.iter()) {
-		let extrinsics = match val.extrinsics {
-			Some(ref extrinsics) => extrinsics,
-			None => continue,
-		};
+    let mut extrinsic_map = BTreeMap::<Vec<u8>, BTreeSet<u32>>::new();
+    for (key, val) in changes
+        .prospective
+        .top
+        .iter()
+        .chain(changes.committed.top.iter())
+    {
+        let extrinsics = match val.extrinsics {
+            Some(ref extrinsics) => extrinsics,
+            None => continue,
+        };
 
-		// ignore values that have null value at the end of operation AND are not in storage
-		// at the beginning of operation
-		if !changes.storage(key).map(|v| v.is_some()).unwrap_or_default() {
-			if !backend.exists_storage(key).map_err(|e| format!("{}", e))? {
-				continue;
-			}
-		}
+        // ignore values that have null value at the end of operation AND are not in storage
+        // at the beginning of operation
+        if !changes
+            .storage(key)
+            .map(|v| v.is_some())
+            .unwrap_or_default()
+        {
+            if !backend.exists_storage(key).map_err(|e| format!("{}", e))? {
+                continue;
+            }
+        }
 
-		extrinsic_map.entry(key.clone()).or_default()
-			.extend(extrinsics.iter().cloned());
-	}
+        extrinsic_map
+            .entry(key.clone())
+            .or_default()
+            .extend(extrinsics.iter().cloned());
+    }
 
-	Ok(extrinsic_map.into_iter()
-		.map(move |(key, extrinsics)| InputPair::ExtrinsicIndex(ExtrinsicIndex {
-			block,
-			key,
-		}, extrinsics.iter().cloned().collect())))
+    Ok(extrinsic_map.into_iter().map(move |(key, extrinsics)| {
+        InputPair::ExtrinsicIndex(
+            ExtrinsicIndex { block, key },
+            extrinsics.iter().cloned().collect(),
+        )
+    }))
 }
 
 /// Prepare DigestIndex input pairs.
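// The extrinsics-input preparation above reduces to one idea: for every storage
// key touched in the block, collect the ordered set of extrinsic indices that
// touched it. A minimal standalone sketch of that aggregation (hypothetical
// helper, not part of this patch; assumes bare Vec<u8> keys and u32 indices):
use std::collections::{BTreeMap, BTreeSet};

fn aggregate_extrinsic_indices(
    changes: impl IntoIterator<Item = (Vec<u8>, u32)>,
) -> BTreeMap<Vec<u8>, BTreeSet<u32>> {
    let mut map: BTreeMap<Vec<u8>, BTreeSet<u32>> = BTreeMap::new();
    for (key, extrinsic) in changes {
        // `or_default` creates the per-key set on first touch, mirroring
        // `extrinsic_map.entry(key.clone()).or_default()` above; the BTreeSet
        // both deduplicates and keeps indices sorted for the trie encoding.
        map.entry(key).or_default().insert(extrinsic);
    }
    map
}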
fn prepare_digest_input<'a, S, H>( - parent: &'a AnchorBlockId, - config: &Configuration, - storage: &'a S -) -> Result + 'a, String> - where - S: Storage, - &'a S: TrieBackendStorage, - H: Hasher, - H::Out: 'a + HeapSizeOf, + parent: &'a AnchorBlockId, + config: &Configuration, + storage: &'a S, +) -> Result + 'a, String> +where + S: Storage, + &'a S: TrieBackendStorage, + H: Hasher, + H::Out: 'a + HeapSizeOf, { - let mut digest_map = BTreeMap::, BTreeSet>::new(); - for digest_build_block in digest_build_iterator(config, parent.number + 1) { - let trie_root = storage.root(parent, digest_build_block)?; - let trie_root = trie_root.ok_or_else(|| format!("No changes trie root for block {}", digest_build_block))?; - let trie_storage = TrieBackendEssence::<_, H>::new(storage, trie_root); + let mut digest_map = BTreeMap::, BTreeSet>::new(); + for digest_build_block in digest_build_iterator(config, parent.number + 1) { + let trie_root = storage.root(parent, digest_build_block)?; + let trie_root = trie_root + .ok_or_else(|| format!("No changes trie root for block {}", digest_build_block))?; + let trie_storage = TrieBackendEssence::<_, H>::new(storage, trie_root); - let extrinsic_prefix = ExtrinsicIndex::key_neutral_prefix(digest_build_block); - trie_storage.for_keys_with_prefix(&extrinsic_prefix, |key| - if let Some(InputKey::ExtrinsicIndex(trie_key)) = Decode::decode(&mut &key[..]) { - digest_map.entry(trie_key.key).or_default() - .insert(digest_build_block); - }); + let extrinsic_prefix = ExtrinsicIndex::key_neutral_prefix(digest_build_block); + trie_storage.for_keys_with_prefix(&extrinsic_prefix, |key| { + if let Some(InputKey::ExtrinsicIndex(trie_key)) = Decode::decode(&mut &key[..]) { + digest_map + .entry(trie_key.key) + .or_default() + .insert(digest_build_block); + } + }); - let digest_prefix = DigestIndex::key_neutral_prefix(digest_build_block); - trie_storage.for_keys_with_prefix(&digest_prefix, |key| - if let Some(InputKey::DigestIndex(trie_key)) = Decode::decode(&mut &key[..]) { - digest_map.entry(trie_key.key).or_default() - .insert(digest_build_block); - }); - } + let digest_prefix = DigestIndex::key_neutral_prefix(digest_build_block); + trie_storage.for_keys_with_prefix(&digest_prefix, |key| { + if let Some(InputKey::DigestIndex(trie_key)) = Decode::decode(&mut &key[..]) { + digest_map + .entry(trie_key.key) + .or_default() + .insert(digest_build_block); + } + }); + } - Ok(digest_map.into_iter() - .map(move |(key, set)| InputPair::DigestIndex(DigestIndex { - block: parent.number + 1, - key - }, set.into_iter().collect()))) + Ok(digest_map.into_iter().map(move |(key, set)| { + InputPair::DigestIndex( + DigestIndex { + block: parent.number + 1, + key, + }, + set.into_iter().collect(), + ) + })) } #[cfg(test)] mod test { - use parity_codec::Encode; - use primitives::Blake2Hasher; - use primitives::storage::well_known_keys::EXTRINSIC_INDEX; - use crate::backend::InMemory; - use crate::changes_trie::storage::InMemoryStorage; - use crate::overlayed_changes::OverlayedValue; - use super::*; - - fn prepare_for_build() -> (InMemory, InMemoryStorage, OverlayedChanges) { - let backend: InMemory<_> = vec![ - (vec![100], vec![255]), - (vec![101], vec![255]), - (vec![102], vec![255]), - (vec![103], vec![255]), - (vec![104], vec![255]), - (vec![105], vec![255]), - ].into_iter().collect::<::std::collections::HashMap<_, _>>().into(); - let storage = InMemoryStorage::with_inputs(vec![ - (1, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 1, key: vec![100] }, vec![1, 3]), - 
InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 1, key: vec![101] }, vec![0, 2]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 1, key: vec![105] }, vec![0, 2, 4]), - ]), - (2, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 2, key: vec![102] }, vec![0]), - ]), - (3, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 3, key: vec![100] }, vec![0]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 3, key: vec![105] }, vec![1]), - ]), - (4, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 4, key: vec![100] }, vec![0, 2, 3]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 4, key: vec![101] }, vec![1]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 4, key: vec![103] }, vec![0, 1]), - - InputPair::DigestIndex(DigestIndex { block: 4, key: vec![100] }, vec![1, 3]), - InputPair::DigestIndex(DigestIndex { block: 4, key: vec![101] }, vec![1]), - InputPair::DigestIndex(DigestIndex { block: 4, key: vec![102] }, vec![2]), - InputPair::DigestIndex(DigestIndex { block: 4, key: vec![105] }, vec![1, 3]), - ]), - (5, Vec::new()), - (6, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 6, key: vec![105] }, vec![2]), - ]), - (7, Vec::new()), - (8, vec![ - InputPair::DigestIndex(DigestIndex { block: 8, key: vec![105] }, vec![6]), - ]), - (9, Vec::new()), (10, Vec::new()), (11, Vec::new()), (12, Vec::new()), (13, Vec::new()), - (14, Vec::new()), (15, Vec::new()), - ]); - let changes = OverlayedChanges { - prospective: vec![ - (vec![100], OverlayedValue { - value: Some(vec![200]), - extrinsics: Some(vec![0, 2].into_iter().collect()) - }), - (vec![103], OverlayedValue { - value: None, - extrinsics: Some(vec![0, 1].into_iter().collect()) - }), - ].into_iter().collect(), - committed: vec![ - (EXTRINSIC_INDEX.to_vec(), OverlayedValue { - value: Some(3u32.encode()), - extrinsics: None, - }), - (vec![100], OverlayedValue { - value: Some(vec![202]), - extrinsics: Some(vec![3].into_iter().collect()) - }), - (vec![101], OverlayedValue { - value: Some(vec![203]), - extrinsics: Some(vec![1].into_iter().collect()) - }), - ].into_iter().collect(), - changes_trie_config: Some(Configuration { digest_interval: 4, digest_levels: 2 }), - }; - - (backend, storage, changes) - } - - #[test] - fn build_changes_trie_nodes_on_non_digest_block() { - let (backend, storage, changes) = prepare_for_build(); - let changes_trie_nodes = prepare_input(&backend, Some(&storage), &changes, &AnchorBlockId { hash: Default::default(), number: 4 }).unwrap(); - assert_eq!(changes_trie_nodes, Some(vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 5, key: vec![100] }, vec![0, 2, 3]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 5, key: vec![101] }, vec![1]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 5, key: vec![103] }, vec![0, 1]), - ])); - } + use super::*; + use crate::backend::InMemory; + use crate::changes_trie::storage::InMemoryStorage; + use crate::overlayed_changes::OverlayedValue; + use parity_codec::Encode; + use primitives::storage::well_known_keys::EXTRINSIC_INDEX; + use primitives::Blake2Hasher; - #[test] - fn build_changes_trie_nodes_on_digest_block_l1() { - let (backend, storage, changes) = prepare_for_build(); - let changes_trie_nodes = prepare_input(&backend, Some(&storage), &changes, &AnchorBlockId { hash: Default::default(), number: 3 }).unwrap(); - assert_eq!(changes_trie_nodes, Some(vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 4, key: vec![100] }, vec![0, 2, 3]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 4, key: vec![101] }, 
vec![1]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 4, key: vec![103] }, vec![0, 1]), + fn prepare_for_build() -> ( + InMemory, + InMemoryStorage, + OverlayedChanges, + ) { + let backend: InMemory<_> = vec![ + (vec![100], vec![255]), + (vec![101], vec![255]), + (vec![102], vec![255]), + (vec![103], vec![255]), + (vec![104], vec![255]), + (vec![105], vec![255]), + ] + .into_iter() + .collect::<::std::collections::HashMap<_, _>>() + .into(); + let storage = InMemoryStorage::with_inputs(vec![ + ( + 1, + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: 1, + key: vec![100], + }, + vec![1, 3], + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: 1, + key: vec![101], + }, + vec![0, 2], + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: 1, + key: vec![105], + }, + vec![0, 2, 4], + ), + ], + ), + ( + 2, + vec![InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: 2, + key: vec![102], + }, + vec![0], + )], + ), + ( + 3, + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: 3, + key: vec![100], + }, + vec![0], + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: 3, + key: vec![105], + }, + vec![1], + ), + ], + ), + ( + 4, + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: 4, + key: vec![100], + }, + vec![0, 2, 3], + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: 4, + key: vec![101], + }, + vec![1], + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: 4, + key: vec![103], + }, + vec![0, 1], + ), + InputPair::DigestIndex( + DigestIndex { + block: 4, + key: vec![100], + }, + vec![1, 3], + ), + InputPair::DigestIndex( + DigestIndex { + block: 4, + key: vec![101], + }, + vec![1], + ), + InputPair::DigestIndex( + DigestIndex { + block: 4, + key: vec![102], + }, + vec![2], + ), + InputPair::DigestIndex( + DigestIndex { + block: 4, + key: vec![105], + }, + vec![1, 3], + ), + ], + ), + (5, Vec::new()), + ( + 6, + vec![InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: 6, + key: vec![105], + }, + vec![2], + )], + ), + (7, Vec::new()), + ( + 8, + vec![InputPair::DigestIndex( + DigestIndex { + block: 8, + key: vec![105], + }, + vec![6], + )], + ), + (9, Vec::new()), + (10, Vec::new()), + (11, Vec::new()), + (12, Vec::new()), + (13, Vec::new()), + (14, Vec::new()), + (15, Vec::new()), + ]); + let changes = OverlayedChanges { + prospective: vec![ + ( + vec![100], + OverlayedValue { + value: Some(vec![200]), + extrinsics: Some(vec![0, 2].into_iter().collect()), + }, + ), + ( + vec![103], + OverlayedValue { + value: None, + extrinsics: Some(vec![0, 1].into_iter().collect()), + }, + ), + ] + .into_iter() + .collect(), + committed: vec![ + ( + EXTRINSIC_INDEX.to_vec(), + OverlayedValue { + value: Some(3u32.encode()), + extrinsics: None, + }, + ), + ( + vec![100], + OverlayedValue { + value: Some(vec![202]), + extrinsics: Some(vec![3].into_iter().collect()), + }, + ), + ( + vec![101], + OverlayedValue { + value: Some(vec![203]), + extrinsics: Some(vec![1].into_iter().collect()), + }, + ), + ] + .into_iter() + .collect(), + changes_trie_config: Some(Configuration { + digest_interval: 4, + digest_levels: 2, + }), + }; - InputPair::DigestIndex(DigestIndex { block: 4, key: vec![100] }, vec![1, 3]), - InputPair::DigestIndex(DigestIndex { block: 4, key: vec![101] }, vec![1]), - InputPair::DigestIndex(DigestIndex { block: 4, key: vec![102] }, vec![2]), - InputPair::DigestIndex(DigestIndex { block: 4, key: vec![105] }, vec![1, 3]), - ])); - } + (backend, storage, changes) + } - #[test] - fn 
build_changes_trie_nodes_on_digest_block_l2() { - let (backend, storage, changes) = prepare_for_build(); - let changes_trie_nodes = prepare_input(&backend, Some(&storage), &changes, &AnchorBlockId { hash: Default::default(), number: 15 }).unwrap(); - assert_eq!(changes_trie_nodes, Some(vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 16, key: vec![100] }, vec![0, 2, 3]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 16, key: vec![101] }, vec![1]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 16, key: vec![103] }, vec![0, 1]), + #[test] + fn build_changes_trie_nodes_on_non_digest_block() { + let (backend, storage, changes) = prepare_for_build(); + let changes_trie_nodes = prepare_input( + &backend, + Some(&storage), + &changes, + &AnchorBlockId { + hash: Default::default(), + number: 4, + }, + ) + .unwrap(); + assert_eq!( + changes_trie_nodes, + Some(vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: 5, + key: vec![100] + }, + vec![0, 2, 3] + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: 5, + key: vec![101] + }, + vec![1] + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: 5, + key: vec![103] + }, + vec![0, 1] + ), + ]) + ); + } - InputPair::DigestIndex(DigestIndex { block: 16, key: vec![100] }, vec![4]), - InputPair::DigestIndex(DigestIndex { block: 16, key: vec![101] }, vec![4]), - InputPair::DigestIndex(DigestIndex { block: 16, key: vec![102] }, vec![4]), - InputPair::DigestIndex(DigestIndex { block: 16, key: vec![103] }, vec![4]), - InputPair::DigestIndex(DigestIndex { block: 16, key: vec![105] }, vec![4, 8]), - ])); - } + #[test] + fn build_changes_trie_nodes_on_digest_block_l1() { + let (backend, storage, changes) = prepare_for_build(); + let changes_trie_nodes = prepare_input( + &backend, + Some(&storage), + &changes, + &AnchorBlockId { + hash: Default::default(), + number: 3, + }, + ) + .unwrap(); + assert_eq!( + changes_trie_nodes, + Some(vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: 4, + key: vec![100] + }, + vec![0, 2, 3] + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: 4, + key: vec![101] + }, + vec![1] + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: 4, + key: vec![103] + }, + vec![0, 1] + ), + InputPair::DigestIndex( + DigestIndex { + block: 4, + key: vec![100] + }, + vec![1, 3] + ), + InputPair::DigestIndex( + DigestIndex { + block: 4, + key: vec![101] + }, + vec![1] + ), + InputPair::DigestIndex( + DigestIndex { + block: 4, + key: vec![102] + }, + vec![2] + ), + InputPair::DigestIndex( + DigestIndex { + block: 4, + key: vec![105] + }, + vec![1, 3] + ), + ]) + ); + } - #[test] - fn build_changes_trie_nodes_ignores_temporary_storage_values() { - let (backend, storage, mut changes) = prepare_for_build(); + #[test] + fn build_changes_trie_nodes_on_digest_block_l2() { + let (backend, storage, changes) = prepare_for_build(); + let changes_trie_nodes = prepare_input( + &backend, + Some(&storage), + &changes, + &AnchorBlockId { + hash: Default::default(), + number: 15, + }, + ) + .unwrap(); + assert_eq!( + changes_trie_nodes, + Some(vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: 16, + key: vec![100] + }, + vec![0, 2, 3] + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: 16, + key: vec![101] + }, + vec![1] + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: 16, + key: vec![103] + }, + vec![0, 1] + ), + InputPair::DigestIndex( + DigestIndex { + block: 16, + key: vec![100] + }, + vec![4] + ), + InputPair::DigestIndex( + DigestIndex { + block: 16, + 
key: vec![101] + }, + vec![4] + ), + InputPair::DigestIndex( + DigestIndex { + block: 16, + key: vec![102] + }, + vec![4] + ), + InputPair::DigestIndex( + DigestIndex { + block: 16, + key: vec![103] + }, + vec![4] + ), + InputPair::DigestIndex( + DigestIndex { + block: 16, + key: vec![105] + }, + vec![4, 8] + ), + ]) + ); + } - // 110: missing from backend, set to None in overlay - changes.prospective.top.insert(vec![110], OverlayedValue { - value: None, - extrinsics: Some(vec![1].into_iter().collect()) - }); + #[test] + fn build_changes_trie_nodes_ignores_temporary_storage_values() { + let (backend, storage, mut changes) = prepare_for_build(); - let changes_trie_nodes = prepare_input(&backend, Some(&storage), &changes, &AnchorBlockId { hash: Default::default(), number: 3 }).unwrap(); - assert_eq!(changes_trie_nodes, Some(vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 4, key: vec![100] }, vec![0, 2, 3]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 4, key: vec![101] }, vec![1]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 4, key: vec![103] }, vec![0, 1]), + // 110: missing from backend, set to None in overlay + changes.prospective.top.insert( + vec![110], + OverlayedValue { + value: None, + extrinsics: Some(vec![1].into_iter().collect()), + }, + ); - InputPair::DigestIndex(DigestIndex { block: 4, key: vec![100] }, vec![1, 3]), - InputPair::DigestIndex(DigestIndex { block: 4, key: vec![101] }, vec![1]), - InputPair::DigestIndex(DigestIndex { block: 4, key: vec![102] }, vec![2]), - InputPair::DigestIndex(DigestIndex { block: 4, key: vec![105] }, vec![1, 3]), - ])); - } + let changes_trie_nodes = prepare_input( + &backend, + Some(&storage), + &changes, + &AnchorBlockId { + hash: Default::default(), + number: 3, + }, + ) + .unwrap(); + assert_eq!( + changes_trie_nodes, + Some(vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: 4, + key: vec![100] + }, + vec![0, 2, 3] + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: 4, + key: vec![101] + }, + vec![1] + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: 4, + key: vec![103] + }, + vec![0, 1] + ), + InputPair::DigestIndex( + DigestIndex { + block: 4, + key: vec![100] + }, + vec![1, 3] + ), + InputPair::DigestIndex( + DigestIndex { + block: 4, + key: vec![101] + }, + vec![1] + ), + InputPair::DigestIndex( + DigestIndex { + block: 4, + key: vec![102] + }, + vec![2] + ), + InputPair::DigestIndex( + DigestIndex { + block: 4, + key: vec![105] + }, + vec![1, 3] + ), + ]) + ); + } } diff --git a/core/state-machine/src/changes_trie/build_iterator.rs b/core/state-machine/src/changes_trie/build_iterator.rs index f9c6ba6e7b..309f8eebad 100644 --- a/core/state-machine/src/changes_trie/build_iterator.rs +++ b/core/state-machine/src/changes_trie/build_iterator.rs @@ -22,172 +22,281 @@ use crate::changes_trie::Configuration; /// Returns iterator of OTHER blocks that are required for inclusion into /// changes trie of given block. 
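// Worked example of the schedule this iterator produces (hypothetical helper,
// not part of this patch; assumes digest_interval >= 2, since the degenerate
// cases are already handled by `digest_build_iterator` returning an empty
// iterator). With digest_interval = 16: a level-1 digest at block 16 covers
// blocks 1..=15; a level-2 digest at block 256 covers blocks 241..=255 plus
// the level-1 digests 16, 32, ..., 240. The same list can be re-derived as:
fn digest_covered_blocks(block: u64, digest_interval: u64, max_step: u64) -> Vec<u64> {
    let mut covered = Vec::new();
    let mut step = 1;
    while step <= max_step {
        // Blocks at the current step inside the window covered by this level.
        let start = block - step * digest_interval + step;
        covered.extend((start..block).step_by(step as usize));
        step *= digest_interval;
    }
    covered
}
// digest_covered_blocks(256, 16, 16) yields 241..=255 followed by
// 16, 32, ..., 240, matching `digest_iterator_returns_level1_and_level2_blocks`
// in the tests below.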
pub fn digest_build_iterator(config: &Configuration, block: u64) -> DigestBuildIterator { - // prepare digest build parameters - let (_, _, digest_step) = match config.digest_level_at_block(block) { - Some((current_level, digest_interval, digest_step)) => - (current_level, digest_interval, digest_step), - None => return DigestBuildIterator::empty(), - }; - - DigestBuildIterator::new(block, config.digest_interval, digest_step) + // prepare digest build parameters + let (_, _, digest_step) = match config.digest_level_at_block(block) { + Some((current_level, digest_interval, digest_step)) => { + (current_level, digest_interval, digest_step) + } + None => return DigestBuildIterator::empty(), + }; + + DigestBuildIterator::new(block, config.digest_interval, digest_step) } /// Changes trie build iterator that returns numbers of OTHER blocks that are /// required for inclusion into changes trie of given block. #[derive(Debug)] pub struct DigestBuildIterator { - /// Block we're building changes trie for. - block: u64, - /// Interval for creation digest blocks. - digest_interval: u64, - /// Step of current blocks range. - current_step: u64, - /// Current blocks range. - current_range: Option<::std::iter::StepBy<::std::ops::Range>>, - /// Max step of blocks range. - max_step: u64, + /// Block we're building changes trie for. + block: u64, + /// Interval for creation digest blocks. + digest_interval: u64, + /// Step of current blocks range. + current_step: u64, + /// Current blocks range. + current_range: Option<::std::iter::StepBy<::std::ops::Range>>, + /// Max step of blocks range. + max_step: u64, } impl DigestBuildIterator { - /// Create new digest build iterator. - pub fn new(block: u64, digest_interval: u64, max_step: u64) -> Self { - DigestBuildIterator { - block, digest_interval, max_step, - current_step: 0, - current_range: None, - } - } - - /// Create empty digest build iterator. - pub fn empty() -> Self { - Self::new(0, 0, 0) - } + /// Create new digest build iterator. + pub fn new(block: u64, digest_interval: u64, max_step: u64) -> Self { + DigestBuildIterator { + block, + digest_interval, + max_step, + current_step: 0, + current_range: None, + } + } + + /// Create empty digest build iterator. 
+ pub fn empty() -> Self { + Self::new(0, 0, 0) + } } impl Iterator for DigestBuildIterator { - type Item = u64; - - fn next(&mut self) -> Option { - if let Some(next) = self.current_range.as_mut().and_then(|iter| iter.next()) { - return Some(next); - } - - // we are safe to use non-checking mul/sub versions here because: - // DigestBuildIterator is created only by internal function that is checking - // that all multiplications/subtractions are safe within max_step limit - - let next_step = if self.current_step == 0 { 1 } else { self.current_step * self.digest_interval }; - if next_step > self.max_step { - return None; - } - - self.current_step = next_step; - self.current_range = Some( - ((self.block - self.current_step * self.digest_interval + self.current_step)..self.block) - .step_by(self.current_step as usize) - ); - - Some(self.current_range.as_mut() - .expect("assigned one line above; qed") - .next() - .expect("X - I^(N+1) + I^N > X when X,I,N are > 1; qed")) - } + type Item = u64; + + fn next(&mut self) -> Option { + if let Some(next) = self.current_range.as_mut().and_then(|iter| iter.next()) { + return Some(next); + } + + // we are safe to use non-checking mul/sub versions here because: + // DigestBuildIterator is created only by internal function that is checking + // that all multiplications/subtractions are safe within max_step limit + + let next_step = if self.current_step == 0 { + 1 + } else { + self.current_step * self.digest_interval + }; + if next_step > self.max_step { + return None; + } + + self.current_step = next_step; + self.current_range = Some( + ((self.block - self.current_step * self.digest_interval + self.current_step) + ..self.block) + .step_by(self.current_step as usize), + ); + + Some( + self.current_range + .as_mut() + .expect("assigned one line above; qed") + .next() + .expect("X - I^(N+1) + I^N > X when X,I,N are > 1; qed"), + ) + } } #[cfg(test)] mod tests { - use super::*; - - fn digest_build_iterator(digest_interval: u64, digest_levels: u32, block: u64) -> DigestBuildIterator { - super::digest_build_iterator(&Configuration { digest_interval, digest_levels }, block) - } - - fn digest_build_iterator_basic(digest_interval: u64, digest_levels: u32, block: u64) -> (u64, u64, u64) { - let iter = digest_build_iterator(digest_interval, digest_levels, block); - (iter.block, iter.digest_interval, iter.max_step) - } - - fn digest_build_iterator_blocks(digest_interval: u64, digest_levels: u32, block: u64) -> Vec { - digest_build_iterator(digest_interval, digest_levels, block).collect() - } - - #[test] - fn suggest_digest_inclusion_returns_empty_iterator() { - let empty = (0, 0, 0); - assert_eq!(digest_build_iterator_basic(4, 16, 0), empty, "block is 0"); - assert_eq!(digest_build_iterator_basic(0, 16, 64), empty, "digest_interval is 0"); - assert_eq!(digest_build_iterator_basic(1, 16, 64), empty, "digest_interval is 1"); - assert_eq!(digest_build_iterator_basic(4, 0, 64), empty, "digest_levels is 0"); - assert_eq!(digest_build_iterator_basic(4, 16, 1), empty, "digest is not required for this block"); - assert_eq!(digest_build_iterator_basic(4, 16, 2), empty, "digest is not required for this block"); - assert_eq!(digest_build_iterator_basic(4, 16, 15), empty, "digest is not required for this block"); - assert_eq!(digest_build_iterator_basic(4, 16, 17), empty, "digest is not required for this block"); - assert_eq!(digest_build_iterator_basic(::std::u64::MAX / 2 + 1, 16, ::std::u64::MAX), empty, "digest_interval * 2 is greater than u64::MAX"); - } - - #[test] - fn 
suggest_digest_inclusion_returns_level1_iterator() { - assert_eq!(digest_build_iterator_basic(16, 1, 16), (16, 16, 1), "!(block % interval) && first digest level == block"); - assert_eq!(digest_build_iterator_basic(16, 1, 256), (256, 16, 1), "!(block % interval^2), but there's only 1 digest level"); - assert_eq!(digest_build_iterator_basic(16, 2, 32), (32, 16, 1), "second level digest is not required for this block"); - assert_eq!(digest_build_iterator_basic(16, 3, 4080), (4080, 16, 1), "second && third level digest are not required for this block"); - } - - #[test] - fn suggest_digest_inclusion_returns_level2_iterator() { - assert_eq!(digest_build_iterator_basic(16, 2, 256), (256, 16, 16), "second level digest"); - assert_eq!(digest_build_iterator_basic(16, 2, 4096), (4096, 16, 16), "!(block % interval^3), but there's only 2 digest levels"); - } - - #[test] - fn suggest_digest_inclusion_returns_level3_iterator() { - assert_eq!(digest_build_iterator_basic(16, 3, 4096), (4096, 16, 256), "third level digest: beginning"); - assert_eq!(digest_build_iterator_basic(16, 3, 8192), (8192, 16, 256), "third level digest: next"); - } - - #[test] - fn digest_iterator_returns_level1_blocks() { - assert_eq!(digest_build_iterator_blocks(16, 1, 16), - vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - assert_eq!(digest_build_iterator_blocks(16, 1, 256), - vec![241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255]); - assert_eq!(digest_build_iterator_blocks(16, 2, 32), - vec![17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]); - assert_eq!(digest_build_iterator_blocks(16, 3, 4080), - vec![4065, 4066, 4067, 4068, 4069, 4070, 4071, 4072, 4073, 4074, 4075, 4076, 4077, 4078, 4079]); - } - - #[test] - fn digest_iterator_returns_level1_and_level2_blocks() { - assert_eq!(digest_build_iterator_blocks(16, 2, 256), - vec![ - // level2 is a level1 digest of 16-1 previous blocks: - 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, - // level2 points to previous 16-1 level1 digests: - 16, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192, 208, 224, 240, - ], - ); - assert_eq!(digest_build_iterator_blocks(16, 2, 4096), - vec![ - // level2 is a level1 digest of 16-1 previous blocks: - 4081, 4082, 4083, 4084, 4085, 4086, 4087, 4088, 4089, 4090, 4091, 4092, 4093, 4094, 4095, - // level2 points to previous 16-1 level1 digests: - 3856, 3872, 3888, 3904, 3920, 3936, 3952, 3968, 3984, 4000, 4016, 4032, 4048, 4064, 4080, - ], - ); - } - - #[test] - fn digest_iterator_returns_level1_and_level2_and_level3_blocks() { - assert_eq!(digest_build_iterator_blocks(16, 3, 4096), - vec![ - // level3 is a level1 digest of 16-1 previous blocks: - 4081, 4082, 4083, 4084, 4085, 4086, 4087, 4088, 4089, 4090, 4091, 4092, 4093, 4094, 4095, - // level3 points to previous 16-1 level1 digests: - 3856, 3872, 3888, 3904, 3920, 3936, 3952, 3968, 3984, 4000, 4016, 4032, 4048, 4064, 4080, - // level3 points to previous 16-1 level2 digests: - 256, 512, 768, 1024, 1280, 1536, 1792, 2048, 2304, 2560, 2816, 3072, 3328, 3584, 3840, - ], - ); - } + use super::*; + + fn digest_build_iterator( + digest_interval: u64, + digest_levels: u32, + block: u64, + ) -> DigestBuildIterator { + super::digest_build_iterator( + &Configuration { + digest_interval, + digest_levels, + }, + block, + ) + } + + fn digest_build_iterator_basic( + digest_interval: u64, + digest_levels: u32, + block: u64, + ) -> (u64, u64, u64) { + let iter = digest_build_iterator(digest_interval, digest_levels, block); + (iter.block, 
iter.digest_interval, iter.max_step) + } + + fn digest_build_iterator_blocks( + digest_interval: u64, + digest_levels: u32, + block: u64, + ) -> Vec { + digest_build_iterator(digest_interval, digest_levels, block).collect() + } + + #[test] + fn suggest_digest_inclusion_returns_empty_iterator() { + let empty = (0, 0, 0); + assert_eq!(digest_build_iterator_basic(4, 16, 0), empty, "block is 0"); + assert_eq!( + digest_build_iterator_basic(0, 16, 64), + empty, + "digest_interval is 0" + ); + assert_eq!( + digest_build_iterator_basic(1, 16, 64), + empty, + "digest_interval is 1" + ); + assert_eq!( + digest_build_iterator_basic(4, 0, 64), + empty, + "digest_levels is 0" + ); + assert_eq!( + digest_build_iterator_basic(4, 16, 1), + empty, + "digest is not required for this block" + ); + assert_eq!( + digest_build_iterator_basic(4, 16, 2), + empty, + "digest is not required for this block" + ); + assert_eq!( + digest_build_iterator_basic(4, 16, 15), + empty, + "digest is not required for this block" + ); + assert_eq!( + digest_build_iterator_basic(4, 16, 17), + empty, + "digest is not required for this block" + ); + assert_eq!( + digest_build_iterator_basic(::std::u64::MAX / 2 + 1, 16, ::std::u64::MAX), + empty, + "digest_interval * 2 is greater than u64::MAX" + ); + } + + #[test] + fn suggest_digest_inclusion_returns_level1_iterator() { + assert_eq!( + digest_build_iterator_basic(16, 1, 16), + (16, 16, 1), + "!(block % interval) && first digest level == block" + ); + assert_eq!( + digest_build_iterator_basic(16, 1, 256), + (256, 16, 1), + "!(block % interval^2), but there's only 1 digest level" + ); + assert_eq!( + digest_build_iterator_basic(16, 2, 32), + (32, 16, 1), + "second level digest is not required for this block" + ); + assert_eq!( + digest_build_iterator_basic(16, 3, 4080), + (4080, 16, 1), + "second && third level digest are not required for this block" + ); + } + + #[test] + fn suggest_digest_inclusion_returns_level2_iterator() { + assert_eq!( + digest_build_iterator_basic(16, 2, 256), + (256, 16, 16), + "second level digest" + ); + assert_eq!( + digest_build_iterator_basic(16, 2, 4096), + (4096, 16, 16), + "!(block % interval^3), but there's only 2 digest levels" + ); + } + + #[test] + fn suggest_digest_inclusion_returns_level3_iterator() { + assert_eq!( + digest_build_iterator_basic(16, 3, 4096), + (4096, 16, 256), + "third level digest: beginning" + ); + assert_eq!( + digest_build_iterator_basic(16, 3, 8192), + (8192, 16, 256), + "third level digest: next" + ); + } + + #[test] + fn digest_iterator_returns_level1_blocks() { + assert_eq!( + digest_build_iterator_blocks(16, 1, 16), + vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ); + assert_eq!( + digest_build_iterator_blocks(16, 1, 256), + vec![241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255] + ); + assert_eq!( + digest_build_iterator_blocks(16, 2, 32), + vec![17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31] + ); + assert_eq!( + digest_build_iterator_blocks(16, 3, 4080), + vec![ + 4065, 4066, 4067, 4068, 4069, 4070, 4071, 4072, 4073, 4074, 4075, 4076, 4077, 4078, + 4079 + ] + ); + } + + #[test] + fn digest_iterator_returns_level1_and_level2_blocks() { + assert_eq!( + digest_build_iterator_blocks(16, 2, 256), + vec![ + // level2 is a level1 digest of 16-1 previous blocks: + 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, + // level2 points to previous 16-1 level1 digests: + 16, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192, 208, 224, 240, + ], + ); + 
assert_eq!( + digest_build_iterator_blocks(16, 2, 4096), + vec![ + // level2 is a level1 digest of 16-1 previous blocks: + 4081, 4082, 4083, 4084, 4085, 4086, 4087, 4088, 4089, 4090, 4091, 4092, 4093, 4094, + 4095, // level2 points to previous 16-1 level1 digests: + 3856, 3872, 3888, 3904, 3920, 3936, 3952, 3968, 3984, 4000, 4016, 4032, 4048, 4064, + 4080, + ], + ); + } + + #[test] + fn digest_iterator_returns_level1_and_level2_and_level3_blocks() { + assert_eq!( + digest_build_iterator_blocks(16, 3, 4096), + vec![ + // level3 is a level1 digest of 16-1 previous blocks: + 4081, 4082, 4083, 4084, 4085, 4086, 4087, 4088, 4089, 4090, 4091, 4092, 4093, 4094, + 4095, // level3 points to previous 16-1 level1 digests: + 3856, 3872, 3888, 3904, 3920, 3936, 3952, 3968, 3984, 4000, 4016, 4032, 4048, 4064, + 4080, // level3 points to previous 16-1 level2 digests: + 256, 512, 768, 1024, 1280, 1536, 1792, 2048, 2304, 2560, 2816, 3072, 3328, 3584, + 3840, + ], + ); + } } diff --git a/core/state-machine/src/changes_trie/changes_iterator.rs b/core/state-machine/src/changes_trie/changes_iterator.rs index ad70117984..c038adb80f 100644 --- a/core/state-machine/src/changes_trie/changes_iterator.rs +++ b/core/state-machine/src/changes_trie/changes_iterator.rs @@ -17,493 +17,688 @@ //! Functions + iterator that traverses changes tries and returns all //! (block, extrinsic) pairs where given key has been changed. -use std::cell::RefCell; -use std::collections::VecDeque; -use parity_codec::{Decode, Encode}; -use hash_db::{HashDB, Hasher}; -use heapsize::HeapSizeOf; -use trie::{Recorder, MemoryDB}; +use crate::changes_trie::input::{ + DigestIndex, DigestIndexValue, ExtrinsicIndex, ExtrinsicIndexValue, +}; +use crate::changes_trie::storage::{InMemoryStorage, TrieBackendAdapter}; use crate::changes_trie::{AnchorBlockId, Configuration, RootsStorage, Storage}; -use crate::changes_trie::input::{DigestIndex, ExtrinsicIndex, DigestIndexValue, ExtrinsicIndexValue}; -use crate::changes_trie::storage::{TrieBackendAdapter, InMemoryStorage}; use crate::proving_backend::ProvingBackendEssence; -use crate::trie_backend_essence::{TrieBackendEssence}; +use crate::trie_backend_essence::TrieBackendEssence; +use hash_db::{HashDB, Hasher}; +use heapsize::HeapSizeOf; +use parity_codec::{Decode, Encode}; +use std::cell::RefCell; +use std::collections::VecDeque; +use trie::{MemoryDB, Recorder}; /// Return changes of given key at given blocks range. /// `max` is the number of best known block. /// Changes are returned in descending order (i.e. last block comes first). 
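// Hypothetical usage sketch (not from this patch; `config`, `storage`, `hash`
// and `key` are assumed to exist, with `storage` an InMemoryStorage<Blake2Hasher>
// holding the changes tries):
//
//     let changes: Vec<(u64, u32)> = key_changes::<_, Blake2Hasher>(
//         &config,
//         &storage,
//         1,                                    // begin of queried range
//         &AnchorBlockId { hash, number: 16 },  // anchor/end block
//         16,                                   // best known block
//         &key,
//     )?
//     .collect::<Result<_, String>>()?;
//
// Each (block, extrinsic_index) pair reports one change of `key`, last block
// first; tries are only opened where a digest says the key changed.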
 pub fn key_changes<'a, S: Storage<H>, H: Hasher>(
-	config: &'a Configuration,
-	storage: &'a S,
-	begin: u64,
-	end: &'a AnchorBlockId<H::Out>,
-	max: u64,
-	key: &'a [u8],
-) -> Result<DrilldownIterator<'a, S, S, H>, String> where H::Out: HeapSizeOf {
-	// we can't query any roots before root
-	let max = ::std::cmp::min(max, end.number);
-
-	Ok(DrilldownIterator {
-		essence: DrilldownIteratorEssence {
-			key,
-			roots_storage: storage,
-			storage,
-			begin,
-			end,
-			surface: surface_iterator(config, max, begin, end.number)?,
-
-			extrinsics: Default::default(),
-			blocks: Default::default(),
-
-			_hasher: ::std::marker::PhantomData::<H>::default(),
-		},
-	})
+    config: &'a Configuration,
+    storage: &'a S,
+    begin: u64,
+    end: &'a AnchorBlockId<H::Out>,
+    max: u64,
+    key: &'a [u8],
+) -> Result<DrilldownIterator<'a, S, S, H>, String>
+where
+    H::Out: HeapSizeOf,
+{
+    // we can't query any roots before root
+    let max = ::std::cmp::min(max, end.number);
+
+    Ok(DrilldownIterator {
+        essence: DrilldownIteratorEssence {
+            key,
+            roots_storage: storage,
+            storage,
+            begin,
+            end,
+            surface: surface_iterator(config, max, begin, end.number)?,
+
+            extrinsics: Default::default(),
+            blocks: Default::default(),
+
+            _hasher: ::std::marker::PhantomData::<H>::default(),
+        },
+    })
 }
 
 /// Returns proof of changes of given key at given blocks range.
 /// `max` is the number of best known block.
 pub fn key_changes_proof<S: Storage<H>, H: Hasher>(
-	config: &Configuration,
-	storage: &S,
-	begin: u64,
-	end: &AnchorBlockId<H::Out>,
-	max: u64,
-	key: &[u8],
-) -> Result<Vec<Vec<u8>>, String> where H::Out: HeapSizeOf {
-	// we can't query any roots before root
-	let max = ::std::cmp::min(max, end.number);
-
-	let mut iter = ProvingDrilldownIterator {
-		essence: DrilldownIteratorEssence {
-			key,
-			roots_storage: storage.clone(),
-			storage,
-			begin,
-			end,
-			surface: surface_iterator(config, max, begin, end.number)?,
-
-			extrinsics: Default::default(),
-			blocks: Default::default(),
-
-			_hasher: ::std::marker::PhantomData::<H>::default(),
-		},
-		proof_recorder: Default::default(),
-	};
-
-	// iterate to collect proof
-	while let Some(item) = iter.next() {
-		item?;
-	}
-
-	Ok(iter.extract_proof())
+    config: &Configuration,
+    storage: &S,
+    begin: u64,
+    end: &AnchorBlockId<H::Out>,
+    max: u64,
+    key: &[u8],
+) -> Result<Vec<Vec<u8>>, String>
+where
+    H::Out: HeapSizeOf,
+{
+    // we can't query any roots before root
+    let max = ::std::cmp::min(max, end.number);
+
+    let mut iter = ProvingDrilldownIterator {
+        essence: DrilldownIteratorEssence {
+            key,
+            roots_storage: storage.clone(),
+            storage,
+            begin,
+            end,
+            surface: surface_iterator(config, max, begin, end.number)?,
+
+            extrinsics: Default::default(),
+            blocks: Default::default(),
+
+            _hasher: ::std::marker::PhantomData::<H>::default(),
+        },
+        proof_recorder: Default::default(),
+    };
+
+    // iterate to collect proof
+    while let Some(item) = iter.next() {
+        item?;
+    }
+
+    Ok(iter.extract_proof())
 }
 
 /// Check key changes proof and return changes of the key at given blocks range.
 /// `max` is the number of best known block.
 /// Changes are returned in descending order (i.e. last block comes first).
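// Hypothetical round-trip sketch (not from this patch; `config`, `full_storage`,
// `roots_storage`, `begin`, `end`, `max` and `key` are assumed): a full node
// serves the trie nodes touched while drilling down, and a light client replays
// the same drilldown over just those nodes, needing only the changes trie roots:
//
//     let proof = key_changes_proof::<_, Blake2Hasher>(
//         &config, &full_storage, begin, &end, max, &key)?;
//     let changes = key_changes_proof_check::<_, Blake2Hasher>(
//         &config, &roots_storage, proof, begin, &end, max, &key)?;
//
// Both sides must agree on `config`, the block range and `max`; otherwise the
// drilldown visits different nodes and verification fails.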
pub fn key_changes_proof_check, H: Hasher>( - config: &Configuration, - roots_storage: &S, - proof: Vec>, - begin: u64, - end: &AnchorBlockId, - max: u64, - key: &[u8] -) -> Result, String> where H::Out: HeapSizeOf { - // we can't query any roots before root - let max = ::std::cmp::min(max, end.number); - - let mut proof_db = MemoryDB::::default(); - for item in proof { - proof_db.insert(&[], &item); - } - - let proof_db = InMemoryStorage::with_db(proof_db); - DrilldownIterator { - essence: DrilldownIteratorEssence { - key, - roots_storage, - storage: &proof_db, - begin, - end, - surface: surface_iterator(config, max, begin, end.number)?, - - extrinsics: Default::default(), - blocks: Default::default(), - - _hasher: ::std::marker::PhantomData::::default(), - }, - }.collect() + config: &Configuration, + roots_storage: &S, + proof: Vec>, + begin: u64, + end: &AnchorBlockId, + max: u64, + key: &[u8], +) -> Result, String> +where + H::Out: HeapSizeOf, +{ + // we can't query any roots before root + let max = ::std::cmp::min(max, end.number); + + let mut proof_db = MemoryDB::::default(); + for item in proof { + proof_db.insert(&[], &item); + } + + let proof_db = InMemoryStorage::with_db(proof_db); + DrilldownIterator { + essence: DrilldownIteratorEssence { + key, + roots_storage, + storage: &proof_db, + begin, + end, + surface: surface_iterator(config, max, begin, end.number)?, + + extrinsics: Default::default(), + blocks: Default::default(), + + _hasher: ::std::marker::PhantomData::::default(), + }, + } + .collect() } /// Surface iterator - only traverses top-level digests from given range and tries to find /// all digest changes for the key. pub struct SurfaceIterator<'a> { - config: &'a Configuration, - begin: u64, - max: u64, - current: Option, - current_begin: u64, - digest_step: u64, - digest_level: u32, + config: &'a Configuration, + begin: u64, + max: u64, + current: Option, + current_begin: u64, + digest_step: u64, + digest_level: u32, } impl<'a> Iterator for SurfaceIterator<'a> { - type Item = Result<(u64, u32), String>; - - fn next(&mut self) -> Option { - let current = self.current?; - let digest_level = self.digest_level; - - if current < self.digest_step { - self.current = None; - } - else { - let next = current - self.digest_step; - if next == 0 || next < self.begin { - self.current = None; - } - else if next > self.current_begin { - self.current = Some(next); - } else { - let (current, current_begin, digest_step, digest_level) = match - lower_bound_max_digest(self.config, self.max, self.begin, next) { - Err(err) => return Some(Err(err)), - Ok(range) => range, - }; - - self.current = Some(current); - self.current_begin = current_begin; - self.digest_step = digest_step; - self.digest_level = digest_level; - } - } - - Some(Ok((current, digest_level))) - } + type Item = Result<(u64, u32), String>; + + fn next(&mut self) -> Option { + let current = self.current?; + let digest_level = self.digest_level; + + if current < self.digest_step { + self.current = None; + } else { + let next = current - self.digest_step; + if next == 0 || next < self.begin { + self.current = None; + } else if next > self.current_begin { + self.current = Some(next); + } else { + let (current, current_begin, digest_step, digest_level) = + match lower_bound_max_digest(self.config, self.max, self.begin, next) { + Err(err) => return Some(Err(err)), + Ok(range) => range, + }; + + self.current = Some(current); + self.current_begin = current_begin; + self.digest_step = digest_step; + self.digest_level = digest_level; + 
} + } + + Some(Ok((current, digest_level))) + } } /// Drilldown iterator - receives 'digest points' from surface iterator and explores /// every point until extrinsic is found. -pub struct DrilldownIteratorEssence<'a, RS: 'a + RootsStorage, S: 'a + Storage, H: Hasher> where H::Out: 'a { - key: &'a [u8], - roots_storage: &'a RS, - storage: &'a S, - begin: u64, - end: &'a AnchorBlockId, - surface: SurfaceIterator<'a>, - - extrinsics: VecDeque<(u64, u32)>, - blocks: VecDeque<(u64, u32)>, - - _hasher: ::std::marker::PhantomData, +pub struct DrilldownIteratorEssence<'a, RS: 'a + RootsStorage, S: 'a + Storage, H: Hasher> +where + H::Out: 'a, +{ + key: &'a [u8], + roots_storage: &'a RS, + storage: &'a S, + begin: u64, + end: &'a AnchorBlockId, + surface: SurfaceIterator<'a>, + + extrinsics: VecDeque<(u64, u32)>, + blocks: VecDeque<(u64, u32)>, + + _hasher: ::std::marker::PhantomData, } -impl<'a, RS: 'a + RootsStorage, S: Storage, H: Hasher> DrilldownIteratorEssence<'a, RS, S, H> { - pub fn next(&mut self, trie_reader: F) -> Option> - where - F: FnMut(&S, H::Out, &[u8]) -> Result>, String>, - { - match self.do_next(trie_reader) { - Ok(Some(res)) => Some(Ok(res)), - Ok(None) => None, - Err(err) => Some(Err(err)), - } - } - - fn do_next(&mut self, mut trie_reader: F) -> Result, String> - where - F: FnMut(&S, H::Out, &[u8]) -> Result>, String>, - { - loop { - if let Some((block, extrinsic)) = self.extrinsics.pop_front() { - return Ok(Some((block, extrinsic))); - } - - if let Some((block, level)) = self.blocks.pop_front() { - // not having a changes trie root is an error because: - // we never query roots for future blocks - // AND trie roots for old blocks are known (both on full + light node) - let trie_root = self.roots_storage.root(&self.end, block)? - .ok_or_else(|| format!("Changes trie root for block {} is not found", block))?; - - // only return extrinsics for blocks before self.max - // most of blocks will be filtered out beore pushing to `self.blocks` - // here we just throwing away changes at digest blocks we're processing - debug_assert!(block >= self.begin, "We shall not touch digests earlier than a range' begin"); - if block <= self.end.number { - let extrinsics_key = ExtrinsicIndex { block, key: self.key.to_vec() }.encode(); - let extrinsics = trie_reader(&self.storage, trie_root, &extrinsics_key); - if let Some(extrinsics) = extrinsics? { - let extrinsics: Option = Decode::decode(&mut &extrinsics[..]); - if let Some(extrinsics) = extrinsics { - self.extrinsics.extend(extrinsics.into_iter().rev().map(|e| (block, e))); - } - } - } - - let blocks_key = DigestIndex { block, key: self.key.to_vec() }.encode(); - let blocks = trie_reader(&self.storage, trie_root, &blocks_key); - if let Some(blocks) = blocks? 
{
-					let blocks: Option<DigestIndexValue> = Decode::decode(&mut &blocks[..]);
-					if let Some(blocks) = blocks {
-						// filter level0 blocks here because we tend to use digest blocks,
-						// AND digest block changes could also include changes for out-of-range blocks
-						let begin = self.begin;
-						let end = self.end.number;
-						self.blocks.extend(blocks.into_iter()
-							.rev()
-							.filter(|b| level > 1 || (*b >= begin && *b <= end))
-							.map(|b| (b, level - 1))
-						);
-					}
-				}
-
-				continue;
-			}
-
-			match self.surface.next() {
-				Some(Ok(block)) => self.blocks.push_back(block),
-				Some(Err(err)) => return Err(err),
-				None => return Ok(None),
-			}
-		}
-	}
+impl<'a, RS: 'a + RootsStorage<H>, S: Storage<H>, H: Hasher>
+    DrilldownIteratorEssence<'a, RS, S, H>
+{
+    pub fn next<F>(&mut self, trie_reader: F) -> Option<Result<(u64, u32), String>>
+    where
+        F: FnMut(&S, H::Out, &[u8]) -> Result<Option<Vec<u8>>, String>,
+    {
+        match self.do_next(trie_reader) {
+            Ok(Some(res)) => Some(Ok(res)),
+            Ok(None) => None,
+            Err(err) => Some(Err(err)),
+        }
+    }
+
+    fn do_next<F>(&mut self, mut trie_reader: F) -> Result<Option<(u64, u32)>, String>
+    where
+        F: FnMut(&S, H::Out, &[u8]) -> Result<Option<Vec<u8>>, String>,
+    {
+        loop {
+            if let Some((block, extrinsic)) = self.extrinsics.pop_front() {
+                return Ok(Some((block, extrinsic)));
+            }
+
+            if let Some((block, level)) = self.blocks.pop_front() {
+                // not having a changes trie root is an error because:
+                // we never query roots for future blocks
+                // AND trie roots for old blocks are known (both on full + light node)
+                let trie_root = self
+                    .roots_storage
+                    .root(&self.end, block)?
+                    .ok_or_else(|| format!("Changes trie root for block {} is not found", block))?;
+
+                // only return extrinsics for blocks before self.max
+                // most blocks will be filtered out before pushing to `self.blocks`
+                // here we are just throwing away changes at digest blocks we're processing
+                debug_assert!(
+                    block >= self.begin,
+                    "We shall not touch digests earlier than a range's begin"
+                );
+                if block <= self.end.number {
+                    let extrinsics_key = ExtrinsicIndex {
+                        block,
+                        key: self.key.to_vec(),
+                    }
+                    .encode();
+                    let extrinsics = trie_reader(&self.storage, trie_root, &extrinsics_key);
+                    if let Some(extrinsics) = extrinsics? {
+                        let extrinsics: Option<ExtrinsicIndexValue> =
+                            Decode::decode(&mut &extrinsics[..]);
+                        if let Some(extrinsics) = extrinsics {
+                            self.extrinsics
+                                .extend(extrinsics.into_iter().rev().map(|e| (block, e)));
+                        }
+                    }
+                }
+
+                let blocks_key = DigestIndex {
+                    block,
+                    key: self.key.to_vec(),
+                }
+                .encode();
+                let blocks = trie_reader(&self.storage, trie_root, &blocks_key);
+                if let Some(blocks) = blocks? {
+                    let blocks: Option<DigestIndexValue> = Decode::decode(&mut &blocks[..]);
+                    if let Some(blocks) = blocks {
+                        // filter level0 blocks here because we tend to use digest blocks,
+                        // AND digest block changes could also include changes for out-of-range blocks
+                        let begin = self.begin;
+                        let end = self.end.number;
+                        self.blocks.extend(
+                            blocks
+                                .into_iter()
+                                .rev()
+                                .filter(|b| level > 1 || (*b >= begin && *b <= end))
+                                .map(|b| (b, level - 1)),
+                        );
+                    }
+                }
+
+                continue;
+            }
+
+            match self.surface.next() {
+                Some(Ok(block)) => self.blocks.push_back(block),
+                Some(Err(err)) => return Err(err),
+                None => return Ok(None),
+            }
+        }
+    }
 }
 
 /// Exploring drilldown operator.
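// Stripped-down model of the control flow in `do_next` above (hypothetical
// helper, not part of this patch): two worklists, ready (block, extrinsic)
// results and (block, digest_level) tries still to open, with the trie lookups
// abstracted as closures and the surface-iterator refill elided.
use std::collections::VecDeque;

fn drilldown_step(
    extrinsics: &mut VecDeque<(u64, u32)>,
    blocks: &mut VecDeque<(u64, u32)>,
    mut read_extrinsics: impl FnMut(u64) -> Vec<u32>,
    mut read_digest: impl FnMut(u64) -> Vec<u64>,
) -> Option<(u64, u32)> {
    loop {
        // Ready results are drained first, so answers stream out while deeper
        // digest levels are still being opened.
        if let Some(found) = extrinsics.pop_front() {
            return Some(found);
        }
        // No pending trie to open means this batch is exhausted.
        let (block, level) = blocks.pop_front()?;
        extrinsics.extend(read_extrinsics(block).into_iter().map(|e| (block, e)));
        if level > 1 {
            // A digest entry only ever sends the search one level down.
            blocks.extend(read_digest(block).into_iter().map(|b| (b, level - 1)));
        }
    }
}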
-pub struct DrilldownIterator<'a, RS: 'a + RootsStorage, S: 'a + Storage, H: Hasher> where H::Out: 'a { - essence: DrilldownIteratorEssence<'a, RS, S, H>, +pub struct DrilldownIterator<'a, RS: 'a + RootsStorage, S: 'a + Storage, H: Hasher> +where + H::Out: 'a, +{ + essence: DrilldownIteratorEssence<'a, RS, S, H>, } impl<'a, RS: 'a + RootsStorage, S: Storage, H: Hasher> Iterator - for DrilldownIterator<'a, RS, S, H> - where H::Out: HeapSizeOf + for DrilldownIterator<'a, RS, S, H> +where + H::Out: HeapSizeOf, { - type Item = Result<(u64, u32), String>; + type Item = Result<(u64, u32), String>; - fn next(&mut self) -> Option { - self.essence.next(|storage, root, key| - TrieBackendEssence::<_, H>::new(TrieBackendAdapter::new(storage), root).storage(key)) - } + fn next(&mut self) -> Option { + self.essence.next(|storage, root, key| { + TrieBackendEssence::<_, H>::new(TrieBackendAdapter::new(storage), root).storage(key) + }) + } } /// Proving drilldown iterator. -struct ProvingDrilldownIterator<'a, RS: 'a + RootsStorage, S: 'a + Storage, H: Hasher> where H::Out: 'a { - essence: DrilldownIteratorEssence<'a, RS, S, H>, - proof_recorder: RefCell>, +struct ProvingDrilldownIterator<'a, RS: 'a + RootsStorage, S: 'a + Storage, H: Hasher> +where + H::Out: 'a, +{ + essence: DrilldownIteratorEssence<'a, RS, S, H>, + proof_recorder: RefCell>, } -impl<'a, RS: 'a + RootsStorage, S: Storage, H: Hasher> ProvingDrilldownIterator<'a, RS, S, H> { - /// Consume the iterator, extracting the gathered proof in lexicographical order - /// by value. - pub fn extract_proof(self) -> Vec> { - self.proof_recorder.into_inner().drain() - .into_iter() - .map(|n| n.data.to_vec()) - .collect() - } +impl<'a, RS: 'a + RootsStorage, S: Storage, H: Hasher> + ProvingDrilldownIterator<'a, RS, S, H> +{ + /// Consume the iterator, extracting the gathered proof in lexicographical order + /// by value. + pub fn extract_proof(self) -> Vec> { + self.proof_recorder + .into_inner() + .drain() + .into_iter() + .map(|n| n.data.to_vec()) + .collect() + } } -impl<'a, RS: 'a + RootsStorage, S: Storage, H: Hasher> Iterator for ProvingDrilldownIterator<'a, RS, S, H> where H::Out: HeapSizeOf { - type Item = Result<(u64, u32), String>; - - fn next(&mut self) -> Option { - let proof_recorder = &mut *self.proof_recorder.try_borrow_mut() - .expect("only fails when already borrowed; storage() is non-reentrant; qed"); - self.essence.next(|storage, root, key| - ProvingBackendEssence::<_, H> { - backend: &TrieBackendEssence::new(TrieBackendAdapter::new(storage), root), - proof_recorder, - }.storage(key)) - } +impl<'a, RS: 'a + RootsStorage, S: Storage, H: Hasher> Iterator + for ProvingDrilldownIterator<'a, RS, S, H> +where + H::Out: HeapSizeOf, +{ + type Item = Result<(u64, u32), String>; + + fn next(&mut self) -> Option { + let proof_recorder = &mut *self + .proof_recorder + .try_borrow_mut() + .expect("only fails when already borrowed; storage() is non-reentrant; qed"); + self.essence.next(|storage, root, key| { + ProvingBackendEssence::<_, H> { + backend: &TrieBackendEssence::new(TrieBackendAdapter::new(storage), root), + proof_recorder, + } + .storage(key) + }) + } } /// Returns surface iterator for given range of blocks. 
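The construction below leans on one piece of arithmetic: rounding a block down to the start of its enclosing digest interval. A standalone sketch on concrete numbers (the helper name is ours, assuming digest blocks sit at multiples of the interval, as in the configuration used by the tests):

// A worked example of the interval arithmetic inside `lower_bound_max_digest`
// (`interval` plays the role of new_digest_interval).
fn digest_bounds(current: u64, interval: u64) -> (u64, u64) {
    let begin = ((current - 1) / interval) * interval; // digest covers begin+1 ..= end
    (begin, begin + interval)
}

fn main() {
    // digest_interval = 4: block 7 is covered by the l1 digest at block 8
    assert_eq!(digest_bounds(7, 4), (4, 8));
    // one level up (interval 4 * 4 = 16): block 7 is covered by the l2 digest at 16
    assert_eq!(digest_bounds(7, 16), (0, 16));
    // the digest block itself belongs to its own interval
    assert_eq!(digest_bounds(8, 4), (4, 8));
}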
-fn surface_iterator<'a>(config: &'a Configuration, max: u64, begin: u64, end: u64) -> Result, String> { - let (current, current_begin, digest_step, digest_level) = lower_bound_max_digest(config, max, begin, end)?; - Ok(SurfaceIterator { - config, - begin, - max, - current: Some(current), - current_begin, - digest_step, - digest_level, - }) +fn surface_iterator<'a>( + config: &'a Configuration, + max: u64, + begin: u64, + end: u64, +) -> Result, String> { + let (current, current_begin, digest_step, digest_level) = + lower_bound_max_digest(config, max, begin, end)?; + Ok(SurfaceIterator { + config, + begin, + max, + current: Some(current), + current_begin, + digest_step, + digest_level, + }) } /// Returns parameters of highest level digest block that includes the end of given range /// and tends to include the whole range. fn lower_bound_max_digest( - config: &Configuration, - max: u64, - begin: u64, - end: u64, + config: &Configuration, + max: u64, + begin: u64, + end: u64, ) -> Result<(u64, u64, u64, u32), String> { - if end > max || begin > end { - return Err("invalid changes range".into()); - } - - let mut digest_level = 0u32; - let mut digest_step = 1u64; - let mut digest_interval = 0u64; - let mut current = end; - let mut current_begin = begin; - if begin != end { - while digest_level != config.digest_levels { - let new_digest_level = digest_level + 1; - let new_digest_step = digest_step * config.digest_interval; - let new_digest_interval = config.digest_interval * { - if digest_interval == 0 { 1 } else { digest_interval } - }; - let new_digest_begin = ((current - 1) / new_digest_interval) * new_digest_interval; - let new_digest_end = new_digest_begin + new_digest_interval; - let new_current = new_digest_begin + new_digest_interval; - - if new_digest_end > max { - if begin < new_digest_begin { - current_begin = new_digest_begin; - } - break; - } - - digest_level = new_digest_level; - digest_step = new_digest_step; - digest_interval = new_digest_interval; - current = new_current; - current_begin = new_digest_begin; - - if new_digest_begin <= begin && new_digest_end >= end { - break; - } - } - } - - Ok(( - current, - current_begin, - digest_step, - digest_level, - )) + if end > max || begin > end { + return Err("invalid changes range".into()); + } + + let mut digest_level = 0u32; + let mut digest_step = 1u64; + let mut digest_interval = 0u64; + let mut current = end; + let mut current_begin = begin; + if begin != end { + while digest_level != config.digest_levels { + let new_digest_level = digest_level + 1; + let new_digest_step = digest_step * config.digest_interval; + let new_digest_interval = config.digest_interval * { + if digest_interval == 0 { + 1 + } else { + digest_interval + } + }; + let new_digest_begin = ((current - 1) / new_digest_interval) * new_digest_interval; + let new_digest_end = new_digest_begin + new_digest_interval; + let new_current = new_digest_begin + new_digest_interval; + + if new_digest_end > max { + if begin < new_digest_begin { + current_begin = new_digest_begin; + } + break; + } + + digest_level = new_digest_level; + digest_step = new_digest_step; + digest_interval = new_digest_interval; + current = new_current; + current_begin = new_digest_begin; + + if new_digest_begin <= begin && new_digest_end >= end { + break; + } + } + } + + Ok((current, current_begin, digest_step, digest_level)) } #[cfg(test)] mod tests { - use std::iter::FromIterator; - use primitives::Blake2Hasher; - use crate::changes_trie::input::InputPair; - use 
crate::changes_trie::storage::InMemoryStorage; - use super::*; - - fn prepare_for_drilldown() -> (Configuration, InMemoryStorage) { - let config = Configuration { digest_interval: 4, digest_levels: 2 }; - let backend = InMemoryStorage::with_inputs(vec![ - // digest: 1..4 => [(3, 0)] - (1, vec![]), - (2, vec![]), - (3, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 3, key: vec![42] }, vec![0]), - ]), - (4, vec![ - InputPair::DigestIndex(DigestIndex { block: 4, key: vec![42] }, vec![3]), - ]), - // digest: 5..8 => [(6, 3), (8, 1+2)] - (5, vec![]), - (6, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 6, key: vec![42] }, vec![3]), - ]), - (7, vec![]), - (8, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 8, key: vec![42] }, vec![1, 2]), - InputPair::DigestIndex(DigestIndex { block: 8, key: vec![42] }, vec![6]), - ]), - // digest: 9..12 => [] - (9, vec![]), - (10, vec![]), - (11, vec![]), - (12, vec![]), - // digest: 0..16 => [4, 8] - (13, vec![]), - (14, vec![]), - (15, vec![]), - (16, vec![ - InputPair::DigestIndex(DigestIndex { block: 16, key: vec![42] }, vec![4, 8]), - ]), - ]); - - (config, backend) - } - - #[test] - fn drilldown_iterator_works() { - let (config, storage) = prepare_for_drilldown(); - let drilldown_result = key_changes::, Blake2Hasher>( - &config, &storage, 0, &AnchorBlockId { hash: Default::default(), number: 16 }, 16, &[42]) - .and_then(Result::from_iter); - assert_eq!(drilldown_result, Ok(vec![(8, 2), (8, 1), (6, 3), (3, 0)])); - - let drilldown_result = key_changes::, Blake2Hasher>( - &config, &storage, 0, &AnchorBlockId { hash: Default::default(), number: 2 }, 4, &[42]) - .and_then(Result::from_iter); - assert_eq!(drilldown_result, Ok(vec![])); - - let drilldown_result = key_changes::, Blake2Hasher>( - &config, &storage, 0, &AnchorBlockId { hash: Default::default(), number: 3 }, 4, &[42]) - .and_then(Result::from_iter); - assert_eq!(drilldown_result, Ok(vec![(3, 0)])); - - let drilldown_result = key_changes::, Blake2Hasher>( - &config, &storage, 7, &AnchorBlockId { hash: Default::default(), number: 8 }, 8, &[42]) - .and_then(Result::from_iter); - assert_eq!(drilldown_result, Ok(vec![(8, 2), (8, 1)])); - - let drilldown_result = key_changes::, Blake2Hasher>( - &config, &storage, 5, &AnchorBlockId { hash: Default::default(), number: 7 }, 8, &[42]) - .and_then(Result::from_iter); - assert_eq!(drilldown_result, Ok(vec![(6, 3)])); - } - - #[test] - fn drilldown_iterator_fails_when_storage_fails() { - let (config, storage) = prepare_for_drilldown(); - storage.clear_storage(); - - assert!(key_changes::, Blake2Hasher>( - &config, &storage, 0, &AnchorBlockId { hash: Default::default(), number: 100 }, 1000, &[42]) - .and_then(|i| i.collect::, _>>()).is_err()); - } - - #[test] - fn drilldown_iterator_fails_when_range_is_invalid() { - let (config, storage) = prepare_for_drilldown(); - assert!(key_changes::, Blake2Hasher>( - &config, &storage, 0, &AnchorBlockId { hash: Default::default(), number: 100 }, 50, &[42]).is_err()); - assert!(key_changes::, Blake2Hasher>( - &config, &storage, 20, &AnchorBlockId { hash: Default::default(), number: 10 }, 100, &[42]).is_err()); - } - - - #[test] - fn proving_drilldown_iterator_works() { - // happens on remote full node: - - // create drilldown iterator that records all trie nodes during drilldown - let (remote_config, remote_storage) = prepare_for_drilldown(); - let remote_proof = key_changes_proof::, Blake2Hasher>( - &remote_config, &remote_storage, - 0, &AnchorBlockId { hash: Default::default(), number: 16 }, 
16, &[42]).unwrap(); - - // happens on local light node: - - // create drilldown iterator that works the same, but only depends on trie - let (local_config, local_storage) = prepare_for_drilldown(); - local_storage.clear_storage(); - let local_result = key_changes_proof_check::, Blake2Hasher>( - &local_config, &local_storage, remote_proof, - 0, &AnchorBlockId { hash: Default::default(), number: 16 }, 16, &[42]); - - // check that drilldown result is the same as if it was happening at the full node - assert_eq!(local_result, Ok(vec![(8, 2), (8, 1), (6, 3), (3, 0)])); - } + use super::*; + use crate::changes_trie::input::InputPair; + use crate::changes_trie::storage::InMemoryStorage; + use primitives::Blake2Hasher; + use std::iter::FromIterator; + + fn prepare_for_drilldown() -> (Configuration, InMemoryStorage) { + let config = Configuration { + digest_interval: 4, + digest_levels: 2, + }; + let backend = InMemoryStorage::with_inputs(vec![ + // digest: 1..4 => [(3, 0)] + (1, vec![]), + (2, vec![]), + ( + 3, + vec![InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: 3, + key: vec![42], + }, + vec![0], + )], + ), + ( + 4, + vec![InputPair::DigestIndex( + DigestIndex { + block: 4, + key: vec![42], + }, + vec![3], + )], + ), + // digest: 5..8 => [(6, 3), (8, 1+2)] + (5, vec![]), + ( + 6, + vec![InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: 6, + key: vec![42], + }, + vec![3], + )], + ), + (7, vec![]), + ( + 8, + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: 8, + key: vec![42], + }, + vec![1, 2], + ), + InputPair::DigestIndex( + DigestIndex { + block: 8, + key: vec![42], + }, + vec![6], + ), + ], + ), + // digest: 9..12 => [] + (9, vec![]), + (10, vec![]), + (11, vec![]), + (12, vec![]), + // digest: 0..16 => [4, 8] + (13, vec![]), + (14, vec![]), + (15, vec![]), + ( + 16, + vec![InputPair::DigestIndex( + DigestIndex { + block: 16, + key: vec![42], + }, + vec![4, 8], + )], + ), + ]); + + (config, backend) + } + + #[test] + fn drilldown_iterator_works() { + let (config, storage) = prepare_for_drilldown(); + let drilldown_result = key_changes::, Blake2Hasher>( + &config, + &storage, + 0, + &AnchorBlockId { + hash: Default::default(), + number: 16, + }, + 16, + &[42], + ) + .and_then(Result::from_iter); + assert_eq!(drilldown_result, Ok(vec![(8, 2), (8, 1), (6, 3), (3, 0)])); + + let drilldown_result = key_changes::, Blake2Hasher>( + &config, + &storage, + 0, + &AnchorBlockId { + hash: Default::default(), + number: 2, + }, + 4, + &[42], + ) + .and_then(Result::from_iter); + assert_eq!(drilldown_result, Ok(vec![])); + + let drilldown_result = key_changes::, Blake2Hasher>( + &config, + &storage, + 0, + &AnchorBlockId { + hash: Default::default(), + number: 3, + }, + 4, + &[42], + ) + .and_then(Result::from_iter); + assert_eq!(drilldown_result, Ok(vec![(3, 0)])); + + let drilldown_result = key_changes::, Blake2Hasher>( + &config, + &storage, + 7, + &AnchorBlockId { + hash: Default::default(), + number: 8, + }, + 8, + &[42], + ) + .and_then(Result::from_iter); + assert_eq!(drilldown_result, Ok(vec![(8, 2), (8, 1)])); + + let drilldown_result = key_changes::, Blake2Hasher>( + &config, + &storage, + 5, + &AnchorBlockId { + hash: Default::default(), + number: 7, + }, + 8, + &[42], + ) + .and_then(Result::from_iter); + assert_eq!(drilldown_result, Ok(vec![(6, 3)])); + } + + #[test] + fn drilldown_iterator_fails_when_storage_fails() { + let (config, storage) = prepare_for_drilldown(); + storage.clear_storage(); + + assert!(key_changes::, Blake2Hasher>( + &config, + &storage, + 0, 
+ &AnchorBlockId { + hash: Default::default(), + number: 100 + }, + 1000, + &[42] + ) + .and_then(|i| i.collect::, _>>()) + .is_err()); + } + + #[test] + fn drilldown_iterator_fails_when_range_is_invalid() { + let (config, storage) = prepare_for_drilldown(); + assert!(key_changes::, Blake2Hasher>( + &config, + &storage, + 0, + &AnchorBlockId { + hash: Default::default(), + number: 100 + }, + 50, + &[42] + ) + .is_err()); + assert!(key_changes::, Blake2Hasher>( + &config, + &storage, + 20, + &AnchorBlockId { + hash: Default::default(), + number: 10 + }, + 100, + &[42] + ) + .is_err()); + } + + #[test] + fn proving_drilldown_iterator_works() { + // happens on remote full node: + + // create drilldown iterator that records all trie nodes during drilldown + let (remote_config, remote_storage) = prepare_for_drilldown(); + let remote_proof = key_changes_proof::, Blake2Hasher>( + &remote_config, + &remote_storage, + 0, + &AnchorBlockId { + hash: Default::default(), + number: 16, + }, + 16, + &[42], + ) + .unwrap(); + + // happens on local light node: + + // create drilldown iterator that works the same, but only depends on trie + let (local_config, local_storage) = prepare_for_drilldown(); + local_storage.clear_storage(); + let local_result = key_changes_proof_check::, Blake2Hasher>( + &local_config, + &local_storage, + remote_proof, + 0, + &AnchorBlockId { + hash: Default::default(), + number: 16, + }, + 16, + &[42], + ); + + // check that drilldown result is the same as if it was happening at the full node + assert_eq!(local_result, Ok(vec![(8, 2), (8, 1), (6, 3), (3, 0)])); + } } diff --git a/core/state-machine/src/changes_trie/input.rs b/core/state-machine/src/changes_trie/input.rs index 3154aff715..6dde7559c6 100644 --- a/core/state-machine/src/changes_trie/input.rs +++ b/core/state-machine/src/changes_trie/input.rs @@ -21,10 +21,10 @@ use parity_codec::{Decode, Encode, Input, Output}; /// Key of { changed key => set of extrinsic indices } mapping. #[derive(Clone, Debug, PartialEq, Eq)] pub struct ExtrinsicIndex { - /// Block at which this key has been inserted in the trie. - pub block: u64, - /// Storage key this node is responsible for. - pub key: Vec, + /// Block at which this key has been inserted in the trie. + pub block: u64, + /// Storage key this node is responsible for. + pub key: Vec, } /// Value of { changed key => set of extrinsic indices } mapping. @@ -33,10 +33,10 @@ pub type ExtrinsicIndexValue = Vec; /// Key of { changed key => block/digest block numbers } mapping. #[derive(Clone, Debug, PartialEq, Eq)] pub struct DigestIndex { - /// Block at which this key has been inserted in the trie. - pub block: u64, - /// Storage key this node is responsible for. - pub key: Vec, + /// Block at which this key has been inserted in the trie. + pub block: u64, + /// Storage key this node is responsible for. + pub key: Vec, } /// Value of { changed key => block/digest block numbers } mapping. @@ -45,105 +45,110 @@ pub type DigestIndexValue = Vec; /// Single input pair of changes trie. #[derive(Clone, Debug, PartialEq, Eq)] pub enum InputPair { - /// Element of { key => set of extrinsics where key has been changed } element mapping. - ExtrinsicIndex(ExtrinsicIndex, ExtrinsicIndexValue), - /// Element of { key => set of blocks/digest blocks where key has been changed } element mapping. - DigestIndex(DigestIndex, DigestIndexValue), + /// Element of { key => set of extrinsics where key has been changed } element mapping. 
+ ExtrinsicIndex(ExtrinsicIndex, ExtrinsicIndexValue), + /// Element of { key => set of blocks/digest blocks where key has been changed } element mapping. + DigestIndex(DigestIndex, DigestIndexValue), } /// Single input key of changes trie. #[derive(Clone, Debug, PartialEq, Eq)] pub enum InputKey { - /// Key of { key => set of extrinsics where key has been changed } element mapping. - ExtrinsicIndex(ExtrinsicIndex), - /// Key of { key => set of blocks/digest blocks where key has been changed } element mapping. - DigestIndex(DigestIndex), + /// Key of { key => set of extrinsics where key has been changed } element mapping. + ExtrinsicIndex(ExtrinsicIndex), + /// Key of { key => set of blocks/digest blocks where key has been changed } element mapping. + DigestIndex(DigestIndex), } impl Into<(Vec, Vec)> for InputPair { - fn into(self) -> (Vec, Vec) { - match self { - InputPair::ExtrinsicIndex(key, value) => (key.encode(), value.encode()), - InputPair::DigestIndex(key, value) => (key.encode(), value.encode()), - } - } + fn into(self) -> (Vec, Vec) { + match self { + InputPair::ExtrinsicIndex(key, value) => (key.encode(), value.encode()), + InputPair::DigestIndex(key, value) => (key.encode(), value.encode()), + } + } } impl Into for InputPair { - fn into(self) -> InputKey { - match self { - InputPair::ExtrinsicIndex(key, _) => InputKey::ExtrinsicIndex(key), - InputPair::DigestIndex(key, _) => InputKey::DigestIndex(key), - } - } + fn into(self) -> InputKey { + match self { + InputPair::ExtrinsicIndex(key, _) => InputKey::ExtrinsicIndex(key), + InputPair::DigestIndex(key, _) => InputKey::DigestIndex(key), + } + } } impl ExtrinsicIndex { - pub fn key_neutral_prefix(block: u64) -> Vec { - let mut prefix = vec![1]; - prefix.extend(block.encode()); - prefix - } + pub fn key_neutral_prefix(block: u64) -> Vec { + let mut prefix = vec![1]; + prefix.extend(block.encode()); + prefix + } } impl Encode for ExtrinsicIndex { - fn encode_to(&self, dest: &mut W) { - dest.push_byte(1); - self.block.encode_to(dest); - self.key.encode_to(dest); - } + fn encode_to(&self, dest: &mut W) { + dest.push_byte(1); + self.block.encode_to(dest); + self.key.encode_to(dest); + } } impl DigestIndex { - pub fn key_neutral_prefix(block: u64) -> Vec { - let mut prefix = vec![2]; - prefix.extend(block.encode()); - prefix - } + pub fn key_neutral_prefix(block: u64) -> Vec { + let mut prefix = vec![2]; + prefix.extend(block.encode()); + prefix + } } - impl Encode for DigestIndex { - fn encode_to(&self, dest: &mut W) { - dest.push_byte(2); - self.block.encode_to(dest); - self.key.encode_to(dest); - } + fn encode_to(&self, dest: &mut W) { + dest.push_byte(2); + self.block.encode_to(dest); + self.key.encode_to(dest); + } } impl Decode for InputKey { - fn decode(input: &mut I) -> Option { - match input.read_byte()? { - 1 => Some(InputKey::ExtrinsicIndex(ExtrinsicIndex { - block: Decode::decode(input)?, - key: Decode::decode(input)?, - })), - 2 => Some(InputKey::DigestIndex(DigestIndex { - block: Decode::decode(input)?, - key: Decode::decode(input)?, - })), - _ => None, - } - } + fn decode(input: &mut I) -> Option { + match input.read_byte()? 
{ + 1 => Some(InputKey::ExtrinsicIndex(ExtrinsicIndex { + block: Decode::decode(input)?, + key: Decode::decode(input)?, + })), + 2 => Some(InputKey::DigestIndex(DigestIndex { + block: Decode::decode(input)?, + key: Decode::decode(input)?, + })), + _ => None, + } + } } #[cfg(test)] mod tests { - use super::*; - - #[test] - fn extrinsic_index_serialized_and_deserialized() { - let original = ExtrinsicIndex { block: 777, key: vec![42] }; - let serialized = original.encode(); - let deserialized: InputKey = Decode::decode(&mut &serialized[..]).unwrap(); - assert_eq!(InputKey::ExtrinsicIndex(original), deserialized); - } - - #[test] - fn digest_index_serialized_and_deserialized() { - let original = DigestIndex { block: 777, key: vec![42] }; - let serialized = original.encode(); - let deserialized: InputKey = Decode::decode(&mut &serialized[..]).unwrap(); - assert_eq!(InputKey::DigestIndex(original), deserialized); - } + use super::*; + + #[test] + fn extrinsic_index_serialized_and_deserialized() { + let original = ExtrinsicIndex { + block: 777, + key: vec![42], + }; + let serialized = original.encode(); + let deserialized: InputKey = Decode::decode(&mut &serialized[..]).unwrap(); + assert_eq!(InputKey::ExtrinsicIndex(original), deserialized); + } + + #[test] + fn digest_index_serialized_and_deserialized() { + let original = DigestIndex { + block: 777, + key: vec![42], + }; + let serialized = original.encode(); + let deserialized: InputKey = Decode::decode(&mut &serialized[..]).unwrap(); + assert_eq!(InputKey::DigestIndex(original), deserialized); + } } diff --git a/core/state-machine/src/changes_trie/mod.rs b/core/state-machine/src/changes_trie/mod.rs index c29131cc0c..4300f18489 100644 --- a/core/state-machine/src/changes_trie/mod.rs +++ b/core/state-machine/src/changes_trie/mod.rs @@ -42,18 +42,18 @@ mod input; mod prune; mod storage; -pub use self::storage::InMemoryStorage; pub use self::changes_iterator::{key_changes, key_changes_proof, key_changes_proof_check}; -pub use self::prune::{prune, oldest_non_pruned_trie}; +pub use self::prune::{oldest_non_pruned_trie, prune}; +pub use self::storage::InMemoryStorage; -use hash_db::Hasher; -use heapsize::HeapSizeOf; use crate::backend::Backend; -use primitives; use crate::changes_trie::build::prepare_input; use crate::overlayed_changes::OverlayedChanges; use crate::trie_backend_essence::TrieBackendStorage; -use trie::{DBValue, trie_root}; +use hash_db::Hasher; +use heapsize::HeapSizeOf; +use primitives; +use trie::{trie_root, DBValue}; /// Changes that are made outside of extrinsics are marked with this index; pub const NO_EXTRINSIC_INDEX: u32 = 0xffffffff; @@ -61,23 +61,23 @@ pub const NO_EXTRINSIC_INDEX: u32 = 0xffffffff; /// Block identifier that could be used to determine fork of this block. #[derive(Debug)] pub struct AnchorBlockId { - /// Hash of this block. - pub hash: Hash, - /// Number of this block. - pub number: u64, + /// Hash of this block. + pub hash: Hash, + /// Number of this block. + pub number: u64, } /// Changes trie storage. Provides access to trie roots and trie nodes. pub trait RootsStorage: Send + Sync { - /// Get changes trie root for the block with given number which is an ancestor (or the block - /// itself) of the anchor_block (i.e. anchor_block.number >= block). - fn root(&self, anchor: &AnchorBlockId, block: u64) -> Result, String>; + /// Get changes trie root for the block with given number which is an ancestor (or the block + /// itself) of the anchor_block (i.e. anchor_block.number >= block). 
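For reference while reading the `Encode`/`Decode` impls above: an index key is a one-byte tag (1 for `ExtrinsicIndex`, 2 for `DigestIndex`) followed by the SCALE-encoded block number and key. A hand-rolled sketch of the same byte layout (assuming SCALE's fixed 8-byte little-endian `u64` and single-byte compact length for short vectors; not the crate's codec):

fn encode_index_key(tag: u8, block: u64, key: &[u8]) -> Vec<u8> {
    assert!(key.len() < 64, "sketch only handles single-byte compact lengths");
    let mut out = vec![tag];
    out.extend_from_slice(&block.to_le_bytes()); // u64 as 8 LE bytes
    out.push((key.len() as u8) << 2); // SCALE compact length for len < 64
    out.extend_from_slice(key);
    out
}

fn main() {
    // ExtrinsicIndex { block: 3, key: vec![42] } from the fixtures earlier
    let encoded = encode_index_key(1, 3, &[42]);
    assert_eq!(encoded, vec![1, 3, 0, 0, 0, 0, 0, 0, 0, 4, 42]);
    // everything before the key bytes is exactly `key_neutral_prefix(3)`
    assert!(encoded.starts_with(&[1, 3, 0, 0, 0, 0, 0, 0, 0]));
}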
+ fn root(&self, anchor: &AnchorBlockId, block: u64) -> Result, String>; } /// Changes trie storage. Provides access to trie roots and trie nodes. pub trait Storage: RootsStorage { - /// Get a trie node. - fn get(&self, key: &H::Out, prefix: &[u8]) -> Result, String>; + /// Get a trie node. + fn get(&self, key: &H::Out, prefix: &[u8]) -> Result, String>; } /// Changes trie configuration. @@ -86,21 +86,19 @@ pub type Configuration = primitives::ChangesTrieConfiguration; /// Compute the changes trie root and transaction for given block. /// Returns None if there's no data to perform computation. pub fn compute_changes_trie_root<'a, B: Backend, S: Storage, H: Hasher>( - backend: &B, - storage: Option<&'a S>, - changes: &OverlayedChanges, - parent: &'a AnchorBlockId, + backend: &B, + storage: Option<&'a S>, + changes: &OverlayedChanges, + parent: &'a AnchorBlockId, ) -> Option<(H::Out, Vec<(Vec, Vec)>)> - where - &'a S: TrieBackendStorage, - H::Out: Ord + HeapSizeOf, +where + &'a S: TrieBackendStorage, + H::Out: Ord + HeapSizeOf, { - let input_pairs = prepare_input::(backend, storage, changes, parent) - .expect("storage is not allowed to fail within runtime")?; - let transaction = input_pairs.into_iter() - .map(Into::into) - .collect::>(); - let root = trie_root::(transaction.iter().map(|(k, v)| (&*k, &*v))); + let input_pairs = prepare_input::(backend, storage, changes, parent) + .expect("storage is not allowed to fail within runtime")?; + let transaction = input_pairs.into_iter().map(Into::into).collect::>(); + let root = trie_root::(transaction.iter().map(|(k, v)| (&*k, &*v))); - Some((root, transaction)) + Some((root, transaction)) } diff --git a/core/state-machine/src/changes_trie/prune.rs b/core/state-machine/src/changes_trie/prune.rs index de872a3255..0ee77f54cb 100644 --- a/core/state-machine/src/changes_trie/prune.rs +++ b/core/state-machine/src/changes_trie/prune.rs @@ -16,29 +16,29 @@ //! Changes trie pruning-related functions. +use crate::changes_trie::storage::TrieBackendAdapter; +use crate::changes_trie::{AnchorBlockId, Configuration, Storage}; +use crate::proving_backend::ProvingBackendEssence; +use crate::trie_backend_essence::TrieBackendEssence; use hash_db::Hasher; use heapsize::HeapSizeOf; -use trie::Recorder; use log::warn; -use crate::proving_backend::ProvingBackendEssence; -use crate::trie_backend_essence::TrieBackendEssence; -use crate::changes_trie::{AnchorBlockId, Configuration, Storage}; -use crate::changes_trie::storage::TrieBackendAdapter; +use trie::Recorder; /// Get number of oldest block for which changes trie is not pruned /// given changes trie configuration, pruning parameter and number of /// best finalized block. pub fn oldest_non_pruned_trie( - config: &Configuration, - min_blocks_to_keep: u64, - best_finalized_block: u64, + config: &Configuration, + min_blocks_to_keep: u64, + best_finalized_block: u64, ) -> u64 { - let max_digest_interval = config.max_digest_interval(); - let max_digest_block = best_finalized_block - best_finalized_block % max_digest_interval; - match pruning_range(config, min_blocks_to_keep, max_digest_block) { - Some((_, last_pruned_block)) => last_pruned_block + 1, - None => 1, - } + let max_digest_interval = config.max_digest_interval(); + let max_digest_block = best_finalized_block - best_finalized_block % max_digest_interval; + match pruning_range(config, min_blocks_to_keep, max_digest_block) { + Some((_, last_pruned_block)) => last_pruned_block + 1, + None => 1, + } } /// Prune obslete changes tries. 
Puning happens at the same block, where highest @@ -47,85 +47,88 @@ pub fn oldest_non_pruned_trie( /// ranges. /// Returns MemoryDB that contains all deleted changes tries nodes. pub fn prune, H: Hasher, F: FnMut(H::Out)>( - config: &Configuration, - storage: &S, - min_blocks_to_keep: u64, - current_block: &AnchorBlockId, - mut remove_trie_node: F, -) - where - H::Out: HeapSizeOf, + config: &Configuration, + storage: &S, + min_blocks_to_keep: u64, + current_block: &AnchorBlockId, + mut remove_trie_node: F, +) where + H::Out: HeapSizeOf, { - // select range for pruning - let (first, last) = match pruning_range(config, min_blocks_to_keep, current_block.number) { - Some((first, last)) => (first, last), - None => return, - }; - - // delete changes trie for every block in range - // FIXME: limit `max_digest_interval` so that this cycle won't involve huge ranges - for block in first..last+1 { - let root = match storage.root(current_block, block) { - Ok(Some(root)) => root, - Ok(None) => continue, - Err(error) => { - // try to delete other tries - warn!(target: "trie", "Failed to read changes trie root from DB: {}", error); - continue; - }, - }; - - // enumerate all changes trie' keys, recording all nodes that have been 'touched' - // (effectively - all changes trie nodes) - let mut proof_recorder: Recorder = Default::default(); - { - let mut trie = ProvingBackendEssence::<_, H> { - backend: &TrieBackendEssence::new(TrieBackendAdapter::new(storage), root), - proof_recorder: &mut proof_recorder, - }; - trie.record_all_keys(); - } - - // all nodes of this changes trie should be pruned - remove_trie_node(root); - for node in proof_recorder.drain().into_iter().map(|n| n.hash) { - remove_trie_node(node); - } - } + // select range for pruning + let (first, last) = match pruning_range(config, min_blocks_to_keep, current_block.number) { + Some((first, last)) => (first, last), + None => return, + }; + + // delete changes trie for every block in range + // FIXME: limit `max_digest_interval` so that this cycle won't involve huge ranges + for block in first..last + 1 { + let root = match storage.root(current_block, block) { + Ok(Some(root)) => root, + Ok(None) => continue, + Err(error) => { + // try to delete other tries + warn!(target: "trie", "Failed to read changes trie root from DB: {}", error); + continue; + } + }; + + // enumerate all changes trie' keys, recording all nodes that have been 'touched' + // (effectively - all changes trie nodes) + let mut proof_recorder: Recorder = Default::default(); + { + let mut trie = ProvingBackendEssence::<_, H> { + backend: &TrieBackendEssence::new(TrieBackendAdapter::new(storage), root), + proof_recorder: &mut proof_recorder, + }; + trie.record_all_keys(); + } + + // all nodes of this changes trie should be pruned + remove_trie_node(root); + for node in proof_recorder.drain().into_iter().map(|n| n.hash) { + remove_trie_node(node); + } + } } /// Select blocks range (inclusive from both ends) for pruning changes tries in. 
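The arithmetic behind `pruning_range` below is easier to check on concrete numbers. A reduced sketch of the digest-enabled branch (it omits the `digest_level_at_block` gate, so the caller must pass a max-level digest block; names are ours):

// Standalone sketch mirroring `pruning_range` / `max_digest_intervals_to_keep`.
fn pruning_range_sketch(
    max_digest_interval: u64, // digest_interval ^ digest_levels
    min_blocks_to_keep: u64,
    block: u64, // assumed to be a max-level digest block
) -> Option<(u64, u64)> {
    // keep at least one max-level digest interval
    let intervals_to_keep = (min_blocks_to_keep / max_digest_interval).max(1);
    let blocks_to_keep = intervals_to_keep.checked_mul(max_digest_interval)?;
    let last = block.checked_sub(blocks_to_keep)?;
    let first = last.checked_sub(max_digest_interval)?;
    Some((first + 1, last))
}

fn main() {
    // config(4, 2): max interval is 4 * 4 = 16; at block 32, tries 1..=16 may go
    assert_eq!(pruning_range_sketch(16, 16, 32), Some((1, 16)));
    // one max-interval later the window moves on to 33..=48
    assert_eq!(pruning_range_sketch(16, 16, 64), Some((33, 48)));
}

Both asserts mirror cases from `pruning_range_works` below.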
-fn pruning_range(config: &Configuration, min_blocks_to_keep: u64, block: u64) -> Option<(u64, u64)> { - // compute number of changes tries we actually want to keep - let (prune_interval, blocks_to_keep) = if config.is_digest_build_enabled() { - // we only CAN prune at block where max-level-digest is created - let max_digest_interval = match config.digest_level_at_block(block) { - Some((digest_level, digest_interval, _)) if digest_level == config.digest_levels => - digest_interval, - _ => return None, - }; - - // compute maximal number of high-level digests to keep - let max_digest_intervals_to_keep = max_digest_intervals_to_keep(min_blocks_to_keep, max_digest_interval); - - // number of blocks BEFORE current block where changes tries are not pruned - ( - max_digest_interval, - max_digest_intervals_to_keep.checked_mul(max_digest_interval) - ) - } else { - ( - 1, - Some(min_blocks_to_keep) - ) - }; - - // last block for which changes trie is pruned - let last_block_to_prune = blocks_to_keep.and_then(|b| block.checked_sub(b)); - let first_block_to_prune = last_block_to_prune.clone().and_then(|b| b.checked_sub(prune_interval)); - - last_block_to_prune - .and_then(|last| first_block_to_prune.map(|first| (first + 1, last))) +fn pruning_range( + config: &Configuration, + min_blocks_to_keep: u64, + block: u64, +) -> Option<(u64, u64)> { + // compute number of changes tries we actually want to keep + let (prune_interval, blocks_to_keep) = if config.is_digest_build_enabled() { + // we only CAN prune at block where max-level-digest is created + let max_digest_interval = match config.digest_level_at_block(block) { + Some((digest_level, digest_interval, _)) if digest_level == config.digest_levels => { + digest_interval + } + _ => return None, + }; + + // compute maximal number of high-level digests to keep + let max_digest_intervals_to_keep = + max_digest_intervals_to_keep(min_blocks_to_keep, max_digest_interval); + + // number of blocks BEFORE current block where changes tries are not pruned + ( + max_digest_interval, + max_digest_intervals_to_keep.checked_mul(max_digest_interval), + ) + } else { + (1, Some(min_blocks_to_keep)) + }; + + // last block for which changes trie is pruned + let last_block_to_prune = blocks_to_keep.and_then(|b| block.checked_sub(b)); + let first_block_to_prune = last_block_to_prune + .clone() + .and_then(|b| b.checked_sub(prune_interval)); + + last_block_to_prune.and_then(|last| first_block_to_prune.map(|first| (first + 1, last))) } /// Select pruning delay for the changes tries. To make sure we could build a changes @@ -137,171 +140,208 @@ fn pruning_range(config: &Configuration, min_blocks_to_keep: u64, block: u64) -> /// 2: the last chnages trie + previous changes trie /// ... 
fn max_digest_intervals_to_keep(min_blocks_to_keep: u64, max_digest_interval: u64) -> u64 { - // config.digest_level_at_block ensures that it is not zero - debug_assert!(max_digest_interval != 0); - - let max_digest_intervals_to_keep = min_blocks_to_keep / max_digest_interval; - if max_digest_intervals_to_keep == 0 { - 1 - } else { - max_digest_intervals_to_keep - } + // config.digest_level_at_block ensures that it is not zero + debug_assert!(max_digest_interval != 0); + + let max_digest_intervals_to_keep = min_blocks_to_keep / max_digest_interval; + if max_digest_intervals_to_keep == 0 { + 1 + } else { + max_digest_intervals_to_keep + } } #[cfg(test)] mod tests { - use std::collections::HashSet; - use trie::MemoryDB; - use primitives::Blake2Hasher; - use crate::backend::insert_into_memory_db; - use crate::changes_trie::storage::InMemoryStorage; - use super::*; - - fn config(interval: u64, levels: u32) -> Configuration { - Configuration { - digest_interval: interval, - digest_levels: levels, - } - } - - fn prune_by_collect, H: Hasher>( - config: &Configuration, - storage: &S, - min_blocks_to_keep: u64, - current_block: u64, - ) -> HashSet - where - H::Out: HeapSizeOf, - { - let mut pruned_trie_nodes = HashSet::new(); - prune(config, storage, min_blocks_to_keep, &AnchorBlockId { hash: Default::default(), number: current_block }, - |node| { pruned_trie_nodes.insert(node); }); - pruned_trie_nodes - } - - #[test] - fn prune_works() { - fn prepare_storage() -> InMemoryStorage { - let mut mdb1 = MemoryDB::::default(); - let root1 = insert_into_memory_db::(&mut mdb1, vec![(vec![10], vec![20])]).unwrap(); - let mut mdb2 = MemoryDB::::default(); - let root2 = insert_into_memory_db::(&mut mdb2, vec![(vec![11], vec![21]), (vec![12], vec![22])]).unwrap(); - let mut mdb3 = MemoryDB::::default(); - let root3 = insert_into_memory_db::(&mut mdb3, vec![(vec![13], vec![23]), (vec![14], vec![24])]).unwrap(); - let mut mdb4 = MemoryDB::::default(); - let root4 = insert_into_memory_db::(&mut mdb4, vec![(vec![15], vec![25])]).unwrap(); - let storage = InMemoryStorage::new(); - storage.insert(65, root1, mdb1); - storage.insert(66, root2, mdb2); - storage.insert(67, root3, mdb3); - storage.insert(68, root4, mdb4); - - storage - } - - // l1-digest is created every 2 blocks - // l2-digest is created every 4 blocks - // we do not want to keep any additional changes tries - // => only one l2-digest is saved AND it is pruned once next is created - let config = Configuration { digest_interval: 2, digest_levels: 2 }; - let storage = prepare_storage(); - assert!(prune_by_collect(&config, &storage, 0, 69).is_empty()); - assert!(prune_by_collect(&config, &storage, 0, 70).is_empty()); - assert!(prune_by_collect(&config, &storage, 0, 71).is_empty()); - let non_empty = prune_by_collect(&config, &storage, 0, 72); - assert!(!non_empty.is_empty()); - storage.remove_from_storage(&non_empty); - assert!(storage.into_mdb().drain().is_empty()); - - // l1-digest is created every 2 blocks - // l2-digest is created every 4 blocks - // we want keep 1 additional changes tries - let config = Configuration { digest_interval: 2, digest_levels: 2 }; - let storage = prepare_storage(); - assert!(prune_by_collect(&config, &storage, 8, 69).is_empty()); - assert!(prune_by_collect(&config, &storage, 8, 70).is_empty()); - assert!(prune_by_collect(&config, &storage, 8, 71).is_empty()); - assert!(prune_by_collect(&config, &storage, 8, 72).is_empty()); - assert!(prune_by_collect(&config, &storage, 8, 73).is_empty()); - assert!(prune_by_collect(&config, 
&storage, 8, 74).is_empty()); - assert!(prune_by_collect(&config, &storage, 8, 75).is_empty()); - let non_empty = prune_by_collect(&config, &storage, 8, 76); - assert!(!non_empty.is_empty()); - storage.remove_from_storage(&non_empty); - assert!(storage.into_mdb().drain().is_empty()); - - // l1-digest is created every 2 blocks - // we want keep 2 additional changes tries - let config = Configuration { digest_interval: 2, digest_levels: 1 }; - let storage = prepare_storage(); - assert!(prune_by_collect(&config, &storage, 4, 69).is_empty()); - let non_empty = prune_by_collect(&config, &storage, 4, 70); - assert!(!non_empty.is_empty()); - storage.remove_from_storage(&non_empty); - assert!(prune_by_collect(&config, &storage, 4, 71).is_empty()); - let non_empty = prune_by_collect(&config, &storage, 4, 72); - assert!(!non_empty.is_empty()); - storage.remove_from_storage(&non_empty); - assert!(storage.into_mdb().drain().is_empty()); - } - - #[test] - fn pruning_range_works() { - // DIGESTS ARE NOT CREATED + NO TRIES ARE PRUNED - assert_eq!(pruning_range(&config(10, 0), 2, 2), None); - - // DIGESTS ARE NOT CREATED + SOME TRIES ARE PRUNED - assert_eq!(pruning_range(&config(10, 0), 100, 110), Some((10, 10))); - assert_eq!(pruning_range(&config(10, 0), 100, 210), Some((110, 110))); - - // DIGESTS ARE CREATED + NO TRIES ARE PRUNED - - assert_eq!(pruning_range(&config(10, 2), 2, 0), None); - assert_eq!(pruning_range(&config(10, 2), 30, 100), None); - assert_eq!(pruning_range(&config(::std::u64::MAX, 2), 1, 1024), None); - assert_eq!(pruning_range(&config(::std::u64::MAX, 2), ::std::u64::MAX, 1024), None); - assert_eq!(pruning_range(&config(32, 2), 2048, 512), None); - assert_eq!(pruning_range(&config(32, 2), 2048, 1024), None); - - // DIGESTS ARE CREATED + SOME TRIES ARE PRUNED - - // when we do not want to keep any highest-level-digests - // (system forces to keep at least one) - assert_eq!(pruning_range(&config(4, 2), 0, 32), Some((1, 16))); - assert_eq!(pruning_range(&config(4, 2), 0, 64), Some((33, 48))); - // when we want to keep 1 (last) highest-level-digest - assert_eq!(pruning_range(&config(4, 2), 16, 32), Some((1, 16))); - assert_eq!(pruning_range(&config(4, 2), 16, 64), Some((33, 48))); - // when we want to keep 1 (last) + 1 additional level digests - assert_eq!(pruning_range(&config(32, 2), 4096, 5120), Some((1, 1024))); - assert_eq!(pruning_range(&config(32, 2), 4096, 6144), Some((1025, 2048))); - } - - #[test] - fn max_digest_intervals_to_keep_works() { - assert_eq!(max_digest_intervals_to_keep(1024, 1025), 1); - assert_eq!(max_digest_intervals_to_keep(1024, 1023), 1); - assert_eq!(max_digest_intervals_to_keep(1024, 512), 2); - assert_eq!(max_digest_intervals_to_keep(1024, 511), 2); - assert_eq!(max_digest_intervals_to_keep(1024, 100), 10); - } - - #[test] - fn oldest_non_pruned_trie_works() { - // when digests are not created at all - assert_eq!(oldest_non_pruned_trie(&config(0, 0), 100, 10), 1); - assert_eq!(oldest_non_pruned_trie(&config(0, 0), 100, 110), 11); - - // when only l1 digests are created - assert_eq!(oldest_non_pruned_trie(&config(100, 1), 100, 50), 1); - assert_eq!(oldest_non_pruned_trie(&config(100, 1), 100, 110), 1); - assert_eq!(oldest_non_pruned_trie(&config(100, 1), 100, 210), 101); - - // when l2 digests are created - assert_eq!(oldest_non_pruned_trie(&config(100, 2), 100, 50), 1); - assert_eq!(oldest_non_pruned_trie(&config(100, 2), 100, 110), 1); - assert_eq!(oldest_non_pruned_trie(&config(100, 2), 100, 210), 1); - assert_eq!(oldest_non_pruned_trie(&config(100, 2), 
100, 10110), 1);
-		assert_eq!(oldest_non_pruned_trie(&config(100, 2), 100, 20110), 10001);
-	}
+    use super::*;
+    use crate::backend::insert_into_memory_db;
+    use crate::changes_trie::storage::InMemoryStorage;
+    use primitives::Blake2Hasher;
+    use std::collections::HashSet;
+    use trie::MemoryDB;
+
+    fn config(interval: u64, levels: u32) -> Configuration {
+        Configuration {
+            digest_interval: interval,
+            digest_levels: levels,
+        }
+    }
+
+    fn prune_by_collect<S: Storage<H>, H: Hasher>(
+        config: &Configuration,
+        storage: &S,
+        min_blocks_to_keep: u64,
+        current_block: u64,
+    ) -> HashSet<H::Out>
+    where
+        H::Out: HeapSizeOf,
+    {
+        let mut pruned_trie_nodes = HashSet::new();
+        prune(
+            config,
+            storage,
+            min_blocks_to_keep,
+            &AnchorBlockId {
+                hash: Default::default(),
+                number: current_block,
+            },
+            |node| {
+                pruned_trie_nodes.insert(node);
+            },
+        );
+        pruned_trie_nodes
+    }
+
+    #[test]
+    fn prune_works() {
+        fn prepare_storage() -> InMemoryStorage<Blake2Hasher> {
+            let mut mdb1 = MemoryDB::<Blake2Hasher>::default();
+            let root1 =
+                insert_into_memory_db::<Blake2Hasher, _>(&mut mdb1, vec![(vec![10], vec![20])])
+                    .unwrap();
+            let mut mdb2 = MemoryDB::<Blake2Hasher>::default();
+            let root2 = insert_into_memory_db::<Blake2Hasher, _>(
+                &mut mdb2,
+                vec![(vec![11], vec![21]), (vec![12], vec![22])],
+            )
+            .unwrap();
+            let mut mdb3 = MemoryDB::<Blake2Hasher>::default();
+            let root3 = insert_into_memory_db::<Blake2Hasher, _>(
+                &mut mdb3,
+                vec![(vec![13], vec![23]), (vec![14], vec![24])],
+            )
+            .unwrap();
+            let mut mdb4 = MemoryDB::<Blake2Hasher>::default();
+            let root4 =
+                insert_into_memory_db::<Blake2Hasher, _>(&mut mdb4, vec![(vec![15], vec![25])])
+                    .unwrap();
+            let storage = InMemoryStorage::new();
+            storage.insert(65, root1, mdb1);
+            storage.insert(66, root2, mdb2);
+            storage.insert(67, root3, mdb3);
+            storage.insert(68, root4, mdb4);
+
+            storage
+        }
+
+        // l1-digest is created every 2 blocks
+        // l2-digest is created every 4 blocks
+        // we do not want to keep any additional changes tries
+        // => only one l2-digest is saved AND it is pruned once next is created
+        let config = Configuration {
+            digest_interval: 2,
+            digest_levels: 2,
+        };
+        let storage = prepare_storage();
+        assert!(prune_by_collect(&config, &storage, 0, 69).is_empty());
+        assert!(prune_by_collect(&config, &storage, 0, 70).is_empty());
+        assert!(prune_by_collect(&config, &storage, 0, 71).is_empty());
+        let non_empty = prune_by_collect(&config, &storage, 0, 72);
+        assert!(!non_empty.is_empty());
+        storage.remove_from_storage(&non_empty);
+        assert!(storage.into_mdb().drain().is_empty());
+
+        // l1-digest is created every 2 blocks
+        // l2-digest is created every 4 blocks
+        // we want to keep 1 additional changes trie
+        let config = Configuration {
+            digest_interval: 2,
+            digest_levels: 2,
+        };
+        let storage = prepare_storage();
+        assert!(prune_by_collect(&config, &storage, 8, 69).is_empty());
+        assert!(prune_by_collect(&config, &storage, 8, 70).is_empty());
+        assert!(prune_by_collect(&config, &storage, 8, 71).is_empty());
+        assert!(prune_by_collect(&config, &storage, 8, 72).is_empty());
+        assert!(prune_by_collect(&config, &storage, 8, 73).is_empty());
+        assert!(prune_by_collect(&config, &storage, 8, 74).is_empty());
+        assert!(prune_by_collect(&config, &storage, 8, 75).is_empty());
+        let non_empty = prune_by_collect(&config, &storage, 8, 76);
+        assert!(!non_empty.is_empty());
+        storage.remove_from_storage(&non_empty);
+        assert!(storage.into_mdb().drain().is_empty());
+
+        // l1-digest is created every 2 blocks
+        // we want to keep 2 additional changes tries
+        let config = Configuration {
+            digest_interval: 2,
+            digest_levels: 1,
+        };
+        let storage = prepare_storage();
+
assert!(prune_by_collect(&config, &storage, 4, 69).is_empty()); + let non_empty = prune_by_collect(&config, &storage, 4, 70); + assert!(!non_empty.is_empty()); + storage.remove_from_storage(&non_empty); + assert!(prune_by_collect(&config, &storage, 4, 71).is_empty()); + let non_empty = prune_by_collect(&config, &storage, 4, 72); + assert!(!non_empty.is_empty()); + storage.remove_from_storage(&non_empty); + assert!(storage.into_mdb().drain().is_empty()); + } + + #[test] + fn pruning_range_works() { + // DIGESTS ARE NOT CREATED + NO TRIES ARE PRUNED + assert_eq!(pruning_range(&config(10, 0), 2, 2), None); + + // DIGESTS ARE NOT CREATED + SOME TRIES ARE PRUNED + assert_eq!(pruning_range(&config(10, 0), 100, 110), Some((10, 10))); + assert_eq!(pruning_range(&config(10, 0), 100, 210), Some((110, 110))); + + // DIGESTS ARE CREATED + NO TRIES ARE PRUNED + + assert_eq!(pruning_range(&config(10, 2), 2, 0), None); + assert_eq!(pruning_range(&config(10, 2), 30, 100), None); + assert_eq!(pruning_range(&config(::std::u64::MAX, 2), 1, 1024), None); + assert_eq!( + pruning_range(&config(::std::u64::MAX, 2), ::std::u64::MAX, 1024), + None + ); + assert_eq!(pruning_range(&config(32, 2), 2048, 512), None); + assert_eq!(pruning_range(&config(32, 2), 2048, 1024), None); + + // DIGESTS ARE CREATED + SOME TRIES ARE PRUNED + + // when we do not want to keep any highest-level-digests + // (system forces to keep at least one) + assert_eq!(pruning_range(&config(4, 2), 0, 32), Some((1, 16))); + assert_eq!(pruning_range(&config(4, 2), 0, 64), Some((33, 48))); + // when we want to keep 1 (last) highest-level-digest + assert_eq!(pruning_range(&config(4, 2), 16, 32), Some((1, 16))); + assert_eq!(pruning_range(&config(4, 2), 16, 64), Some((33, 48))); + // when we want to keep 1 (last) + 1 additional level digests + assert_eq!(pruning_range(&config(32, 2), 4096, 5120), Some((1, 1024))); + assert_eq!( + pruning_range(&config(32, 2), 4096, 6144), + Some((1025, 2048)) + ); + } + + #[test] + fn max_digest_intervals_to_keep_works() { + assert_eq!(max_digest_intervals_to_keep(1024, 1025), 1); + assert_eq!(max_digest_intervals_to_keep(1024, 1023), 1); + assert_eq!(max_digest_intervals_to_keep(1024, 512), 2); + assert_eq!(max_digest_intervals_to_keep(1024, 511), 2); + assert_eq!(max_digest_intervals_to_keep(1024, 100), 10); + } + + #[test] + fn oldest_non_pruned_trie_works() { + // when digests are not created at all + assert_eq!(oldest_non_pruned_trie(&config(0, 0), 100, 10), 1); + assert_eq!(oldest_non_pruned_trie(&config(0, 0), 100, 110), 11); + + // when only l1 digests are created + assert_eq!(oldest_non_pruned_trie(&config(100, 1), 100, 50), 1); + assert_eq!(oldest_non_pruned_trie(&config(100, 1), 100, 110), 1); + assert_eq!(oldest_non_pruned_trie(&config(100, 1), 100, 210), 101); + + // when l2 digests are created + assert_eq!(oldest_non_pruned_trie(&config(100, 2), 100, 50), 1); + assert_eq!(oldest_non_pruned_trie(&config(100, 2), 100, 110), 1); + assert_eq!(oldest_non_pruned_trie(&config(100, 2), 100, 210), 1); + assert_eq!(oldest_non_pruned_trie(&config(100, 2), 100, 10110), 1); + assert_eq!(oldest_non_pruned_trie(&config(100, 2), 100, 20110), 10001); + } } diff --git a/core/state-machine/src/changes_trie/storage.rs b/core/state-machine/src/changes_trie/storage.rs index decc332c1a..0a732944f5 100644 --- a/core/state-machine/src/changes_trie/storage.rs +++ b/core/state-machine/src/changes_trie/storage.rs @@ -16,121 +16,140 @@ //! Changes trie storage utilities. 
-use std::collections::HashMap; +use crate::changes_trie::{AnchorBlockId, RootsStorage, Storage}; +use crate::trie_backend_essence::TrieBackendStorage; use hash_db::Hasher; -use trie::DBValue; use heapsize::HeapSizeOf; -use trie::MemoryDB; use parking_lot::RwLock; -use crate::changes_trie::{AnchorBlockId, RootsStorage, Storage}; -use crate::trie_backend_essence::TrieBackendStorage; +use std::collections::HashMap; +use trie::DBValue; +use trie::MemoryDB; -#[cfg(test)] -use std::collections::HashSet; #[cfg(test)] use crate::backend::insert_into_memory_db; #[cfg(test)] use crate::changes_trie::input::InputPair; +#[cfg(test)] +use std::collections::HashSet; /// In-memory implementation of changes trie storage. -pub struct InMemoryStorage where H::Out: HeapSizeOf { - data: RwLock>, +pub struct InMemoryStorage +where + H::Out: HeapSizeOf, +{ + data: RwLock>, } /// Adapter for using changes trie storage as a TrieBackendEssence' storage. pub struct TrieBackendAdapter<'a, H: Hasher, S: 'a + Storage> { - storage: &'a S, - _hasher: ::std::marker::PhantomData, + storage: &'a S, + _hasher: ::std::marker::PhantomData, } -struct InMemoryStorageData where H::Out: HeapSizeOf { - roots: HashMap, - mdb: MemoryDB, +struct InMemoryStorageData +where + H::Out: HeapSizeOf, +{ + roots: HashMap, + mdb: MemoryDB, } -impl InMemoryStorage where H::Out: HeapSizeOf { - /// Create the storage from given in-memory database. - pub fn with_db(mdb: MemoryDB) -> Self { - Self { - data: RwLock::new(InMemoryStorageData { - roots: HashMap::new(), - mdb, - }), - } - } - - /// Create the storage with empty database. - pub fn new() -> Self { - Self::with_db(Default::default()) - } - - #[cfg(test)] - pub fn with_inputs(inputs: Vec<(u64, Vec)>) -> Self { - let mut mdb = MemoryDB::default(); - let mut roots = HashMap::new(); - for (block, pairs) in inputs { - let root = insert_into_memory_db::(&mut mdb, pairs.into_iter().map(Into::into)); - if let Some(root) = root { - roots.insert(block, root); - } - } - - InMemoryStorage { - data: RwLock::new(InMemoryStorageData { - roots, - mdb, - }), - } - } - - #[cfg(test)] - pub fn clear_storage(&self) { - self.data.write().mdb = MemoryDB::default(); // use new to be more correct - } - - #[cfg(test)] - pub fn remove_from_storage(&self, keys: &HashSet) { - let mut data = self.data.write(); - for key in keys { - data.mdb.remove_and_purge(key, &[]); - } - } - - #[cfg(test)] - pub fn into_mdb(self) -> MemoryDB { - self.data.into_inner().mdb - } - - /// Insert changes trie for given block. - pub fn insert(&self, block: u64, changes_trie_root: H::Out, trie: MemoryDB) { - let mut data = self.data.write(); - data.roots.insert(block, changes_trie_root); - data.mdb.consolidate(trie); - } +impl InMemoryStorage +where + H::Out: HeapSizeOf, +{ + /// Create the storage from given in-memory database. + pub fn with_db(mdb: MemoryDB) -> Self { + Self { + data: RwLock::new(InMemoryStorageData { + roots: HashMap::new(), + mdb, + }), + } + } + + /// Create the storage with empty database. 
+ pub fn new() -> Self { + Self::with_db(Default::default()) + } + + #[cfg(test)] + pub fn with_inputs(inputs: Vec<(u64, Vec)>) -> Self { + let mut mdb = MemoryDB::default(); + let mut roots = HashMap::new(); + for (block, pairs) in inputs { + let root = insert_into_memory_db::(&mut mdb, pairs.into_iter().map(Into::into)); + if let Some(root) = root { + roots.insert(block, root); + } + } + + InMemoryStorage { + data: RwLock::new(InMemoryStorageData { roots, mdb }), + } + } + + #[cfg(test)] + pub fn clear_storage(&self) { + self.data.write().mdb = MemoryDB::default(); // use new to be more correct + } + + #[cfg(test)] + pub fn remove_from_storage(&self, keys: &HashSet) { + let mut data = self.data.write(); + for key in keys { + data.mdb.remove_and_purge(key, &[]); + } + } + + #[cfg(test)] + pub fn into_mdb(self) -> MemoryDB { + self.data.into_inner().mdb + } + + /// Insert changes trie for given block. + pub fn insert(&self, block: u64, changes_trie_root: H::Out, trie: MemoryDB) { + let mut data = self.data.write(); + data.roots.insert(block, changes_trie_root); + data.mdb.consolidate(trie); + } } -impl RootsStorage for InMemoryStorage where H::Out: HeapSizeOf { - fn root(&self, _anchor_block: &AnchorBlockId, block: u64) -> Result, String> { - Ok(self.data.read().roots.get(&block).cloned()) - } +impl RootsStorage for InMemoryStorage +where + H::Out: HeapSizeOf, +{ + fn root( + &self, + _anchor_block: &AnchorBlockId, + block: u64, + ) -> Result, String> { + Ok(self.data.read().roots.get(&block).cloned()) + } } -impl Storage for InMemoryStorage where H::Out: HeapSizeOf { - fn get(&self, key: &H::Out, prefix: &[u8]) -> Result, String> { - MemoryDB::::get(&self.data.read().mdb, key, prefix) - } +impl Storage for InMemoryStorage +where + H::Out: HeapSizeOf, +{ + fn get(&self, key: &H::Out, prefix: &[u8]) -> Result, String> { + MemoryDB::::get(&self.data.read().mdb, key, prefix) + } } impl<'a, H: Hasher, S: 'a + Storage> TrieBackendAdapter<'a, H, S> { - pub fn new(storage: &'a S) -> Self { - Self { storage, _hasher: Default::default() } - } + pub fn new(storage: &'a S) -> Self { + Self { + storage, + _hasher: Default::default(), + } + } } impl<'a, H: Hasher, S: 'a + Storage> TrieBackendStorage for TrieBackendAdapter<'a, H, S> { - type Overlay = MemoryDB; + type Overlay = MemoryDB; - fn get(&self, key: &H::Out, prefix: &[u8]) -> Result, String> { - self.storage.get(key, prefix) - } + fn get(&self, key: &H::Out, prefix: &[u8]) -> Result, String> { + self.storage.get(key, prefix) + } } diff --git a/core/state-machine/src/ext.rs b/core/state-machine/src/ext.rs index 33074c7059..af70fd3def 100644 --- a/core/state-machine/src/ext.rs +++ b/core/state-machine/src/ext.rs @@ -16,416 +16,493 @@ //! Conrete externalities implementation. 
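One design note on the `InMemoryStorage` just reformatted above: it keeps its data behind a lock so the tests can insert tries through a shared reference while readers hold `&self`. A reduced sketch of that interior-mutability pattern (std `RwLock` instead of the crate's `parking_lot`, toy 32-byte roots; names are illustrative):

use std::collections::HashMap;
use std::sync::RwLock;

struct Roots {
    data: RwLock<HashMap<u64, [u8; 32]>>, // block number -> changes trie root
}

impl Roots {
    fn insert(&self, block: u64, root: [u8; 32]) {
        // write access through a shared reference, as in the tests above
        self.data.write().unwrap().insert(block, root);
    }
    fn root(&self, block: u64) -> Option<[u8; 32]> {
        self.data.read().unwrap().get(&block).copied()
    }
}

fn main() {
    let roots = Roots { data: RwLock::new(HashMap::new()) };
    roots.insert(65, [1; 32]);
    assert_eq!(roots.root(65), Some([1; 32]));
    assert_eq!(roots.root(66), None);
}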
-use std::{error, fmt, cmp::Ord}; -use log::warn; use crate::backend::{Backend, Consolidate}; -use crate::changes_trie::{AnchorBlockId, Storage as ChangesTrieStorage, compute_changes_trie_root}; -use crate::{Externalities, OverlayedChanges, OffchainExt}; +use crate::changes_trie::{ + compute_changes_trie_root, AnchorBlockId, Storage as ChangesTrieStorage, +}; +use crate::{Externalities, OffchainExt, OverlayedChanges}; use hash_db::Hasher; -use primitives::storage::well_known_keys::is_child_storage_key; -use trie::{MemoryDB, TrieDBMut, TrieMut, default_child_trie_root, is_child_trie_key_valid}; use heapsize::HeapSizeOf; +use log::warn; +use primitives::storage::well_known_keys::is_child_storage_key; +use std::{cmp::Ord, error, fmt}; +use trie::{default_child_trie_root, is_child_trie_key_valid, MemoryDB, TrieDBMut, TrieMut}; const EXT_NOT_ALLOWED_TO_FAIL: &str = "Externalities not allowed to fail within runtime"; /// Errors that can occur when interacting with the externalities. #[derive(Debug, Copy, Clone)] pub enum Error { - /// Failure to load state data from the backend. - #[allow(unused)] - Backend(B), - /// Failure to execute a function. - #[allow(unused)] - Executor(E), + /// Failure to load state data from the backend. + #[allow(unused)] + Backend(B), + /// Failure to execute a function. + #[allow(unused)] + Executor(E), } impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - Error::Backend(ref e) => write!(f, "Storage backend error: {}", e), - Error::Executor(ref e) => write!(f, "Sub-call execution error: {}", e), - } - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + Error::Backend(ref e) => write!(f, "Storage backend error: {}", e), + Error::Executor(ref e) => write!(f, "Sub-call execution error: {}", e), + } + } } impl error::Error for Error { - fn description(&self) -> &str { - match *self { - Error::Backend(..) => "backend error", - Error::Executor(..) => "executor error", - } - } + fn description(&self) -> &str { + match *self { + Error::Backend(..) => "backend error", + Error::Executor(..) => "executor error", + } + } } /// Wraps a read-only backend, call executor, and current overlayed changes. pub struct Ext<'a, H, B, T, O> where - H: Hasher, + H: Hasher, - B: 'a + Backend, + B: 'a + Backend, { - /// The overlayed changes to write to. - overlay: &'a mut OverlayedChanges, - /// The storage backend to read from. - backend: &'a B, - /// The storage transaction necessary to commit to the backend. Is cached when - /// `storage_root` is called and the cache is cleared on every subsequent change. - storage_transaction: Option<(B::Transaction, H::Out)>, - /// Changes trie storage to read from. - changes_trie_storage: Option<&'a T>, - /// The changes trie transaction necessary to commit to the changes trie backend. - /// Set to Some when `storage_changes_root` is called. Could be replaced later - /// by calling `storage_changes_root` again => never used as cache. - /// This differs from `storage_transaction` behavior, because the moment when - /// `storage_changes_root` is called matters + we need to remember additional - /// data at this moment (block number). - changes_trie_transaction: Option<(u64, MemoryDB, H::Out)>, - /// Additional externalities for offchain workers. - /// - /// If None, some methods from the trait might not supported. - offchain_externalities: Option<&'a mut O>, + /// The overlayed changes to write to. + overlay: &'a mut OverlayedChanges, + /// The storage backend to read from. 
+    backend: &'a B,
+    /// The storage transaction necessary to commit to the backend. Is cached when
+    /// `storage_root` is called and the cache is cleared on every subsequent change.
+    storage_transaction: Option<(B::Transaction, H::Out)>,
+    /// Changes trie storage to read from.
+    changes_trie_storage: Option<&'a T>,
+    /// The changes trie transaction necessary to commit to the changes trie backend.
+    /// Set to Some when `storage_changes_root` is called. Could be replaced later
+    /// by calling `storage_changes_root` again => never used as cache.
+    /// This differs from `storage_transaction` behavior, because the moment when
+    /// `storage_changes_root` is called matters + we need to remember additional
+    /// data at this moment (block number).
+    changes_trie_transaction: Option<(u64, MemoryDB<H>, H::Out)>,
+    /// Additional externalities for offchain workers.
+    ///
+    /// If None, some methods from the trait might not be supported.
+    offchain_externalities: Option<&'a mut O>,
 }

 impl<'a, H, B, T, O> Ext<'a, H, B, T, O>
 where
-	H: Hasher,
-	B: 'a + Backend<H>,
-	T: 'a + ChangesTrieStorage<H>,
-	O: 'a + OffchainExt,
-	H::Out: Ord + HeapSizeOf,
+    H: Hasher,
+    B: 'a + Backend<H>,
+    T: 'a + ChangesTrieStorage<H>,
+    O: 'a + OffchainExt,
+    H::Out: Ord + HeapSizeOf,
 {
-	/// Create a new `Ext` from overlayed changes and read-only backend
-	pub fn new(
-		overlay: &'a mut OverlayedChanges,
-		backend: &'a B,
-		changes_trie_storage: Option<&'a T>,
-		offchain_externalities: Option<&'a mut O>,
-	) -> Self {
-		Ext {
-			overlay,
-			backend,
-			storage_transaction: None,
-			changes_trie_storage,
-			changes_trie_transaction: None,
-			offchain_externalities,
-		}
-	}
-
-	/// Get the transaction necessary to update the backend.
-	pub fn transaction(mut self) -> (B::Transaction, Option<MemoryDB<H>>) {
-		let _ = self.storage_root();
-
-		let (storage_transaction, changes_trie_transaction) = (
-			self.storage_transaction
-				.expect("storage_transaction always set after calling storage root; qed"),
-			self.changes_trie_transaction
-				.map(|(_, tx, _)| tx),
-		);
-
-		(
-			storage_transaction.0,
-			changes_trie_transaction,
-		)
-	}
-
-	/// Invalidates the currently cached storage root and the db transaction.
-	///
-	/// Called when there are changes that likely will invalidate the storage root.
-	fn mark_dirty(&mut self) {
-		self.storage_transaction = None;
-	}
-
-	/// Fetch child storage root together with its transaction.
-	fn child_storage_root_transaction(&mut self, storage_key: &[u8]) -> (Vec<u8>, B::Transaction) {
-		self.mark_dirty();
-
-		let (root, is_default, transaction) = {
-			let delta = self.overlay.committed.children.get(storage_key)
-				.into_iter()
-				.flat_map(|map| map.1.iter().map(|(k, v)| (k.clone(), v.clone())))
-				.chain(self.overlay.prospective.children.get(storage_key)
-					.into_iter()
-					.flat_map(|map| map.1.iter().map(|(k, v)| (k.clone(), v.clone()))));
-
-			self.backend.child_storage_root(storage_key, delta)
-		};
-
-		let root_val = if is_default {
-			None
-		} else {
-			Some(root.clone())
-		};
-		self.overlay.sync_child_storage_root(storage_key, root_val);
-
-		(root, transaction)
-	}
+    /// Create a new `Ext` from overlayed changes and read-only backend
+    pub fn new(
+        overlay: &'a mut OverlayedChanges,
+        backend: &'a B,
+        changes_trie_storage: Option<&'a T>,
+        offchain_externalities: Option<&'a mut O>,
+    ) -> Self {
+        Ext {
+            overlay,
+            backend,
+            storage_transaction: None,
+            changes_trie_storage,
+            changes_trie_transaction: None,
+            offchain_externalities,
+        }
+    }
+
+    /// Get the transaction necessary to update the backend.
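The `storage_transaction` field documented above follows a simple memoization contract: `storage_root` fills the cache, every write path clears it via `mark_dirty`, and that is why the methods that follow can safely `expect` the cached value right after computing the root. A toy model of that contract (a sum stands in for the real trie root; names are illustrative):

use std::collections::BTreeMap;

struct ToyExt {
    data: BTreeMap<Vec<u8>, u64>,
    cached_root: Option<u64>,
}

impl ToyExt {
    fn place_storage(&mut self, key: Vec<u8>, value: u64) {
        self.cached_root = None; // mark_dirty: any change invalidates the cache
        self.data.insert(key, value);
    }
    fn storage_root(&mut self) -> u64 {
        if let Some(root) = self.cached_root {
            return root; // repeated calls are served from the cache
        }
        let root = self.data.values().sum();
        self.cached_root = Some(root);
        root
    }
}

fn main() {
    let mut ext = ToyExt { data: BTreeMap::new(), cached_root: None };
    ext.place_storage(b"a".to_vec(), 1);
    assert_eq!(ext.storage_root(), 1);
    assert_eq!(ext.storage_root(), 1); // cached
    ext.place_storage(b"b".to_vec(), 2);
    assert_eq!(ext.storage_root(), 3); // cache was invalidated by the write
}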
+ pub fn transaction(mut self) -> (B::Transaction, Option>) { + let _ = self.storage_root(); + + let (storage_transaction, changes_trie_transaction) = ( + self.storage_transaction + .expect("storage_transaction always set after calling storage root; qed"), + self.changes_trie_transaction.map(|(_, tx, _)| tx), + ); + + (storage_transaction.0, changes_trie_transaction) + } + + /// Invalidates the currently cached storage root and the db transaction. + /// + /// Called when there are changes that likely will invalidate the storage root. + fn mark_dirty(&mut self) { + self.storage_transaction = None; + } + + /// Fetch child storage root together with its transaction. + fn child_storage_root_transaction(&mut self, storage_key: &[u8]) -> (Vec, B::Transaction) { + self.mark_dirty(); + + let (root, is_default, transaction) = { + let delta = self + .overlay + .committed + .children + .get(storage_key) + .into_iter() + .flat_map(|map| map.1.iter().map(|(k, v)| (k.clone(), v.clone()))) + .chain( + self.overlay + .prospective + .children + .get(storage_key) + .into_iter() + .flat_map(|map| map.1.iter().map(|(k, v)| (k.clone(), v.clone()))), + ); + + self.backend.child_storage_root(storage_key, delta) + }; + + let root_val = if is_default { None } else { Some(root.clone()) }; + self.overlay.sync_child_storage_root(storage_key, root_val); + + (root, transaction) + } } #[cfg(test)] impl<'a, H, B, T, O> Ext<'a, H, B, T, O> where - H: Hasher, + H: Hasher, - B: 'a + Backend, - T: 'a + ChangesTrieStorage, - O: 'a + OffchainExt, + B: 'a + Backend, + T: 'a + ChangesTrieStorage, + O: 'a + OffchainExt, { - pub fn storage_pairs(&self) -> Vec<(Vec, Vec)> { - use std::collections::HashMap; - - self.backend.pairs().iter() - .map(|&(ref k, ref v)| (k.to_vec(), Some(v.to_vec()))) - .chain(self.overlay.committed.top.clone().into_iter().map(|(k, v)| (k, v.value))) - .chain(self.overlay.prospective.top.clone().into_iter().map(|(k, v)| (k, v.value))) - .collect::>() - .into_iter() - .filter_map(|(k, maybe_val)| maybe_val.map(|val| (k, val))) - .collect() - } + pub fn storage_pairs(&self) -> Vec<(Vec, Vec)> { + use std::collections::HashMap; + + self.backend + .pairs() + .iter() + .map(|&(ref k, ref v)| (k.to_vec(), Some(v.to_vec()))) + .chain( + self.overlay + .committed + .top + .clone() + .into_iter() + .map(|(k, v)| (k, v.value)), + ) + .chain( + self.overlay + .prospective + .top + .clone() + .into_iter() + .map(|(k, v)| (k, v.value)), + ) + .collect::>() + .into_iter() + .filter_map(|(k, maybe_val)| maybe_val.map(|val| (k, val))) + .collect() + } } impl<'a, B, T, H, O> Externalities for Ext<'a, H, B, T, O> where - H: Hasher, - B: 'a + Backend, - T: 'a + ChangesTrieStorage, - O: 'a + OffchainExt, - H::Out: Ord + HeapSizeOf, + H: Hasher, + B: 'a + Backend, + T: 'a + ChangesTrieStorage, + O: 'a + OffchainExt, + H::Out: Ord + HeapSizeOf, { - fn storage(&self, key: &[u8]) -> Option> { - let _guard = panic_handler::AbortGuard::new(true); - self.overlay.storage(key).map(|x| x.map(|x| x.to_vec())).unwrap_or_else(|| - self.backend.storage(key).expect(EXT_NOT_ALLOWED_TO_FAIL)) - } - - fn storage_hash(&self, key: &[u8]) -> Option { - let _guard = panic_handler::AbortGuard::new(true); - self.overlay.storage(key).map(|x| x.map(|x| H::hash(x))).unwrap_or_else(|| - self.backend.storage_hash(key).expect(EXT_NOT_ALLOWED_TO_FAIL)) - } - - fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Option> { - let _guard = panic_handler::AbortGuard::new(true); - self.overlay.child_storage(storage_key, key).map(|x| x.map(|x| 
x.to_vec())).unwrap_or_else(|| - self.backend.child_storage(storage_key, key).expect(EXT_NOT_ALLOWED_TO_FAIL)) - } - - fn exists_storage(&self, key: &[u8]) -> bool { - let _guard = panic_handler::AbortGuard::new(true); - match self.overlay.storage(key) { - Some(x) => x.is_some(), - _ => self.backend.exists_storage(key).expect(EXT_NOT_ALLOWED_TO_FAIL), - } - } - - fn exists_child_storage(&self, storage_key: &[u8], key: &[u8]) -> bool { - let _guard = panic_handler::AbortGuard::new(true); - match self.overlay.child_storage(storage_key, key) { - Some(x) => x.is_some(), - _ => self.backend.exists_child_storage(storage_key, key).expect(EXT_NOT_ALLOWED_TO_FAIL), - } - } - - fn place_storage(&mut self, key: Vec, value: Option>) { - let _guard = panic_handler::AbortGuard::new(true); - if is_child_storage_key(&key) { - warn!(target: "trie", "Refuse to directly set child storage key"); - return; - } - - self.mark_dirty(); - self.overlay.set_storage(key, value); - } - - fn place_child_storage(&mut self, storage_key: Vec, key: Vec, value: Option>) -> bool { - let _guard = panic_handler::AbortGuard::new(true); - if !is_child_storage_key(&storage_key) || !is_child_trie_key_valid::(&storage_key) { - return false; - } - - self.mark_dirty(); - self.overlay.set_child_storage(storage_key, key, value); - - true - } - - fn kill_child_storage(&mut self, storage_key: &[u8]) { - let _guard = panic_handler::AbortGuard::new(true); - if !is_child_storage_key(storage_key) || !is_child_trie_key_valid::(storage_key) { - return; - } - - self.mark_dirty(); - self.overlay.clear_child_storage(storage_key); - self.backend.for_keys_in_child_storage(storage_key, |key| { - self.overlay.set_child_storage(storage_key.to_vec(), key.to_vec(), None); - }); - } - - fn clear_prefix(&mut self, prefix: &[u8]) { - let _guard = panic_handler::AbortGuard::new(true); - if is_child_storage_key(prefix) { - warn!(target: "trie", "Refuse to directly clear prefix that is part of child storage key"); - return; - } - - self.mark_dirty(); - self.overlay.clear_prefix(prefix); - self.backend.for_keys_with_prefix(prefix, |key| { - self.overlay.set_storage(key.to_vec(), None); - }); - } - - fn chain_id(&self) -> u64 { - 42 - } - - fn storage_root(&mut self) -> H::Out { - let _guard = panic_handler::AbortGuard::new(true); - if let Some((_, ref root)) = self.storage_transaction { - return root.clone(); - } - - let mut transaction = B::Transaction::default(); - let child_storage_keys: Vec<_> = self.overlay.prospective.children.keys().cloned().collect(); - - for key in child_storage_keys { - let (_, t) = self.child_storage_root_transaction(&key); - transaction.consolidate(t); - } - - // compute and memoize - let delta = self.overlay.committed.top.iter().map(|(k, v)| (k.clone(), v.value.clone())) - .chain(self.overlay.prospective.top.iter().map(|(k, v)| (k.clone(), v.value.clone()))); - - let (root, t) = self.backend.storage_root(delta); - transaction.consolidate(t); - self.storage_transaction = Some((transaction, root)); - root - } - - fn child_storage_root(&mut self, storage_key: &[u8]) -> Option> { - let _guard = panic_handler::AbortGuard::new(true); - if !is_child_storage_key(storage_key) || !is_child_trie_key_valid::(storage_key) { - return None; - } - - if self.storage_transaction.is_some() { - return Some(self.storage(storage_key).unwrap_or(default_child_trie_root::(storage_key))); - } - - Some(self.child_storage_root_transaction(storage_key).0) - } - - fn storage_changes_root(&mut self, parent: H::Out, parent_num: u64) -> Option { - let _guard = 
panic_handler::AbortGuard::new(true); - let root_and_tx = compute_changes_trie_root::<_, T, H>( - self.backend, - self.changes_trie_storage.clone(), - self.overlay, - &AnchorBlockId { hash: parent, number: parent_num }, - ); - let root_and_tx = root_and_tx.map(|(root, changes)| { - let mut calculated_root = Default::default(); - let mut mdb = MemoryDB::default(); - { - let mut trie = TrieDBMut::::new(&mut mdb, &mut calculated_root); - for (key, value) in changes { - trie.insert(&key, &value).expect(EXT_NOT_ALLOWED_TO_FAIL); - } - } - - (parent_num + 1, mdb, root) - }); - let root = root_and_tx.as_ref().map(|(_, _, root)| root.clone()); - self.changes_trie_transaction = root_and_tx; - root - } - - fn submit_extrinsic(&mut self, extrinsic: Vec) -> Result<(), ()> { - let _guard = panic_handler::AbortGuard::new(true); - if let Some(ext) = self.offchain_externalities.as_mut() { - ext.submit_extrinsic(extrinsic); - Ok(()) - } else { - warn!("Call to submit_extrinsic without offchain externalities set."); - Err(()) - } - } + fn storage(&self, key: &[u8]) -> Option> { + let _guard = panic_handler::AbortGuard::new(true); + self.overlay + .storage(key) + .map(|x| x.map(|x| x.to_vec())) + .unwrap_or_else(|| self.backend.storage(key).expect(EXT_NOT_ALLOWED_TO_FAIL)) + } + + fn storage_hash(&self, key: &[u8]) -> Option { + let _guard = panic_handler::AbortGuard::new(true); + self.overlay + .storage(key) + .map(|x| x.map(|x| H::hash(x))) + .unwrap_or_else(|| { + self.backend + .storage_hash(key) + .expect(EXT_NOT_ALLOWED_TO_FAIL) + }) + } + + fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Option> { + let _guard = panic_handler::AbortGuard::new(true); + self.overlay + .child_storage(storage_key, key) + .map(|x| x.map(|x| x.to_vec())) + .unwrap_or_else(|| { + self.backend + .child_storage(storage_key, key) + .expect(EXT_NOT_ALLOWED_TO_FAIL) + }) + } + + fn exists_storage(&self, key: &[u8]) -> bool { + let _guard = panic_handler::AbortGuard::new(true); + match self.overlay.storage(key) { + Some(x) => x.is_some(), + _ => self + .backend + .exists_storage(key) + .expect(EXT_NOT_ALLOWED_TO_FAIL), + } + } + + fn exists_child_storage(&self, storage_key: &[u8], key: &[u8]) -> bool { + let _guard = panic_handler::AbortGuard::new(true); + match self.overlay.child_storage(storage_key, key) { + Some(x) => x.is_some(), + _ => self + .backend + .exists_child_storage(storage_key, key) + .expect(EXT_NOT_ALLOWED_TO_FAIL), + } + } + + fn place_storage(&mut self, key: Vec, value: Option>) { + let _guard = panic_handler::AbortGuard::new(true); + if is_child_storage_key(&key) { + warn!(target: "trie", "Refuse to directly set child storage key"); + return; + } + + self.mark_dirty(); + self.overlay.set_storage(key, value); + } + + fn place_child_storage( + &mut self, + storage_key: Vec, + key: Vec, + value: Option>, + ) -> bool { + let _guard = panic_handler::AbortGuard::new(true); + if !is_child_storage_key(&storage_key) || !is_child_trie_key_valid::(&storage_key) { + return false; + } + + self.mark_dirty(); + self.overlay.set_child_storage(storage_key, key, value); + + true + } + + fn kill_child_storage(&mut self, storage_key: &[u8]) { + let _guard = panic_handler::AbortGuard::new(true); + if !is_child_storage_key(storage_key) || !is_child_trie_key_valid::(storage_key) { + return; + } + + self.mark_dirty(); + self.overlay.clear_child_storage(storage_key); + self.backend.for_keys_in_child_storage(storage_key, |key| { + self.overlay + .set_child_storage(storage_key.to_vec(), key.to_vec(), None); + }); + } + + fn 
clear_prefix(&mut self, prefix: &[u8]) { + let _guard = panic_handler::AbortGuard::new(true); + if is_child_storage_key(prefix) { + warn!(target: "trie", "Refuse to directly clear prefix that is part of child storage key"); + return; + } + + self.mark_dirty(); + self.overlay.clear_prefix(prefix); + self.backend.for_keys_with_prefix(prefix, |key| { + self.overlay.set_storage(key.to_vec(), None); + }); + } + + fn chain_id(&self) -> u64 { + 42 + } + + fn storage_root(&mut self) -> H::Out { + let _guard = panic_handler::AbortGuard::new(true); + if let Some((_, ref root)) = self.storage_transaction { + return root.clone(); + } + + let mut transaction = B::Transaction::default(); + let child_storage_keys: Vec<_> = + self.overlay.prospective.children.keys().cloned().collect(); + + for key in child_storage_keys { + let (_, t) = self.child_storage_root_transaction(&key); + transaction.consolidate(t); + } + + // compute and memoize + let delta = self + .overlay + .committed + .top + .iter() + .map(|(k, v)| (k.clone(), v.value.clone())) + .chain( + self.overlay + .prospective + .top + .iter() + .map(|(k, v)| (k.clone(), v.value.clone())), + ); + + let (root, t) = self.backend.storage_root(delta); + transaction.consolidate(t); + self.storage_transaction = Some((transaction, root)); + root + } + + fn child_storage_root(&mut self, storage_key: &[u8]) -> Option> { + let _guard = panic_handler::AbortGuard::new(true); + if !is_child_storage_key(storage_key) || !is_child_trie_key_valid::(storage_key) { + return None; + } + + if self.storage_transaction.is_some() { + return Some( + self.storage(storage_key) + .unwrap_or(default_child_trie_root::(storage_key)), + ); + } + + Some(self.child_storage_root_transaction(storage_key).0) + } + + fn storage_changes_root(&mut self, parent: H::Out, parent_num: u64) -> Option { + let _guard = panic_handler::AbortGuard::new(true); + let root_and_tx = compute_changes_trie_root::<_, T, H>( + self.backend, + self.changes_trie_storage.clone(), + self.overlay, + &AnchorBlockId { + hash: parent, + number: parent_num, + }, + ); + let root_and_tx = root_and_tx.map(|(root, changes)| { + let mut calculated_root = Default::default(); + let mut mdb = MemoryDB::default(); + { + let mut trie = TrieDBMut::::new(&mut mdb, &mut calculated_root); + for (key, value) in changes { + trie.insert(&key, &value).expect(EXT_NOT_ALLOWED_TO_FAIL); + } + } + + (parent_num + 1, mdb, root) + }); + let root = root_and_tx.as_ref().map(|(_, _, root)| root.clone()); + self.changes_trie_transaction = root_and_tx; + root + } + + fn submit_extrinsic(&mut self, extrinsic: Vec) -> Result<(), ()> { + let _guard = panic_handler::AbortGuard::new(true); + if let Some(ext) = self.offchain_externalities.as_mut() { + ext.submit_extrinsic(extrinsic); + Ok(()) + } else { + warn!("Call to submit_extrinsic without offchain externalities set."); + Err(()) + } + } } #[cfg(test)] mod tests { - use hex_literal::{hex, hex_impl}; - use parity_codec::Encode; - use primitives::{Blake2Hasher}; - use primitives::storage::well_known_keys::EXTRINSIC_INDEX; - use crate::backend::InMemory; - use crate::changes_trie::{Configuration as ChangesTrieConfiguration, - InMemoryStorage as InMemoryChangesTrieStorage}; - use crate::overlayed_changes::OverlayedValue; - use super::*; - - type TestBackend = InMemory; - type TestChangesTrieStorage = InMemoryChangesTrieStorage; - type TestExt<'a> = Ext<'a, Blake2Hasher, TestBackend, TestChangesTrieStorage, crate::NeverOffchainExt>; - - fn prepare_overlay_with_changes() -> OverlayedChanges { - 
OverlayedChanges { - prospective: vec![ - (EXTRINSIC_INDEX.to_vec(), OverlayedValue { - value: Some(3u32.encode()), - extrinsics: Some(vec![1].into_iter().collect()) - }), - (vec![1], OverlayedValue { - value: Some(vec![100].into_iter().collect()), - extrinsics: Some(vec![1].into_iter().collect()) - }), - ].into_iter().collect(), - committed: Default::default(), - changes_trie_config: Some(ChangesTrieConfiguration { - digest_interval: 0, - digest_levels: 0, - }), - } - } - - #[test] - fn storage_changes_root_is_none_when_storage_is_not_provided() { - let mut overlay = prepare_overlay_with_changes(); - let backend = TestBackend::default(); - let mut ext = TestExt::new(&mut overlay, &backend, None, None); - assert_eq!(ext.storage_changes_root(Default::default(), 100), None); - } - - #[test] - fn storage_changes_root_is_none_when_extrinsic_changes_are_none() { - let mut overlay = prepare_overlay_with_changes(); - overlay.changes_trie_config = None; - let storage = TestChangesTrieStorage::new(); - let backend = TestBackend::default(); - let mut ext = TestExt::new(&mut overlay, &backend, Some(&storage), None); - assert_eq!(ext.storage_changes_root(Default::default(), 100), None); - } - - #[test] - fn storage_changes_root_is_some_when_extrinsic_changes_are_non_empty() { - let mut overlay = prepare_overlay_with_changes(); - let storage = TestChangesTrieStorage::new(); - let backend = TestBackend::default(); - let mut ext = TestExt::new(&mut overlay, &backend, Some(&storage), None); - assert_eq!(ext.storage_changes_root(Default::default(), 99), - Some(hex!("5b829920b9c8d554a19ee2a1ba593c4f2ee6fc32822d083e04236d693e8358d5").into())); - } - - #[test] - fn storage_changes_root_is_some_when_extrinsic_changes_are_empty() { - let mut overlay = prepare_overlay_with_changes(); - overlay.prospective.top.get_mut(&vec![1]).unwrap().value = None; - let storage = TestChangesTrieStorage::new(); - let backend = TestBackend::default(); - let mut ext = TestExt::new(&mut overlay, &backend, Some(&storage), None); - assert_eq!(ext.storage_changes_root(Default::default(), 99), - Some(hex!("bcf494e41e29a15c9ae5caa053fe3cb8b446ee3e02a254efbdec7a19235b76e4").into())); - } + use super::*; + use crate::backend::InMemory; + use crate::changes_trie::{ + Configuration as ChangesTrieConfiguration, InMemoryStorage as InMemoryChangesTrieStorage, + }; + use crate::overlayed_changes::OverlayedValue; + use hex_literal::{hex, hex_impl}; + use parity_codec::Encode; + use primitives::storage::well_known_keys::EXTRINSIC_INDEX; + use primitives::Blake2Hasher; + + type TestBackend = InMemory; + type TestChangesTrieStorage = InMemoryChangesTrieStorage; + type TestExt<'a> = + Ext<'a, Blake2Hasher, TestBackend, TestChangesTrieStorage, crate::NeverOffchainExt>; + + fn prepare_overlay_with_changes() -> OverlayedChanges { + OverlayedChanges { + prospective: vec![ + ( + EXTRINSIC_INDEX.to_vec(), + OverlayedValue { + value: Some(3u32.encode()), + extrinsics: Some(vec![1].into_iter().collect()), + }, + ), + ( + vec![1], + OverlayedValue { + value: Some(vec![100].into_iter().collect()), + extrinsics: Some(vec![1].into_iter().collect()), + }, + ), + ] + .into_iter() + .collect(), + committed: Default::default(), + changes_trie_config: Some(ChangesTrieConfiguration { + digest_interval: 0, + digest_levels: 0, + }), + } + } + + #[test] + fn storage_changes_root_is_none_when_storage_is_not_provided() { + let mut overlay = prepare_overlay_with_changes(); + let backend = TestBackend::default(); + let mut ext = TestExt::new(&mut overlay, &backend, None, 
None); + assert_eq!(ext.storage_changes_root(Default::default(), 100), None); + } + + #[test] + fn storage_changes_root_is_none_when_extrinsic_changes_are_none() { + let mut overlay = prepare_overlay_with_changes(); + overlay.changes_trie_config = None; + let storage = TestChangesTrieStorage::new(); + let backend = TestBackend::default(); + let mut ext = TestExt::new(&mut overlay, &backend, Some(&storage), None); + assert_eq!(ext.storage_changes_root(Default::default(), 100), None); + } + + #[test] + fn storage_changes_root_is_some_when_extrinsic_changes_are_non_empty() { + let mut overlay = prepare_overlay_with_changes(); + let storage = TestChangesTrieStorage::new(); + let backend = TestBackend::default(); + let mut ext = TestExt::new(&mut overlay, &backend, Some(&storage), None); + assert_eq!( + ext.storage_changes_root(Default::default(), 99), + Some(hex!("5b829920b9c8d554a19ee2a1ba593c4f2ee6fc32822d083e04236d693e8358d5").into()) + ); + } + + #[test] + fn storage_changes_root_is_some_when_extrinsic_changes_are_empty() { + let mut overlay = prepare_overlay_with_changes(); + overlay.prospective.top.get_mut(&vec![1]).unwrap().value = None; + let storage = TestChangesTrieStorage::new(); + let backend = TestBackend::default(); + let mut ext = TestExt::new(&mut overlay, &backend, Some(&storage), None); + assert_eq!( + ext.storage_changes_root(Default::default(), 99), + Some(hex!("bcf494e41e29a15c9ae5caa053fe3cb8b446ee3e02a254efbdec7a19235b76e4").into()) + ); + } } diff --git a/core/state-machine/src/lib.rs b/core/state-machine/src/lib.rs index 0500aa72cf..71130067dd 100644 --- a/core/state-machine/src/lib.rs +++ b/core/state-machine/src/lib.rs @@ -18,42 +18,39 @@ #![warn(missing_docs)] -use std::{fmt, panic::UnwindSafe, result, marker::PhantomData}; -use log::warn; use hash_db::Hasher; use heapsize::HeapSizeOf; +use log::warn; use parity_codec::{Decode, Encode}; use primitives::{storage::well_known_keys, NativeOrEncoded, NeverNativeValue, OffchainExt}; +use std::{fmt, marker::PhantomData, panic::UnwindSafe, result}; pub mod backend; +mod basic; mod changes_trie; mod ext; -mod testing; -mod basic; mod overlayed_changes; mod proving_backend; +mod testing; mod trie_backend; mod trie_backend_essence; -use overlayed_changes::OverlayedChangeSet; -pub use trie::{TrieMut, TrieDBMut, DBValue, MemoryDB}; -pub use testing::TestExternalities; -pub use basic::BasicExternalities; -pub use ext::Ext; pub use backend::Backend; +pub use basic::BasicExternalities; pub use changes_trie::{ - AnchorBlockId as ChangesTrieAnchorBlockId, - Storage as ChangesTrieStorage, - RootsStorage as ChangesTrieRootsStorage, - InMemoryStorage as InMemoryChangesTrieStorage, - key_changes, key_changes_proof, key_changes_proof_check, - prune as prune_changes_tries, - oldest_non_pruned_trie as oldest_non_pruned_changes_trie + key_changes, key_changes_proof, key_changes_proof_check, + oldest_non_pruned_trie as oldest_non_pruned_changes_trie, prune as prune_changes_tries, + AnchorBlockId as ChangesTrieAnchorBlockId, InMemoryStorage as InMemoryChangesTrieStorage, + RootsStorage as ChangesTrieRootsStorage, Storage as ChangesTrieStorage, }; +pub use ext::Ext; +use overlayed_changes::OverlayedChangeSet; pub use overlayed_changes::OverlayedChanges; pub use proving_backend::{create_proof_check_backend, create_proof_check_backend_storage}; -pub use trie_backend_essence::{TrieBackendStorage, Storage}; +pub use testing::TestExternalities; +pub use trie::{DBValue, MemoryDB, TrieDBMut, TrieMut}; pub use trie_backend::TrieBackend; +pub use 
trie_backend_essence::{Storage, TrieBackendStorage}; /// State Machine Error bound. /// @@ -69,443 +66,499 @@ impl Error for ExecutionError {} /// and as a transition away from the pre-existing framework. #[derive(Debug, Eq, PartialEq)] pub enum ExecutionError { - /// Backend error. - Backend(String), - /// The entry `:code` doesn't exist in storage so there's no way we can execute anything. - CodeEntryDoesNotExist, - /// Backend is incompatible with execution proof generation process. - UnableToGenerateProof, - /// Invalid execution proof. - InvalidProof, + /// Backend error. + Backend(String), + /// The entry `:code` doesn't exist in storage so there's no way we can execute anything. + CodeEntryDoesNotExist, + /// Backend is incompatible with execution proof generation process. + UnableToGenerateProof, + /// Invalid execution proof. + InvalidProof, } impl fmt::Display for ExecutionError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Externalities Error") } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "Externalities Error") + } } type CallResult = Result, E>; /// Externalities: pinned to specific active address. pub trait Externalities { - /// Read runtime storage. - fn storage(&self, key: &[u8]) -> Option>; - - /// Get storage value hash. This may be optimized for large values. - fn storage_hash(&self, key: &[u8]) -> Option { - self.storage(key).map(|v| H::hash(&v)) - } - - /// Read child runtime storage. - fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Option>; - - /// Set storage entry `key` of current contract being called (effective immediately). - fn set_storage(&mut self, key: Vec, value: Vec) { - self.place_storage(key, Some(value)); - } - - /// Set child storage entry `key` of current contract being called (effective immediately). - fn set_child_storage(&mut self, storage_key: Vec, key: Vec, value: Vec) -> bool { - self.place_child_storage(storage_key, key, Some(value)) - } - - /// Clear a storage entry (`key`) of current contract being called (effective immediately). - fn clear_storage(&mut self, key: &[u8]) { - self.place_storage(key.to_vec(), None); - } - - /// Clear a child storage entry (`key`) of current contract being called (effective immediately). - fn clear_child_storage(&mut self, storage_key: &[u8], key: &[u8]) -> bool { - self.place_child_storage(storage_key.to_vec(), key.to_vec(), None) - } - - /// Whether a storage entry exists. - fn exists_storage(&self, key: &[u8]) -> bool { - self.storage(key).is_some() - } - - /// Whether a child storage entry exists. - fn exists_child_storage(&self, storage_key: &[u8], key: &[u8]) -> bool { - self.child_storage(storage_key, key).is_some() - } - - /// Clear an entire child storage. - fn kill_child_storage(&mut self, storage_key: &[u8]); - - /// Clear storage entries which keys are start with the given prefix. - fn clear_prefix(&mut self, prefix: &[u8]); - - /// Set or clear a storage entry (`key`) of current contract being called (effective immediately). - fn place_storage(&mut self, key: Vec, value: Option>); - - /// Set or clear a child storage entry. Return whether the operation succeeds. - fn place_child_storage(&mut self, storage_key: Vec, key: Vec, value: Option>) -> bool; - - /// Get the identity of the chain. - fn chain_id(&self) -> u64; - - /// Get the trie root of the current storage map. This will also update all child storage keys in the top-level storage map. 
- fn storage_root(&mut self) -> H::Out where H::Out: Ord; - - /// Get the trie root of a child storage map. This will also update the value of the child storage keys in the top-level storage map. If the storage root equals default hash as defined by trie, the key in top-level storage map will be removed. - /// - /// Returns None if key provided is not a storage key. This can due to not being started with CHILD_STORAGE_KEY_PREFIX, or the trie implementation regards the key as invalid. - fn child_storage_root(&mut self, storage_key: &[u8]) -> Option<Vec<u8>>; - - /// Get the change trie root of the current storage overlay at a block with given parent. - fn storage_changes_root(&mut self, parent: H::Out, parent_num: u64) -> Option<H::Out> where H::Out: Ord; - - /// Submit extrinsic. - /// - /// Returns an error in case the API is not available. - fn submit_extrinsic(&mut self, extrinsic: Vec<u8>) -> Result<(), ()>; + /// Read runtime storage. + fn storage(&self, key: &[u8]) -> Option<Vec<u8>>; + + /// Get storage value hash. This may be optimized for large values. + fn storage_hash(&self, key: &[u8]) -> Option<H::Out> { + self.storage(key).map(|v| H::hash(&v)) + } + + /// Read child runtime storage. + fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Option<Vec<u8>>; + + /// Set storage entry `key` of current contract being called (effective immediately). + fn set_storage(&mut self, key: Vec<u8>, value: Vec<u8>) { + self.place_storage(key, Some(value)); + } + + /// Set child storage entry `key` of current contract being called (effective immediately). + fn set_child_storage(&mut self, storage_key: Vec<u8>, key: Vec<u8>, value: Vec<u8>) -> bool { + self.place_child_storage(storage_key, key, Some(value)) + } + + /// Clear a storage entry (`key`) of current contract being called (effective immediately). + fn clear_storage(&mut self, key: &[u8]) { + self.place_storage(key.to_vec(), None); + } + + /// Clear a child storage entry (`key`) of current contract being called (effective immediately). + fn clear_child_storage(&mut self, storage_key: &[u8], key: &[u8]) -> bool { + self.place_child_storage(storage_key.to_vec(), key.to_vec(), None) + } + + /// Whether a storage entry exists. + fn exists_storage(&self, key: &[u8]) -> bool { + self.storage(key).is_some() + } + + /// Whether a child storage entry exists. + fn exists_child_storage(&self, storage_key: &[u8], key: &[u8]) -> bool { + self.child_storage(storage_key, key).is_some() + } + + /// Clear an entire child storage. + fn kill_child_storage(&mut self, storage_key: &[u8]); + + /// Clear storage entries whose keys start with the given prefix. + fn clear_prefix(&mut self, prefix: &[u8]); + + /// Set or clear a storage entry (`key`) of current contract being called (effective immediately). + fn place_storage(&mut self, key: Vec<u8>, value: Option<Vec<u8>>); + + /// Set or clear a child storage entry. Return whether the operation succeeds. + fn place_child_storage( + &mut self, + storage_key: Vec<u8>, + key: Vec<u8>, + value: Option<Vec<u8>>, + ) -> bool; + + /// Get the identity of the chain. + fn chain_id(&self) -> u64; + + /// Get the trie root of the current storage map. This will also update all child storage keys in the top-level storage map. + fn storage_root(&mut self) -> H::Out + where + H::Out: Ord; + + /// Get the trie root of a child storage map. This will also update the value of the child storage keys in the top-level storage map. If the storage root equals default hash as defined by trie, the key in top-level storage map will be removed. + /// + /// Returns None if key provided is not a storage key. This can be due to the key not starting with CHILD_STORAGE_KEY_PREFIX, or the trie implementation regarding the key as invalid. + fn child_storage_root(&mut self, storage_key: &[u8]) -> Option<Vec<u8>>; + + /// Get the change trie root of the current storage overlay at a block with given parent. + fn storage_changes_root(&mut self, parent: H::Out, parent_num: u64) -> Option<H::Out> + where + H::Out: Ord; + + /// Submit extrinsic. + /// + /// Returns an error in case the API is not available. + fn submit_extrinsic(&mut self, extrinsic: Vec<u8>) -> Result<(), ()>; }
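As a reading aid, a minimal caller-side sketch of the trait above; `bump_counter` and the b"counter" key are illustrative, not part of this crate. The provided methods such as `set_storage` all funnel into the required `place_storage`:

fn bump_counter<H: Hasher, E: Externalities<H>>(ext: &mut E) {
    // `storage` consults the overlay first and falls back to the backend.
    let n = ext.storage(b"counter").and_then(|v| v.first().copied()).unwrap_or(0);
    // `set_storage` is a provided method delegating to `place_storage`.
    ext.set_storage(b"counter".to_vec(), vec![n.wrapping_add(1)]);
}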
/// An implementation of offchain extensions that should never be triggered. pub enum NeverOffchainExt {} impl NeverOffchainExt { - /// Create new offchain extensions. - pub fn new<'a>() -> Option<&'a mut Self> { - None - } + /// Create new offchain extensions. + pub fn new<'a>() -> Option<&'a mut Self> { + None + } } impl OffchainExt for NeverOffchainExt { - fn submit_extrinsic(&mut self, _extrinsic: Vec<u8>) { unreachable!() } + fn submit_extrinsic(&mut self, _extrinsic: Vec<u8>) { + unreachable!() + } } /// Code execution engine. pub trait CodeExecutor<H: Hasher>: Sized + Send + Sync { - /// Externalities error type. - type Error: Error; - - /// Call a given method in the runtime. Returns a tuple of the result (either the output data - /// or an execution error) together with a `bool`, which is true if native execution was used. - fn call< - E: Externalities<H>, R: Encode + Decode + PartialEq, NC: FnOnce() -> result::Result<R, &'static str> + UnwindSafe - >( - &self, - ext: &mut E, - method: &str, - data: &[u8], - use_native: bool, - native_call: Option<NC>, - ) -> (CallResult<R, Self::Error>, bool); + /// Externalities error type. + type Error: Error; + + /// Call a given method in the runtime. Returns a tuple of the result (either the output data + /// or an execution error) together with a `bool`, which is true if native execution was used. + fn call< + E: Externalities<H>, + R: Encode + Decode + PartialEq, + NC: FnOnce() -> result::Result<R, &'static str> + UnwindSafe, + >( + &self, + ext: &mut E, + method: &str, + data: &[u8], + use_native: bool, + native_call: Option<NC>, + ) -> (CallResult<R, Self::Error>, bool); } /// Strategy for executing a call into the runtime. #[derive(Copy, Clone, Eq, PartialEq, Debug)] pub enum ExecutionStrategy { - /// Execute with the native equivalent if it is compatible with the given wasm module; otherwise fall back to the wasm. - NativeWhenPossible, - /// Use the given wasm module. - AlwaysWasm, - /// Run with both the wasm and the native variant (if compatible). Report any discrepency as an error. - Both, - /// First native, then if that fails or is not possible, wasm. - NativeElseWasm, + /// Execute with the native equivalent if it is compatible with the given wasm module; otherwise fall back to the wasm. + NativeWhenPossible, + /// Use the given wasm module. + AlwaysWasm, + /// Run with both the wasm and the native variant (if compatible). Report any discrepancy as an error. + Both, + /// First native, then if that fails or is not possible, wasm. + NativeElseWasm, } -type DefaultHandler<R, E> = fn( - CallResult<R, E>, - CallResult<R, E>, -) -> CallResult<R, E>; +type DefaultHandler<R, E> = fn(CallResult<R, E>, CallResult<R, E>) -> CallResult<R, E>; /// Like `ExecutionStrategy` only it also stores a handler in case of consensus failure. #[derive(Clone)] pub enum ExecutionManager<F> { - /// Execute with the native equivalent if it is compatible with the given wasm module; otherwise fall back to the wasm. - NativeWhenPossible, - /// Use the given wasm module. - AlwaysWasm, - /// Run with both the wasm and the native variant (if compatible). Call `F` in the case of any discrepency. - Both(F), - /// First native, then if that fails or is not possible, wasm. - NativeElseWasm, + /// Execute with the native equivalent if it is compatible with the given wasm module; otherwise fall back to the wasm. + NativeWhenPossible, + /// Use the given wasm module. + AlwaysWasm, + /// Run with both the wasm and the native variant (if compatible). Call `F` in the case of any discrepancy. + Both(F), + /// First native, then if that fails or is not possible, wasm. + NativeElseWasm, }
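A fragment showing how a caller can construct the `Both` variant with a custom handler instead of going through `get_manager` below; the closure body is illustrative:

// Prefer the wasm result on divergence, but record the mismatch first.
let manager = ExecutionManager::Both(|wasm_result, native_result| {
    warn!("native/wasm mismatch: native {:?} vs wasm {:?}", native_result, wasm_result);
    wasm_result
});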
impl<'a, F> From<&'a ExecutionManager<F>> for ExecutionStrategy { - fn from(s: &'a ExecutionManager<F>) -> Self { - match *s { - ExecutionManager::NativeWhenPossible => ExecutionStrategy::NativeWhenPossible, - ExecutionManager::AlwaysWasm => ExecutionStrategy::AlwaysWasm, - ExecutionManager::NativeElseWasm => ExecutionStrategy::NativeElseWasm, - ExecutionManager::Both(_) => ExecutionStrategy::Both, - } - } + fn from(s: &'a ExecutionManager<F>) -> Self { + match *s { + ExecutionManager::NativeWhenPossible => ExecutionStrategy::NativeWhenPossible, + ExecutionManager::AlwaysWasm => ExecutionStrategy::AlwaysWasm, + ExecutionManager::NativeElseWasm => ExecutionStrategy::NativeElseWasm, + ExecutionManager::Both(_) => ExecutionStrategy::Both, + } + } } impl ExecutionStrategy { - /// Gets the corresponding manager for the execution strategy. - pub fn get_manager<E: fmt::Debug, R: Decode + Encode>(self) -> ExecutionManager<DefaultHandler<R, E>> { - match self { - ExecutionStrategy::AlwaysWasm => ExecutionManager::AlwaysWasm, - ExecutionStrategy::NativeWhenPossible => ExecutionManager::NativeWhenPossible, - ExecutionStrategy::NativeElseWasm => ExecutionManager::NativeElseWasm, - ExecutionStrategy::Both => ExecutionManager::Both(|wasm_result, native_result| { - warn!( - "Consensus error between wasm {:?} and native {:?}. Using wasm.", - wasm_result, - native_result - ); - wasm_result - }), - } - } + /// Gets the corresponding manager for the execution strategy. + pub fn get_manager<E: fmt::Debug, R: Decode + Encode>( + self, + ) -> ExecutionManager<DefaultHandler<R, E>> { + match self { + ExecutionStrategy::AlwaysWasm => ExecutionManager::AlwaysWasm, + ExecutionStrategy::NativeWhenPossible => ExecutionManager::NativeWhenPossible, + ExecutionStrategy::NativeElseWasm => ExecutionManager::NativeElseWasm, + ExecutionStrategy::Both => ExecutionManager::Both(|wasm_result, native_result| { + warn!( + "Consensus error between wasm {:?} and native {:?}. Using wasm.", + wasm_result, native_result + ); + wasm_result + }), + } + } } - /// Evaluate to ExecutionManager::NativeWhenPossible, without having to figure out the type. pub fn native_when_possible<E, R>() -> ExecutionManager<DefaultHandler<R, E>> { - ExecutionManager::NativeWhenPossible + ExecutionManager::NativeWhenPossible } /// Evaluate to ExecutionManager::NativeElseWasm, without having to figure out the type. pub fn native_else_wasm<E, R>() -> ExecutionManager<DefaultHandler<R, E>> { - ExecutionManager::NativeElseWasm + ExecutionManager::NativeElseWasm } /// Evaluate to ExecutionManager::AlwaysWasm, without having to figure out the type. pub fn always_wasm<E, R>() -> ExecutionManager<DefaultHandler<R, E>> { - ExecutionManager::AlwaysWasm + ExecutionManager::AlwaysWasm } /// Creates new substrate state machine.
pub fn new<'a, H, B, T, O, Exec>( - backend: &'a B, - changes_trie_storage: Option<&'a T>, - offchain_ext: Option<&'a mut O>, - overlay: &'a mut OverlayedChanges, - exec: &'a Exec, - method: &'a str, - call_data: &'a [u8], + backend: &'a B, + changes_trie_storage: Option<&'a T>, + offchain_ext: Option<&'a mut O>, + overlay: &'a mut OverlayedChanges, + exec: &'a Exec, + method: &'a str, + call_data: &'a [u8], ) -> StateMachine<'a, H, B, T, O, Exec> { - StateMachine { - backend, - changes_trie_storage, - offchain_ext, - overlay, - exec, - method, - call_data, - _hasher: PhantomData, - } + StateMachine { + backend, + changes_trie_storage, + offchain_ext, + overlay, + exec, + method, + call_data, + _hasher: PhantomData, + } } /// The substrate state machine. pub struct StateMachine<'a, H, B, T, O, Exec> { - backend: &'a B, - changes_trie_storage: Option<&'a T>, - offchain_ext: Option<&'a mut O>, - overlay: &'a mut OverlayedChanges, - exec: &'a Exec, - method: &'a str, - call_data: &'a [u8], - _hasher: PhantomData, + backend: &'a B, + changes_trie_storage: Option<&'a T>, + offchain_ext: Option<&'a mut O>, + overlay: &'a mut OverlayedChanges, + exec: &'a Exec, + method: &'a str, + call_data: &'a [u8], + _hasher: PhantomData, } -impl<'a, H, B, T, O, Exec> StateMachine<'a, H, B, T, O, Exec> where - H: Hasher, - Exec: CodeExecutor, - B: Backend, - T: ChangesTrieStorage, - O: OffchainExt, - H::Out: Ord + HeapSizeOf, +impl<'a, H, B, T, O, Exec> StateMachine<'a, H, B, T, O, Exec> +where + H: Hasher, + Exec: CodeExecutor, + B: Backend, + T: ChangesTrieStorage, + O: OffchainExt, + H::Out: Ord + HeapSizeOf, { - /// Execute a call using the given state backend, overlayed changes, and call executor. - /// Produces a state-backend-specific "transaction" which can be used to apply the changes - /// to the backing store, such as the disk. - /// - /// On an error, no prospective changes are written to the overlay. - /// - /// Note: changes to code will be in place if this call is made again. For running partial - /// blocks (e.g. a transaction at a time), ensure a different method is used. - pub fn execute( - &mut self, - strategy: ExecutionStrategy, - ) -> Result<(Vec, B::Transaction, Option>), Box> { - // We are not giving a native call and thus we are sure that the result can never be a native - // value. 
- self.execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( - strategy.get_manager(), - true, - None, - ) - .map(|(result, storage_tx, changes_tx)| ( - result.into_encoded(), - storage_tx.expect("storage_tx is always computed when compute_tx is true; qed"), - changes_tx, - )) - } - - fn execute_aux( - &mut self, - compute_tx: bool, - use_native: bool, - native_call: Option, - ) -> (CallResult, bool, Option, Option>) where - R: Decode + Encode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, - { - let offchain = self.offchain_ext.as_mut(); - let mut externalities = ext::Ext::new( - self.overlay, - self.backend, - self.changes_trie_storage, - offchain.map(|x| &mut **x), - ); - let (result, was_native) = self.exec.call( - &mut externalities, - self.method, - self.call_data, - use_native, - native_call, - ); - let (storage_delta, changes_delta) = if compute_tx { - let (storage_delta, changes_delta) = externalities.transaction(); - (Some(storage_delta), changes_delta) - } else { - (None, None) - }; - (result, was_native, storage_delta, changes_delta) - } - - fn execute_call_with_both_strategy( - &mut self, - compute_tx: bool, - mut native_call: Option, - orig_prospective: OverlayedChangeSet, - on_consensus_failure: Handler, - ) -> (CallResult, Option, Option>) where - R: Decode + Encode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, - Handler: FnOnce( - CallResult, - CallResult - ) -> CallResult - { - let (result, was_native, storage_delta, changes_delta) = self.execute_aux(compute_tx, true, native_call.take()); - - if was_native { - self.overlay.prospective = orig_prospective.clone(); - let (wasm_result, _, wasm_storage_delta, wasm_changes_delta) = self.execute_aux(compute_tx, false, native_call); - - if (result.is_ok() && wasm_result.is_ok() - && result.as_ref().ok() == wasm_result.as_ref().ok()) - || result.is_err() && wasm_result.is_err() { - (result, storage_delta, changes_delta) - } else { - (on_consensus_failure(wasm_result, result), wasm_storage_delta, wasm_changes_delta) - } - } else { - (result, storage_delta, changes_delta) - } - } - - fn execute_call_with_native_else_wasm_strategy( - &mut self, - compute_tx: bool, - mut native_call: Option, - orig_prospective: OverlayedChangeSet, - ) -> (CallResult, Option, Option>) where - R: Decode + Encode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, - { - let (result, was_native, storage_delta, changes_delta) = self.execute_aux(compute_tx, true, native_call.take()); - - if !was_native || result.is_ok() { - (result, storage_delta, changes_delta) - } else { - self.overlay.prospective = orig_prospective.clone(); - let (wasm_result, _, wasm_storage_delta, wasm_changes_delta) = self.execute_aux(compute_tx, false, native_call); - (wasm_result, wasm_storage_delta, wasm_changes_delta) - } - } - - /// Execute a call using the given state backend, overlayed changes, and call executor. - /// Produces a state-backend-specific "transaction" which can be used to apply the changes - /// to the backing store, such as the disk. - /// - /// On an error, no prospective changes are written to the overlay. - /// - /// Note: changes to code will be in place if this call is made again. For running partial - /// blocks (e.g. a transaction at a time), ensure a different method is used. 
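// Summary of the dual-execution flow implemented above: the call first runs
// natively; only if native execution was actually used is the prospective
// overlay rolled back and the call re-run under wasm for comparison.
//   native was not used        -> first result stands
//   both Ok with equal values  -> native result (and its deltas)
//   both Err                   -> native result (errors are not compared)
//   anything else              -> on_consensus_failure(wasm_result, native_result)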
- pub fn execute_using_consensus_failure_handler( - &mut self, - manager: ExecutionManager, - compute_tx: bool, - mut native_call: Option, - ) -> Result<(NativeOrEncoded, Option, Option>), Box> where - R: Decode + Encode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, - Handler: FnOnce( - CallResult, - CallResult - ) -> CallResult - { - // read changes trie configuration. The reason why we're doing it here instead of the - // `OverlayedChanges` constructor is that we need proofs for this read as a part of - // proof-of-execution on light clients. And the proof is recorded by the backend which - // is created after OverlayedChanges - - let backend = self.backend.clone(); - let init_overlay = |overlay: &mut OverlayedChanges, final_check: bool| { - let changes_trie_config = try_read_overlay_value( - overlay, - backend, - well_known_keys::CHANGES_TRIE_CONFIG - )?; - set_changes_trie_config(overlay, changes_trie_config, final_check) - }; - init_overlay(self.overlay, false)?; - - let result = { - let orig_prospective = self.overlay.prospective.clone(); - - let (result, storage_delta, changes_delta) = match manager { - ExecutionManager::Both(on_consensus_failure) => { - self.execute_call_with_both_strategy(compute_tx, native_call.take(), orig_prospective, on_consensus_failure) - }, - ExecutionManager::NativeElseWasm => { - self.execute_call_with_native_else_wasm_strategy(compute_tx, native_call.take(), orig_prospective) - }, - ExecutionManager::AlwaysWasm => { - let (result, _, storage_delta, changes_delta) = self.execute_aux(compute_tx, false, native_call); - (result, storage_delta, changes_delta) - }, - ExecutionManager::NativeWhenPossible => { - let (result, _was_native, storage_delta, changes_delta) = self.execute_aux(compute_tx, true, native_call); - (result, storage_delta, changes_delta) - }, - }; - result.map(move |out| (out, storage_delta, changes_delta)) - }; - - if result.is_ok() { - init_overlay(self.overlay, true)?; - } - - result.map_err(|e| Box::new(e) as _) - } + /// Execute a call using the given state backend, overlayed changes, and call executor. + /// Produces a state-backend-specific "transaction" which can be used to apply the changes + /// to the backing store, such as the disk. + /// + /// On an error, no prospective changes are written to the overlay. + /// + /// Note: changes to code will be in place if this call is made again. For running partial + /// blocks (e.g. a transaction at a time), ensure a different method is used. + pub fn execute( + &mut self, + strategy: ExecutionStrategy, + ) -> Result<(Vec, B::Transaction, Option>), Box> { + // We are not giving a native call and thus we are sure that the result can never be a native + // value. 
+ self.execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( + strategy.get_manager(), + true, + None, + ) + .map(|(result, storage_tx, changes_tx)| { + ( + result.into_encoded(), + storage_tx.expect("storage_tx is always computed when compute_tx is true; qed"), + changes_tx, + ) + }) + } + + fn execute_aux( + &mut self, + compute_tx: bool, + use_native: bool, + native_call: Option, + ) -> ( + CallResult, + bool, + Option, + Option>, + ) + where + R: Decode + Encode + PartialEq, + NC: FnOnce() -> result::Result + UnwindSafe, + { + let offchain = self.offchain_ext.as_mut(); + let mut externalities = ext::Ext::new( + self.overlay, + self.backend, + self.changes_trie_storage, + offchain.map(|x| &mut **x), + ); + let (result, was_native) = self.exec.call( + &mut externalities, + self.method, + self.call_data, + use_native, + native_call, + ); + let (storage_delta, changes_delta) = if compute_tx { + let (storage_delta, changes_delta) = externalities.transaction(); + (Some(storage_delta), changes_delta) + } else { + (None, None) + }; + (result, was_native, storage_delta, changes_delta) + } + + fn execute_call_with_both_strategy( + &mut self, + compute_tx: bool, + mut native_call: Option, + orig_prospective: OverlayedChangeSet, + on_consensus_failure: Handler, + ) -> ( + CallResult, + Option, + Option>, + ) + where + R: Decode + Encode + PartialEq, + NC: FnOnce() -> result::Result + UnwindSafe, + Handler: FnOnce( + CallResult, + CallResult, + ) -> CallResult, + { + let (result, was_native, storage_delta, changes_delta) = + self.execute_aux(compute_tx, true, native_call.take()); + + if was_native { + self.overlay.prospective = orig_prospective.clone(); + let (wasm_result, _, wasm_storage_delta, wasm_changes_delta) = + self.execute_aux(compute_tx, false, native_call); + + if (result.is_ok() + && wasm_result.is_ok() + && result.as_ref().ok() == wasm_result.as_ref().ok()) + || result.is_err() && wasm_result.is_err() + { + (result, storage_delta, changes_delta) + } else { + ( + on_consensus_failure(wasm_result, result), + wasm_storage_delta, + wasm_changes_delta, + ) + } + } else { + (result, storage_delta, changes_delta) + } + } + + fn execute_call_with_native_else_wasm_strategy( + &mut self, + compute_tx: bool, + mut native_call: Option, + orig_prospective: OverlayedChangeSet, + ) -> ( + CallResult, + Option, + Option>, + ) + where + R: Decode + Encode + PartialEq, + NC: FnOnce() -> result::Result + UnwindSafe, + { + let (result, was_native, storage_delta, changes_delta) = + self.execute_aux(compute_tx, true, native_call.take()); + + if !was_native || result.is_ok() { + (result, storage_delta, changes_delta) + } else { + self.overlay.prospective = orig_prospective.clone(); + let (wasm_result, _, wasm_storage_delta, wasm_changes_delta) = + self.execute_aux(compute_tx, false, native_call); + (wasm_result, wasm_storage_delta, wasm_changes_delta) + } + } + + /// Execute a call using the given state backend, overlayed changes, and call executor. + /// Produces a state-backend-specific "transaction" which can be used to apply the changes + /// to the backing store, such as the disk. + /// + /// On an error, no prospective changes are written to the overlay. + /// + /// Note: changes to code will be in place if this call is made again. For running partial + /// blocks (e.g. a transaction at a time), ensure a different method is used. 
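For orientation, a caller-side sketch mirroring `execute_works` in the tests at the end of this file; the `backend`, `changes_trie_storage`, `overlay`, and `executor` bindings are assumed to be in scope:

let (encoded_result, _storage_tx, _changes_tx) = new(
    &backend,
    Some(&changes_trie_storage),
    NeverOffchainExt::new(),
    &mut overlay,
    &executor,
    "test",
    &[],
)
.execute(ExecutionStrategy::NativeElseWasm)?;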
+ pub fn execute_using_consensus_failure_handler( + &mut self, + manager: ExecutionManager, + compute_tx: bool, + mut native_call: Option, + ) -> Result< + ( + NativeOrEncoded, + Option, + Option>, + ), + Box, + > + where + R: Decode + Encode + PartialEq, + NC: FnOnce() -> result::Result + UnwindSafe, + Handler: FnOnce( + CallResult, + CallResult, + ) -> CallResult, + { + // read changes trie configuration. The reason why we're doing it here instead of the + // `OverlayedChanges` constructor is that we need proofs for this read as a part of + // proof-of-execution on light clients. And the proof is recorded by the backend which + // is created after OverlayedChanges + + let backend = self.backend.clone(); + let init_overlay = |overlay: &mut OverlayedChanges, final_check: bool| { + let changes_trie_config = + try_read_overlay_value(overlay, backend, well_known_keys::CHANGES_TRIE_CONFIG)?; + set_changes_trie_config(overlay, changes_trie_config, final_check) + }; + init_overlay(self.overlay, false)?; + + let result = { + let orig_prospective = self.overlay.prospective.clone(); + + let (result, storage_delta, changes_delta) = match manager { + ExecutionManager::Both(on_consensus_failure) => self + .execute_call_with_both_strategy( + compute_tx, + native_call.take(), + orig_prospective, + on_consensus_failure, + ), + ExecutionManager::NativeElseWasm => self + .execute_call_with_native_else_wasm_strategy( + compute_tx, + native_call.take(), + orig_prospective, + ), + ExecutionManager::AlwaysWasm => { + let (result, _, storage_delta, changes_delta) = + self.execute_aux(compute_tx, false, native_call); + (result, storage_delta, changes_delta) + } + ExecutionManager::NativeWhenPossible => { + let (result, _was_native, storage_delta, changes_delta) = + self.execute_aux(compute_tx, true, native_call); + (result, storage_delta, changes_delta) + } + }; + result.map(move |out| (out, storage_delta, changes_delta)) + }; + + if result.is_ok() { + init_overlay(self.overlay, true)?; + } + + result.map_err(|e| Box::new(e) as _) + } } /// Prove execution using the given state backend, overlayed changes, and call executor. pub fn prove_execution( - backend: B, - overlay: &mut OverlayedChanges, - exec: &Exec, - method: &str, - call_data: &[u8], + backend: B, + overlay: &mut OverlayedChanges, + exec: &Exec, + method: &str, + call_data: &[u8], ) -> Result<(Vec, Vec>), Box> where - B: Backend, - H: Hasher, - Exec: CodeExecutor, - H::Out: Ord + HeapSizeOf, + B: Backend, + H: Hasher, + Exec: CodeExecutor, + H::Out: Ord + HeapSizeOf, { - let trie_backend = backend.try_into_trie_backend() - .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; - prove_execution_on_trie_backend(&trie_backend, overlay, exec, method, call_data) + let trie_backend = backend + .try_into_trie_backend() + .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; + prove_execution_on_trie_backend(&trie_backend, overlay, exec, method, call_data) } /// Prove execution using the given trie backend, overlayed changes, and call executor. @@ -518,443 +571,505 @@ where /// Note: changes to code will be in place if this call is made again. For running partial /// blocks (e.g. a transaction at a time), ensure a different method is used. 
pub fn prove_execution_on_trie_backend( - trie_backend: &TrieBackend, - overlay: &mut OverlayedChanges, - exec: &Exec, - method: &str, - call_data: &[u8], + trie_backend: &TrieBackend, + overlay: &mut OverlayedChanges, + exec: &Exec, + method: &str, + call_data: &[u8], ) -> Result<(Vec, Vec>), Box> where - S: trie_backend_essence::TrieBackendStorage, - H: Hasher, - Exec: CodeExecutor, - H::Out: Ord + HeapSizeOf, + S: trie_backend_essence::TrieBackendStorage, + H: Hasher, + Exec: CodeExecutor, + H::Out: Ord + HeapSizeOf, { - let proving_backend = proving_backend::ProvingBackend::new(trie_backend); - let mut sm = StateMachine { - backend: &proving_backend, - changes_trie_storage: None as Option<&changes_trie::InMemoryStorage>, - offchain_ext: NeverOffchainExt::new(), - overlay, - exec, - method, - call_data, - _hasher: PhantomData, - }; - let (result, _, _) = sm.execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( - native_else_wasm(), - false, - None, - )?; - let proof = proving_backend.extract_proof(); - Ok((result.into_encoded(), proof)) + let proving_backend = proving_backend::ProvingBackend::new(trie_backend); + let mut sm = StateMachine { + backend: &proving_backend, + changes_trie_storage: None as Option<&changes_trie::InMemoryStorage>, + offchain_ext: NeverOffchainExt::new(), + overlay, + exec, + method, + call_data, + _hasher: PhantomData, + }; + let (result, _, _) = sm + .execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( + native_else_wasm(), + false, + None, + )?; + let proof = proving_backend.extract_proof(); + Ok((result.into_encoded(), proof)) } /// Check execution proof, generated by `prove_execution` call. pub fn execution_proof_check( - root: H::Out, - proof: Vec>, - overlay: &mut OverlayedChanges, - exec: &Exec, - method: &str, - call_data: &[u8], + root: H::Out, + proof: Vec>, + overlay: &mut OverlayedChanges, + exec: &Exec, + method: &str, + call_data: &[u8], ) -> Result, Box> where - H: Hasher, - Exec: CodeExecutor, - H::Out: Ord + HeapSizeOf, + H: Hasher, + Exec: CodeExecutor, + H::Out: Ord + HeapSizeOf, { - let trie_backend = proving_backend::create_proof_check_backend::(root.into(), proof)?; - execution_proof_check_on_trie_backend(&trie_backend, overlay, exec, method, call_data) + let trie_backend = proving_backend::create_proof_check_backend::(root.into(), proof)?; + execution_proof_check_on_trie_backend(&trie_backend, overlay, exec, method, call_data) } /// Check execution proof on proving backend, generated by `prove_execution` call. 
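A round-trip sketch of the two functions above, mirroring `prove_execution_and_proof_check_works` in the tests below; `executor` is assumed to be some `CodeExecutor`, such as the tests' `DummyCodeExecutor`:

// "Remote" full node: execute while recording every storage access.
let remote_backend = trie_backend::tests::test_trie();
let remote_root = remote_backend.storage_root(std::iter::empty()).0;
let (remote_result, proof) =
    prove_execution(remote_backend, &mut Default::default(), &executor, "test", &[])?;
// Light client: re-execute against nothing but the root and the proof.
let local_result = execution_proof_check::<Blake2Hasher, _>(
    remote_root, proof, &mut Default::default(), &executor, "test", &[],
)?;
assert_eq!(remote_result, local_result);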
pub fn execution_proof_check_on_trie_backend( - trie_backend: &TrieBackend, H>, - overlay: &mut OverlayedChanges, - exec: &Exec, - method: &str, - call_data: &[u8], + trie_backend: &TrieBackend, H>, + overlay: &mut OverlayedChanges, + exec: &Exec, + method: &str, + call_data: &[u8], ) -> Result, Box> where - H: Hasher, - Exec: CodeExecutor, - H::Out: Ord + HeapSizeOf, + H: Hasher, + Exec: CodeExecutor, + H::Out: Ord + HeapSizeOf, { - let mut sm = StateMachine { - backend: trie_backend, - changes_trie_storage: None as Option<&changes_trie::InMemoryStorage>, - offchain_ext: NeverOffchainExt::new(), - overlay, - exec, - method, - call_data, - _hasher: PhantomData, - }; - sm.execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( - native_else_wasm(), - false, - None, - ).map(|(result, _, _)| result.into_encoded()) + let mut sm = StateMachine { + backend: trie_backend, + changes_trie_storage: None as Option<&changes_trie::InMemoryStorage>, + offchain_ext: NeverOffchainExt::new(), + overlay, + exec, + method, + call_data, + _hasher: PhantomData, + }; + sm.execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( + native_else_wasm(), + false, + None, + ) + .map(|(result, _, _)| result.into_encoded()) } /// Generate storage read proof. pub fn prove_read( - backend: B, - key: &[u8] + backend: B, + key: &[u8], ) -> Result<(Option>, Vec>), Box> where - B: Backend, - H: Hasher, - H::Out: Ord + HeapSizeOf + B: Backend, + H: Hasher, + H::Out: Ord + HeapSizeOf, { - let trie_backend = backend.try_into_trie_backend() - .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; - prove_read_on_trie_backend(&trie_backend, key) + let trie_backend = backend + .try_into_trie_backend() + .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; + prove_read_on_trie_backend(&trie_backend, key) } /// Generate storage read proof on pre-created trie backend. pub fn prove_read_on_trie_backend( - trie_backend: &TrieBackend, - key: &[u8] + trie_backend: &TrieBackend, + key: &[u8], ) -> Result<(Option>, Vec>), Box> where - S: trie_backend_essence::TrieBackendStorage, - H: Hasher, - H::Out: Ord + HeapSizeOf + S: trie_backend_essence::TrieBackendStorage, + H: Hasher, + H::Out: Ord + HeapSizeOf, { - let proving_backend = proving_backend::ProvingBackend::<_, H>::new(trie_backend); - let result = proving_backend.storage(key).map_err(|e| Box::new(e) as Box)?; - Ok((result, proving_backend.extract_proof())) + let proving_backend = proving_backend::ProvingBackend::<_, H>::new(trie_backend); + let result = proving_backend + .storage(key) + .map_err(|e| Box::new(e) as Box)?; + Ok((result, proving_backend.extract_proof())) } /// Check storage read proof, generated by `prove_read` call. pub fn read_proof_check( - root: H::Out, - proof: Vec>, - key: &[u8], + root: H::Out, + proof: Vec>, + key: &[u8], ) -> Result>, Box> where - H: Hasher, - H::Out: Ord + HeapSizeOf + H: Hasher, + H::Out: Ord + HeapSizeOf, { - let proving_backend = proving_backend::create_proof_check_backend::(root, proof)?; - read_proof_check_on_proving_backend(&proving_backend, key) + let proving_backend = proving_backend::create_proof_check_backend::(root, proof)?; + read_proof_check_on_proving_backend(&proving_backend, key) } /// Check storage read proof on pre-created proving backend. 
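The same producer/verifier split works for plain storage reads, as `prove_read_and_proof_check_works` below exercises; condensed:

let remote_backend = trie_backend::tests::test_trie();
let remote_root = remote_backend.storage_root(std::iter::empty()).0;
let (value, proof) = prove_read(remote_backend, b"value2")?;
// Verification needs only the state root, the proof nodes, and the key.
assert_eq!(read_proof_check::<Blake2Hasher>(remote_root, proof, b"value2")?, value);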
pub fn read_proof_check_on_proving_backend( - proving_backend: &TrieBackend, H>, - key: &[u8], + proving_backend: &TrieBackend, H>, + key: &[u8], ) -> Result>, Box> where - H: Hasher, - H::Out: Ord + HeapSizeOf + H: Hasher, + H::Out: Ord + HeapSizeOf, { - proving_backend.storage(key).map_err(|e| Box::new(e) as Box) + proving_backend + .storage(key) + .map_err(|e| Box::new(e) as Box) } /// Sets overlayed changes' changes trie configuration. Returns error if configuration /// differs from previous OR config decode has failed. -pub(crate) fn set_changes_trie_config(overlay: &mut OverlayedChanges, config: Option>, final_check: bool) -> Result<(), Box> { - let config = match config { - Some(v) => Some(Decode::decode(&mut &v[..]) - .ok_or_else(|| Box::new("Failed to decode changes trie configuration".to_owned()) as Box)?), - None => None, - }; - - if final_check && overlay.changes_trie_config.is_some() != config.is_some() { - return Err(Box::new("Changes trie configuration change is not supported".to_owned())); - } - - if let Some(config) = config { - if !overlay.set_changes_trie_config(config) { - return Err(Box::new("Changes trie configuration change is not supported".to_owned())); - } - } - Ok(()) +pub(crate) fn set_changes_trie_config( + overlay: &mut OverlayedChanges, + config: Option>, + final_check: bool, +) -> Result<(), Box> { + let config = match config { + Some(v) => Some(Decode::decode(&mut &v[..]).ok_or_else(|| { + Box::new("Failed to decode changes trie configuration".to_owned()) as Box + })?), + None => None, + }; + + if final_check && overlay.changes_trie_config.is_some() != config.is_some() { + return Err(Box::new( + "Changes trie configuration change is not supported".to_owned(), + )); + } + + if let Some(config) = config { + if !overlay.set_changes_trie_config(config) { + return Err(Box::new( + "Changes trie configuration change is not supported".to_owned(), + )); + } + } + Ok(()) } /// Reads storage value from overlay or from the backend. 
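// Note on the overlay-first lookup below: `overlay.storage(key)` distinguishes
// "never written" (`None`) from "written, possibly deleted" (`Some(None)`), so a
// deletion recorded in the overlay correctly shadows a value that still exists
// in the backend; only untouched keys fall through to `backend.storage(key)`.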
-fn try_read_overlay_value(overlay: &OverlayedChanges, backend: &B, key: &[u8]) - -> Result>, Box> +fn try_read_overlay_value( + overlay: &OverlayedChanges, + backend: &B, + key: &[u8], +) -> Result>, Box> where - H: Hasher, + H: Hasher, - B: Backend, + B: Backend, { - match overlay.storage(key).map(|x| x.map(|x| x.to_vec())) { - Some(value) => Ok(value), - None => backend.storage(key) - .map_err(|err| Box::new(ExecutionError::Backend(format!("{}", err))) as Box), - } + match overlay.storage(key).map(|x| x.map(|x| x.to_vec())) { + Some(value) => Ok(value), + None => backend + .storage(key) + .map_err(|err| Box::new(ExecutionError::Backend(format!("{}", err))) as Box), + } } #[cfg(test)] mod tests { - use std::collections::HashMap; - use parity_codec::Encode; - use overlayed_changes::OverlayedValue; - use super::*; - use super::backend::InMemory; - use super::ext::Ext; - use super::changes_trie::{ - InMemoryStorage as InMemoryChangesTrieStorage, - Configuration as ChangesTrieConfig, - }; - use primitives::{Blake2Hasher, map}; - - struct DummyCodeExecutor { - change_changes_trie_config: bool, - native_available: bool, - native_succeeds: bool, - fallback_succeeds: bool, - } - - impl CodeExecutor for DummyCodeExecutor { - type Error = u8; - - fn call, R: Encode + Decode + PartialEq, NC: FnOnce() -> result::Result>( - &self, - ext: &mut E, - _method: &str, - _data: &[u8], - use_native: bool, - _native_call: Option, - ) -> (CallResult, bool) { - if self.change_changes_trie_config { - ext.place_storage( - well_known_keys::CHANGES_TRIE_CONFIG.to_vec(), - Some( - ChangesTrieConfig { - digest_interval: 777, - digest_levels: 333, - }.encode() - ) - ); - } - - let using_native = use_native && self.native_available; - match (using_native, self.native_succeeds, self.fallback_succeeds) { - (true, true, _) | (false, _, true) => { - ( - Ok( - NativeOrEncoded::Encoded( - vec![ - ext.storage(b"value1").unwrap()[0] + - ext.storage(b"value2").unwrap()[0] - ] - ) - ), - using_native - ) - }, - _ => (Err(0), using_native), - } - } - } - - impl Error for u8 {} - - #[test] - fn execute_works() { - assert_eq!(new( - &trie_backend::tests::test_trie(), - Some(&InMemoryChangesTrieStorage::new()), - NeverOffchainExt::new(), - &mut Default::default(), - &DummyCodeExecutor { - change_changes_trie_config: false, - native_available: true, - native_succeeds: true, - fallback_succeeds: true, - }, - "test", - &[], - ).execute( - ExecutionStrategy::NativeWhenPossible - ).unwrap().0, vec![66]); - } - - - #[test] - fn execute_works_with_native_else_wasm() { - assert_eq!(new( - &trie_backend::tests::test_trie(), - Some(&InMemoryChangesTrieStorage::new()), - NeverOffchainExt::new(), - &mut Default::default(), - &DummyCodeExecutor { - change_changes_trie_config: false, - native_available: true, - native_succeeds: true, - fallback_succeeds: true, - }, - "test", - &[], - ).execute( - ExecutionStrategy::NativeElseWasm - ).unwrap().0, vec![66]); - } - - #[test] - fn dual_execution_strategy_detects_consensus_failure() { - let mut consensus_failed = false; - assert!(new( - &trie_backend::tests::test_trie(), - Some(&InMemoryChangesTrieStorage::new()), - NeverOffchainExt::new(), - &mut Default::default(), - &DummyCodeExecutor { - change_changes_trie_config: false, - native_available: true, - native_succeeds: true, - fallback_succeeds: false, - }, - "test", - &[], - ).execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( - ExecutionManager::Both(|we, _ne| { - consensus_failed = true; - println!("HELLO!"); - we - }), - 
true, - None, - ).is_err()); - assert!(consensus_failed); - } - - #[test] - fn prove_execution_and_proof_check_works() { - let executor = DummyCodeExecutor { - change_changes_trie_config: false, - native_available: true, - native_succeeds: true, - fallback_succeeds: true, - }; - - // fetch execution proof from 'remote' full node - let remote_backend = trie_backend::tests::test_trie(); - let remote_root = remote_backend.storage_root(::std::iter::empty()).0; - let (remote_result, remote_proof) = prove_execution(remote_backend, - &mut Default::default(), &executor, "test", &[]).unwrap(); - - // check proof locally - let local_result = execution_proof_check::(remote_root, remote_proof, - &mut Default::default(), &executor, "test", &[]).unwrap(); - - // check that both results are correct - assert_eq!(remote_result, vec![66]); - assert_eq!(remote_result, local_result); - } - - #[test] - fn clear_prefix_in_ext_works() { - let initial: HashMap<_, _> = map![ - b"aaa".to_vec() => b"0".to_vec(), - b"abb".to_vec() => b"1".to_vec(), - b"abc".to_vec() => b"2".to_vec(), - b"bbb".to_vec() => b"3".to_vec() - ]; - let backend = InMemory::::from(initial).try_into_trie_backend().unwrap(); - let mut overlay = OverlayedChanges { - committed: map![ - b"aba".to_vec() => OverlayedValue::from(Some(b"1312".to_vec())), - b"bab".to_vec() => OverlayedValue::from(Some(b"228".to_vec())) - ], - prospective: map![ - b"abd".to_vec() => OverlayedValue::from(Some(b"69".to_vec())), - b"bbd".to_vec() => OverlayedValue::from(Some(b"42".to_vec())) - ], - ..Default::default() - }; - - { - let changes_trie_storage = InMemoryChangesTrieStorage::new(); - let mut ext = Ext::new(&mut overlay, &backend, Some(&changes_trie_storage), NeverOffchainExt::new()); - ext.clear_prefix(b"ab"); - } - overlay.commit_prospective(); - - assert_eq!( - overlay.committed, - map![ - b"abc".to_vec() => None.into(), - b"abb".to_vec() => None.into(), - b"aba".to_vec() => None.into(), - b"abd".to_vec() => None.into(), - - b"bab".to_vec() => Some(b"228".to_vec()).into(), - b"bbd".to_vec() => Some(b"42".to_vec()).into() - ], - ); - } - - #[test] - fn set_child_storage_works() { - let backend = InMemory::::default().try_into_trie_backend().unwrap(); - let changes_trie_storage = InMemoryChangesTrieStorage::new(); - let mut overlay = OverlayedChanges::default(); - let mut ext = Ext::new(&mut overlay, &backend, Some(&changes_trie_storage), NeverOffchainExt::new()); - - assert!(ext.set_child_storage(b":child_storage:testchild".to_vec(), b"abc".to_vec(), b"def".to_vec())); - assert_eq!(ext.child_storage(b":child_storage:testchild", b"abc"), Some(b"def".to_vec())); - ext.kill_child_storage(b":child_storage:testchild"); - assert_eq!(ext.child_storage(b":child_storage:testchild", b"abc"), None); - } - - #[test] - fn prove_read_and_proof_check_works() { - // fetch read proof from 'remote' full node - let remote_backend = trie_backend::tests::test_trie(); - let remote_root = remote_backend.storage_root(::std::iter::empty()).0; - let remote_proof = prove_read(remote_backend, b"value2").unwrap().1; - // check proof locally - let local_result1 = read_proof_check::(remote_root, remote_proof.clone(), b"value2").unwrap(); - let local_result2 = read_proof_check::(remote_root, remote_proof.clone(), &[0xff]).is_ok(); - // check that results are correct - assert_eq!(local_result1, Some(vec![24])); - assert_eq!(local_result2, false); - } - - #[test] - fn cannot_change_changes_trie_config() { - assert!(new( - &trie_backend::tests::test_trie(), - 
Some(&InMemoryChangesTrieStorage::new()), - NeverOffchainExt::new(), - &mut Default::default(), - &DummyCodeExecutor { - change_changes_trie_config: true, - native_available: false, - native_succeeds: true, - fallback_succeeds: true, - }, - "test", - &[], - ).execute( - ExecutionStrategy::NativeWhenPossible - ).is_err()); - } - - #[test] - fn cannot_change_changes_trie_config_with_native_else_wasm() { - assert!(new( - &trie_backend::tests::test_trie(), - Some(&InMemoryChangesTrieStorage::new()), - NeverOffchainExt::new(), - &mut Default::default(), - &DummyCodeExecutor { - change_changes_trie_config: true, - native_available: false, - native_succeeds: true, - fallback_succeeds: true, - }, - "test", - &[], - ).execute( - ExecutionStrategy::NativeElseWasm - ).is_err()); - } + use super::backend::InMemory; + use super::changes_trie::{ + Configuration as ChangesTrieConfig, InMemoryStorage as InMemoryChangesTrieStorage, + }; + use super::ext::Ext; + use super::*; + use overlayed_changes::OverlayedValue; + use parity_codec::Encode; + use primitives::{map, Blake2Hasher}; + use std::collections::HashMap; + + struct DummyCodeExecutor { + change_changes_trie_config: bool, + native_available: bool, + native_succeeds: bool, + fallback_succeeds: bool, + } + + impl CodeExecutor for DummyCodeExecutor { + type Error = u8; + + fn call< + E: Externalities, + R: Encode + Decode + PartialEq, + NC: FnOnce() -> result::Result, + >( + &self, + ext: &mut E, + _method: &str, + _data: &[u8], + use_native: bool, + _native_call: Option, + ) -> (CallResult, bool) { + if self.change_changes_trie_config { + ext.place_storage( + well_known_keys::CHANGES_TRIE_CONFIG.to_vec(), + Some( + ChangesTrieConfig { + digest_interval: 777, + digest_levels: 333, + } + .encode(), + ), + ); + } + + let using_native = use_native && self.native_available; + match (using_native, self.native_succeeds, self.fallback_succeeds) { + (true, true, _) | (false, _, true) => ( + Ok(NativeOrEncoded::Encoded(vec![ + ext.storage(b"value1").unwrap()[0] + ext.storage(b"value2").unwrap()[0], + ])), + using_native, + ), + _ => (Err(0), using_native), + } + } + } + + impl Error for u8 {} + + #[test] + fn execute_works() { + assert_eq!( + new( + &trie_backend::tests::test_trie(), + Some(&InMemoryChangesTrieStorage::new()), + NeverOffchainExt::new(), + &mut Default::default(), + &DummyCodeExecutor { + change_changes_trie_config: false, + native_available: true, + native_succeeds: true, + fallback_succeeds: true, + }, + "test", + &[], + ) + .execute(ExecutionStrategy::NativeWhenPossible) + .unwrap() + .0, + vec![66] + ); + } + + #[test] + fn execute_works_with_native_else_wasm() { + assert_eq!( + new( + &trie_backend::tests::test_trie(), + Some(&InMemoryChangesTrieStorage::new()), + NeverOffchainExt::new(), + &mut Default::default(), + &DummyCodeExecutor { + change_changes_trie_config: false, + native_available: true, + native_succeeds: true, + fallback_succeeds: true, + }, + "test", + &[], + ) + .execute(ExecutionStrategy::NativeElseWasm) + .unwrap() + .0, + vec![66] + ); + } + + #[test] + fn dual_execution_strategy_detects_consensus_failure() { + let mut consensus_failed = false; + assert!(new( + &trie_backend::tests::test_trie(), + Some(&InMemoryChangesTrieStorage::new()), + NeverOffchainExt::new(), + &mut Default::default(), + &DummyCodeExecutor { + change_changes_trie_config: false, + native_available: true, + native_succeeds: true, + fallback_succeeds: false, + }, + "test", + &[], + ) + .execute_using_consensus_failure_handler::<_, NeverNativeValue, 
fn() -> _>( + ExecutionManager::Both(|we, _ne| { + consensus_failed = true; + println!("HELLO!"); + we + }), + true, + None, + ) + .is_err()); + assert!(consensus_failed); + } + + #[test] + fn prove_execution_and_proof_check_works() { + let executor = DummyCodeExecutor { + change_changes_trie_config: false, + native_available: true, + native_succeeds: true, + fallback_succeeds: true, + }; + + // fetch execution proof from 'remote' full node + let remote_backend = trie_backend::tests::test_trie(); + let remote_root = remote_backend.storage_root(::std::iter::empty()).0; + let (remote_result, remote_proof) = prove_execution( + remote_backend, + &mut Default::default(), + &executor, + "test", + &[], + ) + .unwrap(); + + // check proof locally + let local_result = execution_proof_check::( + remote_root, + remote_proof, + &mut Default::default(), + &executor, + "test", + &[], + ) + .unwrap(); + + // check that both results are correct + assert_eq!(remote_result, vec![66]); + assert_eq!(remote_result, local_result); + } + + #[test] + fn clear_prefix_in_ext_works() { + let initial: HashMap<_, _> = map![ + b"aaa".to_vec() => b"0".to_vec(), + b"abb".to_vec() => b"1".to_vec(), + b"abc".to_vec() => b"2".to_vec(), + b"bbb".to_vec() => b"3".to_vec() + ]; + let backend = InMemory::::from(initial) + .try_into_trie_backend() + .unwrap(); + let mut overlay = OverlayedChanges { + committed: map![ + b"aba".to_vec() => OverlayedValue::from(Some(b"1312".to_vec())), + b"bab".to_vec() => OverlayedValue::from(Some(b"228".to_vec())) + ], + prospective: map![ + b"abd".to_vec() => OverlayedValue::from(Some(b"69".to_vec())), + b"bbd".to_vec() => OverlayedValue::from(Some(b"42".to_vec())) + ], + ..Default::default() + }; + + { + let changes_trie_storage = InMemoryChangesTrieStorage::new(); + let mut ext = Ext::new( + &mut overlay, + &backend, + Some(&changes_trie_storage), + NeverOffchainExt::new(), + ); + ext.clear_prefix(b"ab"); + } + overlay.commit_prospective(); + + assert_eq!( + overlay.committed, + map![ + b"abc".to_vec() => None.into(), + b"abb".to_vec() => None.into(), + b"aba".to_vec() => None.into(), + b"abd".to_vec() => None.into(), + + b"bab".to_vec() => Some(b"228".to_vec()).into(), + b"bbd".to_vec() => Some(b"42".to_vec()).into() + ], + ); + } + + #[test] + fn set_child_storage_works() { + let backend = InMemory::::default() + .try_into_trie_backend() + .unwrap(); + let changes_trie_storage = InMemoryChangesTrieStorage::new(); + let mut overlay = OverlayedChanges::default(); + let mut ext = Ext::new( + &mut overlay, + &backend, + Some(&changes_trie_storage), + NeverOffchainExt::new(), + ); + + assert!(ext.set_child_storage( + b":child_storage:testchild".to_vec(), + b"abc".to_vec(), + b"def".to_vec() + )); + assert_eq!( + ext.child_storage(b":child_storage:testchild", b"abc"), + Some(b"def".to_vec()) + ); + ext.kill_child_storage(b":child_storage:testchild"); + assert_eq!(ext.child_storage(b":child_storage:testchild", b"abc"), None); + } + + #[test] + fn prove_read_and_proof_check_works() { + // fetch read proof from 'remote' full node + let remote_backend = trie_backend::tests::test_trie(); + let remote_root = remote_backend.storage_root(::std::iter::empty()).0; + let remote_proof = prove_read(remote_backend, b"value2").unwrap().1; + // check proof locally + let local_result1 = + read_proof_check::(remote_root, remote_proof.clone(), b"value2").unwrap(); + let local_result2 = + read_proof_check::(remote_root, remote_proof.clone(), &[0xff]).is_ok(); + // check that results are correct + 
assert_eq!(local_result1, Some(vec![24])); + assert_eq!(local_result2, false); + } + + #[test] + fn cannot_change_changes_trie_config() { + assert!(new( + &trie_backend::tests::test_trie(), + Some(&InMemoryChangesTrieStorage::new()), + NeverOffchainExt::new(), + &mut Default::default(), + &DummyCodeExecutor { + change_changes_trie_config: true, + native_available: false, + native_succeeds: true, + fallback_succeeds: true, + }, + "test", + &[], + ) + .execute(ExecutionStrategy::NativeWhenPossible) + .is_err()); + } + + #[test] + fn cannot_change_changes_trie_config_with_native_else_wasm() { + assert!(new( + &trie_backend::tests::test_trie(), + Some(&InMemoryChangesTrieStorage::new()), + NeverOffchainExt::new(), + &mut Default::default(), + &DummyCodeExecutor { + change_changes_trie_config: true, + native_available: false, + native_succeeds: true, + fallback_succeeds: true, + }, + "test", + &[], + ) + .execute(ExecutionStrategy::NativeElseWasm) + .is_err()); + } } diff --git a/core/state-machine/src/overlayed_changes.rs b/core/state-machine/src/overlayed_changes.rs index 56e69323e8..4af9f5f3ec 100644 --- a/core/state-machine/src/overlayed_changes.rs +++ b/core/state-machine/src/overlayed_changes.rs @@ -16,11 +16,12 @@ //! The overlayed changes to state. -#[cfg(test)] use std::iter::FromIterator; -use std::collections::{HashMap, HashSet}; +use crate::changes_trie::{Configuration as ChangesTrieConfig, NO_EXTRINSIC_INDEX}; use parity_codec::Decode; -use crate::changes_trie::{NO_EXTRINSIC_INDEX, Configuration as ChangesTrieConfig}; use primitives::storage::well_known_keys::EXTRINSIC_INDEX; +use std::collections::{HashMap, HashSet}; +#[cfg(test)] +use std::iter::FromIterator; /// The overlayed changes to state to be queried on top of the backend. /// @@ -28,460 +29,617 @@ use primitives::storage::well_known_keys::EXTRINSIC_INDEX; /// that can be cleared. #[derive(Debug, Default, Clone)] pub struct OverlayedChanges { - /// Changes that are not yet committed. - pub(crate) prospective: OverlayedChangeSet, - /// Committed changes. - pub(crate) committed: OverlayedChangeSet, - /// Changes trie configuration. None by default, but could be installed by the - /// runtime if it supports change tries. - pub(crate) changes_trie_config: Option<ChangesTrieConfig>, + /// Changes that are not yet committed. + pub(crate) prospective: OverlayedChangeSet, + /// Committed changes. + pub(crate) committed: OverlayedChangeSet, + /// Changes trie configuration. None by default, but could be installed by the + /// runtime if it supports change tries. + pub(crate) changes_trie_config: Option<ChangesTrieConfig>, } /// The storage value, used inside OverlayedChanges. #[derive(Debug, Default, Clone)] #[cfg_attr(test, derive(PartialEq))] pub struct OverlayedValue { - /// Current value. None if value has been deleted. - pub value: Option<Vec<u8>>, - /// The set of extinsic indices where the values has been changed. - /// Is filled only if runtime has announced changes trie support. - pub extrinsics: Option<HashSet<u32>>, + /// Current value. None if value has been deleted. + pub value: Option<Vec<u8>>, + /// The set of extrinsic indices where the value has been changed. + /// Is filled only if runtime has announced changes trie support. + pub extrinsics: Option<HashSet<u32>>, } /// Prospective or committed overlayed change set. #[derive(Debug, Default, Clone)] #[cfg_attr(test, derive(PartialEq))] pub struct OverlayedChangeSet { - /// Top level storage changes. - pub top: HashMap<Vec<u8>, OverlayedValue>, - /// Child storage changes.
- pub children: HashMap, (Option>, HashMap, Option>>)>, + /// Top level storage changes. + pub top: HashMap, OverlayedValue>, + /// Child storage changes. + pub children: HashMap, (Option>, HashMap, Option>>)>, } #[cfg(test)] impl FromIterator<(Vec, OverlayedValue)> for OverlayedChangeSet { - fn from_iter, OverlayedValue)>>(iter: T) -> Self { - Self { - top: iter.into_iter().collect(), - children: Default::default(), - } - } + fn from_iter, OverlayedValue)>>(iter: T) -> Self { + Self { + top: iter.into_iter().collect(), + children: Default::default(), + } + } } impl OverlayedChangeSet { - /// Whether the change set is empty. - pub fn is_empty(&self) -> bool { - self.top.is_empty() && self.children.is_empty() - } - - /// Clear the change set. - pub fn clear(&mut self) { - self.top.clear(); - self.children.clear(); - } + /// Whether the change set is empty. + pub fn is_empty(&self) -> bool { + self.top.is_empty() && self.children.is_empty() + } + + /// Clear the change set. + pub fn clear(&mut self) { + self.top.clear(); + self.children.clear(); + } } impl OverlayedChanges { - /// Whether the overlayed changes are empty. - pub fn is_empty(&self) -> bool { - self.prospective.is_empty() && self.committed.is_empty() - } - - /// Sets the changes trie configuration. - /// - /// Returns false if configuration has been set already and we now trying - /// to install different configuration. This isn't supported now. - pub(crate) fn set_changes_trie_config(&mut self, config: ChangesTrieConfig) -> bool { - if let Some(ref old_config) = self.changes_trie_config { - // we do not support changes trie configuration' change now - if *old_config != config { - return false; - } - } - - self.changes_trie_config = Some(config); - true - } - - /// Returns a double-Option: None if the key is unknown (i.e. and the query should be refered - /// to the backend); Some(None) if the key has been deleted. Some(Some(...)) for a key whose - /// value has been set. - pub fn storage(&self, key: &[u8]) -> Option> { - self.prospective.top.get(key) - .or_else(|| self.committed.top.get(key)) - .map(|x| x.value.as_ref().map(AsRef::as_ref)) - } - - /// Returns a double-Option: None if the key is unknown (i.e. and the query should be refered - /// to the backend); Some(None) if the key has been deleted. Some(Some(...)) for a key whose - /// value has been set. - pub fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Option> { - if let Some(map) = self.prospective.children.get(storage_key) { - if let Some(val) = map.1.get(key) { - return Some(val.as_ref().map(AsRef::as_ref)); - } - } - - if let Some(map) = self.committed.children.get(storage_key) { - if let Some(val) = map.1.get(key) { - return Some(val.as_ref().map(AsRef::as_ref)); - } - } - - None - } - - /// Inserts the given key-value pair into the prospective change set. - /// - /// `None` can be used to delete a value specified by the given key. - pub(crate) fn set_storage(&mut self, key: Vec, val: Option>) { - let extrinsic_index = self.extrinsic_index(); - let entry = self.prospective.top.entry(key).or_default(); - entry.value = val; - - if let Some(extrinsic) = extrinsic_index { - entry.extrinsics.get_or_insert_with(Default::default) - .insert(extrinsic); - } - } - - /// Inserts the given key-value pair into the prospective child change set. - /// - /// `None` can be used to delete a value specified by the given key. 
- pub(crate) fn set_child_storage(&mut self, storage_key: Vec, key: Vec, val: Option>) { - let extrinsic_index = self.extrinsic_index(); - let map_entry = self.prospective.children.entry(storage_key).or_default(); - map_entry.1.insert(key, val); - - if let Some(extrinsic) = extrinsic_index { - map_entry.0.get_or_insert_with(Default::default) - .insert(extrinsic); - } - } - - /// Sync the child storage root. - pub(crate) fn sync_child_storage_root(&mut self, storage_key: &[u8], root: Option>) { - let entry = self.prospective.top.entry(storage_key.to_vec()).or_default(); - entry.value = root; - - if let Some((Some(extrinsics), _)) = self.prospective.children.get(storage_key) { - for extrinsic in extrinsics { - entry.extrinsics.get_or_insert_with(Default::default) - .insert(*extrinsic); - } - } - } - - /// Clear child storage of given storage key. - /// - /// NOTE that this doesn't take place immediately but written into the prospective - /// change set, and still can be reverted by [`discard_prospective`]. - /// - /// [`discard_prospective`]: #method.discard_prospective - pub(crate) fn clear_child_storage(&mut self, storage_key: &[u8]) { - let extrinsic_index = self.extrinsic_index(); - let map_entry = self.prospective.children.entry(storage_key.to_vec()).or_default(); - - if let Some(extrinsic) = extrinsic_index { - map_entry.0.get_or_insert_with(Default::default) - .insert(extrinsic); - } - - map_entry.1.values_mut().for_each(|e| *e = None); - - if let Some((_, committed_map)) = self.committed.children.get(storage_key) { - for (key, _) in committed_map.iter() { - map_entry.1.insert(key.clone(), None); - } - } - } - - /// Removes all key-value pairs which keys share the given prefix. - /// - /// NOTE that this doesn't take place immediately but written into the prospective - /// change set, and still can be reverted by [`discard_prospective`]. - /// - /// [`discard_prospective`]: #method.discard_prospective - pub(crate) fn clear_prefix(&mut self, prefix: &[u8]) { - let extrinsic_index = self.extrinsic_index(); - - // Iterate over all prospective and mark all keys that share - // the given prefix as removed (None). - for (key, entry) in self.prospective.top.iter_mut() { - if key.starts_with(prefix) { - entry.value = None; - - if let Some(extrinsic) = extrinsic_index { - entry.extrinsics.get_or_insert_with(Default::default) - .insert(extrinsic); - } - } - } - - // Then do the same with keys from commited changes. - // NOTE that we are making changes in the prospective change set. - for key in self.committed.top.keys() { - if key.starts_with(prefix) { - let entry = self.prospective.top.entry(key.clone()).or_default(); - entry.value = None; - - if let Some(extrinsic) = extrinsic_index { - entry.extrinsics.get_or_insert_with(Default::default) - .insert(extrinsic); - } - } - } - } - - /// Discard prospective changes to state. - pub fn discard_prospective(&mut self) { - self.prospective.clear(); - } - - /// Commit prospective changes to state. 
- pub fn commit_prospective(&mut self) { - if self.committed.is_empty() { - ::std::mem::swap(&mut self.prospective, &mut self.committed); - } else { - for (key, val) in self.prospective.top.drain() { - let entry = self.committed.top.entry(key).or_default(); - entry.value = val.value; - - if let Some(prospective_extrinsics) = val.extrinsics { - entry.extrinsics.get_or_insert_with(Default::default) - .extend(prospective_extrinsics); - } - } - for (storage_key, map) in self.prospective.children.drain() { - let entry = self.committed.children.entry(storage_key).or_default(); - entry.1.extend(map.1.iter().map(|(k, v)| (k.clone(), v.clone()))); - - if let Some(prospective_extrinsics) = map.0 { - entry.0.get_or_insert_with(Default::default) - .extend(prospective_extrinsics); - } - } - } - } - - /// Consume `OverlayedChanges` and take committed set. - /// - /// Panics: - /// Will panic if there are any uncommitted prospective changes. - pub fn into_committed(self) -> impl Iterator<Item=(Vec<u8>, Option<Vec<u8>>)> { - assert!(self.prospective.is_empty()); - self.committed.top.into_iter().map(|(k, v)| (k, v.value)) - } - - /// Inserts storage entry responsible for current extrinsic index. - #[cfg(test)] - pub(crate) fn set_extrinsic_index(&mut self, extrinsic_index: u32) { - use parity_codec::Encode; - self.prospective.top.insert(EXTRINSIC_INDEX.to_vec(), OverlayedValue { - value: Some(extrinsic_index.encode()), - extrinsics: None, - }); - } - - /// Returns current extrinsic index to use in changes trie construction. - /// None is returned if it is not set or changes trie config is not set. - /// Persistent value (from the backend) can be ignored because runtime must - /// set this index before first and unset after last extrinsic is executied. - /// Changes that are made outside of extrinsics, are marked with - /// `NO_EXTRINSIC_INDEX` index. - fn extrinsic_index(&self) -> Option<u32> { - match self.changes_trie_config.is_some() { - true => Some( - self.storage(EXTRINSIC_INDEX) - .and_then(|idx| idx.and_then(|idx| Decode::decode(&mut &*idx))) - .unwrap_or(NO_EXTRINSIC_INDEX)), - false => None, - } - } + /// Whether the overlayed changes are empty. + pub fn is_empty(&self) -> bool { + self.prospective.is_empty() && self.committed.is_empty() + } + + /// Sets the changes trie configuration. + /// + /// Returns false if the configuration has already been set and we are now + /// trying to install a different configuration. This isn't supported yet. + pub(crate) fn set_changes_trie_config(&mut self, config: ChangesTrieConfig) -> bool { + if let Some(ref old_config) = self.changes_trie_config { + // changing the changes trie configuration is not supported yet + if *old_config != config { + return false; + } + } + + self.changes_trie_config = Some(config); + true + } + + /// Returns a double-Option: None if the key is unknown (i.e. the query should be referred + /// to the backend); Some(None) if the key has been deleted. Some(Some(...)) for a key whose + /// value has been set. + pub fn storage(&self, key: &[u8]) -> Option<Option<&[u8]>> { + self.prospective + .top + .get(key) + .or_else(|| self.committed.top.get(key)) + .map(|x| x.value.as_ref().map(AsRef::as_ref)) + } + + /// Returns a double-Option: None if the key is unknown (i.e. the query should be referred + /// to the backend); Some(None) if the key has been deleted. Some(Some(...)) for a key whose + /// value has been set.
+ pub fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Option<Option<&[u8]>> { + if let Some(map) = self.prospective.children.get(storage_key) { + if let Some(val) = map.1.get(key) { + return Some(val.as_ref().map(AsRef::as_ref)); + } + } + + if let Some(map) = self.committed.children.get(storage_key) { + if let Some(val) = map.1.get(key) { + return Some(val.as_ref().map(AsRef::as_ref)); + } + } + + None + } + + /// Inserts the given key-value pair into the prospective change set. + /// + /// `None` can be used to delete a value specified by the given key. + pub(crate) fn set_storage(&mut self, key: Vec<u8>, val: Option<Vec<u8>>) { + let extrinsic_index = self.extrinsic_index(); + let entry = self.prospective.top.entry(key).or_default(); + entry.value = val; + + if let Some(extrinsic) = extrinsic_index { + entry + .extrinsics + .get_or_insert_with(Default::default) + .insert(extrinsic); + } + } + + /// Inserts the given key-value pair into the prospective child change set. + /// + /// `None` can be used to delete a value specified by the given key. + pub(crate) fn set_child_storage( + &mut self, + storage_key: Vec<u8>, + key: Vec<u8>, + val: Option<Vec<u8>>, + ) { + let extrinsic_index = self.extrinsic_index(); + let map_entry = self.prospective.children.entry(storage_key).or_default(); + map_entry.1.insert(key, val); + + if let Some(extrinsic) = extrinsic_index { + map_entry + .0 + .get_or_insert_with(Default::default) + .insert(extrinsic); + } + } + + /// Sync the child storage root. + pub(crate) fn sync_child_storage_root(&mut self, storage_key: &[u8], root: Option<Vec<u8>>) { + let entry = self + .prospective + .top + .entry(storage_key.to_vec()) + .or_default(); + entry.value = root; + + if let Some((Some(extrinsics), _)) = self.prospective.children.get(storage_key) { + for extrinsic in extrinsics { + entry + .extrinsics + .get_or_insert_with(Default::default) + .insert(*extrinsic); + } + } + } + + /// Clear child storage of given storage key. + /// + /// NOTE that this doesn't take place immediately but is written into the prospective + /// change set, and still can be reverted by [`discard_prospective`]. + /// + /// [`discard_prospective`]: #method.discard_prospective + pub(crate) fn clear_child_storage(&mut self, storage_key: &[u8]) { + let extrinsic_index = self.extrinsic_index(); + let map_entry = self + .prospective + .children + .entry(storage_key.to_vec()) + .or_default(); + + if let Some(extrinsic) = extrinsic_index { + map_entry + .0 + .get_or_insert_with(Default::default) + .insert(extrinsic); + } + + map_entry.1.values_mut().for_each(|e| *e = None); + + if let Some((_, committed_map)) = self.committed.children.get(storage_key) { + for (key, _) in committed_map.iter() { + map_entry.1.insert(key.clone(), None); + } + } + } + + /// Removes all key-value pairs whose keys share the given prefix. + /// + /// NOTE that this doesn't take place immediately but is written into the prospective + /// change set, and still can be reverted by [`discard_prospective`]. + /// + /// [`discard_prospective`]: #method.discard_prospective + pub(crate) fn clear_prefix(&mut self, prefix: &[u8]) { + let extrinsic_index = self.extrinsic_index(); + + // Iterate over all prospective entries and mark all keys that share + // the given prefix as removed (None).
+ for (key, entry) in self.prospective.top.iter_mut() { + if key.starts_with(prefix) { + entry.value = None; + + if let Some(extrinsic) = extrinsic_index { + entry + .extrinsics + .get_or_insert_with(Default::default) + .insert(extrinsic); + } + } + } + + // Then do the same with keys from committed changes. + // NOTE that we are making changes in the prospective change set. + for key in self.committed.top.keys() { + if key.starts_with(prefix) { + let entry = self.prospective.top.entry(key.clone()).or_default(); + entry.value = None; + + if let Some(extrinsic) = extrinsic_index { + entry + .extrinsics + .get_or_insert_with(Default::default) + .insert(extrinsic); + } + } + } + } + + /// Discard prospective changes to state. + pub fn discard_prospective(&mut self) { + self.prospective.clear(); + } + + /// Commit prospective changes to state. + pub fn commit_prospective(&mut self) { + if self.committed.is_empty() { + ::std::mem::swap(&mut self.prospective, &mut self.committed); + } else { + for (key, val) in self.prospective.top.drain() { + let entry = self.committed.top.entry(key).or_default(); + entry.value = val.value; + + if let Some(prospective_extrinsics) = val.extrinsics { + entry + .extrinsics + .get_or_insert_with(Default::default) + .extend(prospective_extrinsics); + } + } + for (storage_key, map) in self.prospective.children.drain() { + let entry = self.committed.children.entry(storage_key).or_default(); + entry + .1 + .extend(map.1.iter().map(|(k, v)| (k.clone(), v.clone()))); + + if let Some(prospective_extrinsics) = map.0 { + entry + .0 + .get_or_insert_with(Default::default) + .extend(prospective_extrinsics); + } + } + } + } + + /// Consume `OverlayedChanges` and take committed set. + /// + /// Panics: + /// Will panic if there are any uncommitted prospective changes. + pub fn into_committed(self) -> impl Iterator<Item = (Vec<u8>, Option<Vec<u8>>)> { + assert!(self.prospective.is_empty()); + self.committed.top.into_iter().map(|(k, v)| (k, v.value)) + } + + /// Inserts storage entry responsible for current extrinsic index. + #[cfg(test)] + pub(crate) fn set_extrinsic_index(&mut self, extrinsic_index: u32) { + use parity_codec::Encode; + self.prospective.top.insert( + EXTRINSIC_INDEX.to_vec(), + OverlayedValue { + value: Some(extrinsic_index.encode()), + extrinsics: None, + }, + ); + } + + /// Returns current extrinsic index to use in changes trie construction. + /// None is returned if it is not set or changes trie config is not set. + /// Persistent value (from the backend) can be ignored because the runtime must + /// set this index before the first and unset it after the last extrinsic is executed. + /// Changes that are made outside of extrinsics are marked with + /// `NO_EXTRINSIC_INDEX` index.
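Taken together, `set_storage`, `commit_prospective` and `discard_prospective` give the overlay simple transactional semantics. A condensed sketch of the `overlayed_storage_works` test below (crate-internal, since `set_storage` is `pub(crate)`):

```rust
let mut overlay = OverlayedChanges::default();

overlay.set_storage(b"key".to_vec(), Some(vec![1, 2, 3]));
overlay.commit_prospective(); // prospective changes become committed

overlay.set_storage(b"key".to_vec(), None); // a prospective deletion...
overlay.discard_prospective();              // ...is rolled back again

assert_eq!(overlay.storage(b"key"), Some(Some(&[1, 2, 3][..])));
```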
+ fn extrinsic_index(&self) -> Option { + match self.changes_trie_config.is_some() { + true => Some( + self.storage(EXTRINSIC_INDEX) + .and_then(|idx| idx.and_then(|idx| Decode::decode(&mut &*idx))) + .unwrap_or(NO_EXTRINSIC_INDEX), + ), + false => None, + } + } } #[cfg(test)] impl From>> for OverlayedValue { - fn from(value: Option>) -> OverlayedValue { - OverlayedValue { value, ..Default::default() } - } + fn from(value: Option>) -> OverlayedValue { + OverlayedValue { + value, + ..Default::default() + } + } } #[cfg(test)] mod tests { - use hex_literal::{hex, hex_impl}; - use primitives::{Blake2Hasher, H256}; - use primitives::storage::well_known_keys::EXTRINSIC_INDEX; - use crate::backend::InMemory; - use crate::changes_trie::InMemoryStorage as InMemoryChangesTrieStorage; - use crate::ext::Ext; - use crate::Externalities; - use super::*; - - fn strip_extrinsic_index(map: &HashMap, OverlayedValue>) -> HashMap, OverlayedValue> { - let mut clone = map.clone(); - clone.remove(&EXTRINSIC_INDEX.to_vec()); - clone - } - - #[test] - fn overlayed_storage_works() { - let mut overlayed = OverlayedChanges::default(); - - let key = vec![42, 69, 169, 142]; - - assert!(overlayed.storage(&key).is_none()); - - overlayed.set_storage(key.clone(), Some(vec![1, 2, 3])); - assert_eq!(overlayed.storage(&key).unwrap(), Some(&[1, 2, 3][..])); - - overlayed.commit_prospective(); - assert_eq!(overlayed.storage(&key).unwrap(), Some(&[1, 2, 3][..])); - - overlayed.set_storage(key.clone(), Some(vec![])); - assert_eq!(overlayed.storage(&key).unwrap(), Some(&[][..])); - - overlayed.set_storage(key.clone(), None); - assert!(overlayed.storage(&key).unwrap().is_none()); - - overlayed.discard_prospective(); - assert_eq!(overlayed.storage(&key).unwrap(), Some(&[1, 2, 3][..])); - - overlayed.set_storage(key.clone(), None); - overlayed.commit_prospective(); - assert!(overlayed.storage(&key).unwrap().is_none()); - } - - #[test] - fn overlayed_storage_root_works() { - let initial: HashMap<_, _> = vec![ - (b"doe".to_vec(), b"reindeer".to_vec()), - (b"dog".to_vec(), b"puppyXXX".to_vec()), - (b"dogglesworth".to_vec(), b"catXXX".to_vec()), - (b"doug".to_vec(), b"notadog".to_vec()), - ].into_iter().collect(); - let backend = InMemory::::from(initial); - let mut overlay = OverlayedChanges { - committed: vec![ - (b"dog".to_vec(), Some(b"puppy".to_vec()).into()), - (b"dogglesworth".to_vec(), Some(b"catYYY".to_vec()).into()), - (b"doug".to_vec(), Some(vec![]).into()), - ].into_iter().collect(), - prospective: vec![ - (b"dogglesworth".to_vec(), Some(b"cat".to_vec()).into()), - (b"doug".to_vec(), None.into()), - ].into_iter().collect(), - ..Default::default() - }; - - let changes_trie_storage = InMemoryChangesTrieStorage::new(); - let mut ext = Ext::new( - &mut overlay, - &backend, - Some(&changes_trie_storage), - crate::NeverOffchainExt::new(), - ); - const ROOT: [u8; 32] = hex!("0b41e488cccbd67d1f1089592c2c235f5c5399b053f7fe9152dd4b5f279914cd"); - assert_eq!(ext.storage_root(), H256::from(ROOT)); - } - - #[test] - fn changes_trie_configuration_is_saved() { - let mut overlay = OverlayedChanges::default(); - assert!(overlay.changes_trie_config.is_none()); - assert_eq!(overlay.set_changes_trie_config(ChangesTrieConfig { - digest_interval: 4, digest_levels: 1, - }), true); - assert!(overlay.changes_trie_config.is_some()); - } - - #[test] - fn changes_trie_configuration_is_saved_twice() { - let mut overlay = OverlayedChanges::default(); - assert!(overlay.changes_trie_config.is_none()); - 
assert_eq!(overlay.set_changes_trie_config(ChangesTrieConfig { - digest_interval: 4, digest_levels: 1, - }), true); - overlay.set_extrinsic_index(0); - overlay.set_storage(vec![1], Some(vec![2])); - assert_eq!(overlay.set_changes_trie_config(ChangesTrieConfig { - digest_interval: 4, digest_levels: 1, - }), true); - assert_eq!( - strip_extrinsic_index(&overlay.prospective.top), - vec![ - (vec![1], OverlayedValue { value: Some(vec![2]), extrinsics: Some(vec![0].into_iter().collect()) }), - ].into_iter().collect(), - ); - } - - #[test] - fn panics_when_trying_to_save_different_changes_trie_configuration() { - let mut overlay = OverlayedChanges::default(); - assert_eq!(overlay.set_changes_trie_config(ChangesTrieConfig { - digest_interval: 4, digest_levels: 1, - }), true); - assert_eq!(overlay.set_changes_trie_config(ChangesTrieConfig { - digest_interval: 2, digest_levels: 1, - }), false); - } - - #[test] - fn extrinsic_changes_are_collected() { - let mut overlay = OverlayedChanges::default(); - let _ = overlay.set_changes_trie_config(ChangesTrieConfig { - digest_interval: 4, digest_levels: 1, - }); - - overlay.set_storage(vec![100], Some(vec![101])); - - overlay.set_extrinsic_index(0); - overlay.set_storage(vec![1], Some(vec![2])); - - overlay.set_extrinsic_index(1); - overlay.set_storage(vec![3], Some(vec![4])); - - overlay.set_extrinsic_index(2); - overlay.set_storage(vec![1], Some(vec![6])); - - assert_eq!(strip_extrinsic_index(&overlay.prospective.top), - vec![ - (vec![1], OverlayedValue { value: Some(vec![6]), extrinsics: Some(vec![0, 2].into_iter().collect()) }), - (vec![3], OverlayedValue { value: Some(vec![4]), extrinsics: Some(vec![1].into_iter().collect()) }), - (vec![100], OverlayedValue { value: Some(vec![101]), extrinsics: Some(vec![NO_EXTRINSIC_INDEX].into_iter().collect()) }), - ].into_iter().collect()); - - overlay.commit_prospective(); - - overlay.set_extrinsic_index(3); - overlay.set_storage(vec![3], Some(vec![7])); - - overlay.set_extrinsic_index(4); - overlay.set_storage(vec![1], Some(vec![8])); - - assert_eq!(strip_extrinsic_index(&overlay.committed.top), - vec![ - (vec![1], OverlayedValue { value: Some(vec![6]), extrinsics: Some(vec![0, 2].into_iter().collect()) }), - (vec![3], OverlayedValue { value: Some(vec![4]), extrinsics: Some(vec![1].into_iter().collect()) }), - (vec![100], OverlayedValue { value: Some(vec![101]), extrinsics: Some(vec![NO_EXTRINSIC_INDEX].into_iter().collect()) }), - ].into_iter().collect()); - - assert_eq!(strip_extrinsic_index(&overlay.prospective.top), - vec![ - (vec![1], OverlayedValue { value: Some(vec![8]), extrinsics: Some(vec![4].into_iter().collect()) }), - (vec![3], OverlayedValue { value: Some(vec![7]), extrinsics: Some(vec![3].into_iter().collect()) }), - ].into_iter().collect()); - - overlay.commit_prospective(); - - assert_eq!(strip_extrinsic_index(&overlay.committed.top), - vec![ - (vec![1], OverlayedValue { value: Some(vec![8]), extrinsics: Some(vec![0, 2, 4].into_iter().collect()) }), - (vec![3], OverlayedValue { value: Some(vec![7]), extrinsics: Some(vec![1, 3].into_iter().collect()) }), - (vec![100], OverlayedValue { value: Some(vec![101]), extrinsics: Some(vec![NO_EXTRINSIC_INDEX].into_iter().collect()) }), - ].into_iter().collect()); - - assert_eq!(overlay.prospective, - Default::default()); - } + use super::*; + use crate::backend::InMemory; + use crate::changes_trie::InMemoryStorage as InMemoryChangesTrieStorage; + use crate::ext::Ext; + use crate::Externalities; + use hex_literal::{hex, hex_impl}; + use 
primitives::storage::well_known_keys::EXTRINSIC_INDEX; + use primitives::{Blake2Hasher, H256}; + + fn strip_extrinsic_index( + map: &HashMap, OverlayedValue>, + ) -> HashMap, OverlayedValue> { + let mut clone = map.clone(); + clone.remove(&EXTRINSIC_INDEX.to_vec()); + clone + } + + #[test] + fn overlayed_storage_works() { + let mut overlayed = OverlayedChanges::default(); + + let key = vec![42, 69, 169, 142]; + + assert!(overlayed.storage(&key).is_none()); + + overlayed.set_storage(key.clone(), Some(vec![1, 2, 3])); + assert_eq!(overlayed.storage(&key).unwrap(), Some(&[1, 2, 3][..])); + + overlayed.commit_prospective(); + assert_eq!(overlayed.storage(&key).unwrap(), Some(&[1, 2, 3][..])); + + overlayed.set_storage(key.clone(), Some(vec![])); + assert_eq!(overlayed.storage(&key).unwrap(), Some(&[][..])); + + overlayed.set_storage(key.clone(), None); + assert!(overlayed.storage(&key).unwrap().is_none()); + + overlayed.discard_prospective(); + assert_eq!(overlayed.storage(&key).unwrap(), Some(&[1, 2, 3][..])); + + overlayed.set_storage(key.clone(), None); + overlayed.commit_prospective(); + assert!(overlayed.storage(&key).unwrap().is_none()); + } + + #[test] + fn overlayed_storage_root_works() { + let initial: HashMap<_, _> = vec![ + (b"doe".to_vec(), b"reindeer".to_vec()), + (b"dog".to_vec(), b"puppyXXX".to_vec()), + (b"dogglesworth".to_vec(), b"catXXX".to_vec()), + (b"doug".to_vec(), b"notadog".to_vec()), + ] + .into_iter() + .collect(); + let backend = InMemory::::from(initial); + let mut overlay = OverlayedChanges { + committed: vec![ + (b"dog".to_vec(), Some(b"puppy".to_vec()).into()), + (b"dogglesworth".to_vec(), Some(b"catYYY".to_vec()).into()), + (b"doug".to_vec(), Some(vec![]).into()), + ] + .into_iter() + .collect(), + prospective: vec![ + (b"dogglesworth".to_vec(), Some(b"cat".to_vec()).into()), + (b"doug".to_vec(), None.into()), + ] + .into_iter() + .collect(), + ..Default::default() + }; + + let changes_trie_storage = InMemoryChangesTrieStorage::new(); + let mut ext = Ext::new( + &mut overlay, + &backend, + Some(&changes_trie_storage), + crate::NeverOffchainExt::new(), + ); + const ROOT: [u8; 32] = + hex!("0b41e488cccbd67d1f1089592c2c235f5c5399b053f7fe9152dd4b5f279914cd"); + assert_eq!(ext.storage_root(), H256::from(ROOT)); + } + + #[test] + fn changes_trie_configuration_is_saved() { + let mut overlay = OverlayedChanges::default(); + assert!(overlay.changes_trie_config.is_none()); + assert_eq!( + overlay.set_changes_trie_config(ChangesTrieConfig { + digest_interval: 4, + digest_levels: 1, + }), + true + ); + assert!(overlay.changes_trie_config.is_some()); + } + + #[test] + fn changes_trie_configuration_is_saved_twice() { + let mut overlay = OverlayedChanges::default(); + assert!(overlay.changes_trie_config.is_none()); + assert_eq!( + overlay.set_changes_trie_config(ChangesTrieConfig { + digest_interval: 4, + digest_levels: 1, + }), + true + ); + overlay.set_extrinsic_index(0); + overlay.set_storage(vec![1], Some(vec![2])); + assert_eq!( + overlay.set_changes_trie_config(ChangesTrieConfig { + digest_interval: 4, + digest_levels: 1, + }), + true + ); + assert_eq!( + strip_extrinsic_index(&overlay.prospective.top), + vec![( + vec![1], + OverlayedValue { + value: Some(vec![2]), + extrinsics: Some(vec![0].into_iter().collect()) + } + ),] + .into_iter() + .collect(), + ); + } + + #[test] + fn panics_when_trying_to_save_different_changes_trie_configuration() { + let mut overlay = OverlayedChanges::default(); + assert_eq!( + overlay.set_changes_trie_config(ChangesTrieConfig { + 
digest_interval: 4, + digest_levels: 1, + }), + true + ); + assert_eq!( + overlay.set_changes_trie_config(ChangesTrieConfig { + digest_interval: 2, + digest_levels: 1, + }), + false + ); + } + + #[test] + fn extrinsic_changes_are_collected() { + let mut overlay = OverlayedChanges::default(); + let _ = overlay.set_changes_trie_config(ChangesTrieConfig { + digest_interval: 4, + digest_levels: 1, + }); + + overlay.set_storage(vec![100], Some(vec![101])); + + overlay.set_extrinsic_index(0); + overlay.set_storage(vec![1], Some(vec![2])); + + overlay.set_extrinsic_index(1); + overlay.set_storage(vec![3], Some(vec![4])); + + overlay.set_extrinsic_index(2); + overlay.set_storage(vec![1], Some(vec![6])); + + assert_eq!( + strip_extrinsic_index(&overlay.prospective.top), + vec![ + ( + vec![1], + OverlayedValue { + value: Some(vec![6]), + extrinsics: Some(vec![0, 2].into_iter().collect()) + } + ), + ( + vec![3], + OverlayedValue { + value: Some(vec![4]), + extrinsics: Some(vec![1].into_iter().collect()) + } + ), + ( + vec![100], + OverlayedValue { + value: Some(vec![101]), + extrinsics: Some(vec![NO_EXTRINSIC_INDEX].into_iter().collect()) + } + ), + ] + .into_iter() + .collect() + ); + + overlay.commit_prospective(); + + overlay.set_extrinsic_index(3); + overlay.set_storage(vec![3], Some(vec![7])); + + overlay.set_extrinsic_index(4); + overlay.set_storage(vec![1], Some(vec![8])); + + assert_eq!( + strip_extrinsic_index(&overlay.committed.top), + vec![ + ( + vec![1], + OverlayedValue { + value: Some(vec![6]), + extrinsics: Some(vec![0, 2].into_iter().collect()) + } + ), + ( + vec![3], + OverlayedValue { + value: Some(vec![4]), + extrinsics: Some(vec![1].into_iter().collect()) + } + ), + ( + vec![100], + OverlayedValue { + value: Some(vec![101]), + extrinsics: Some(vec![NO_EXTRINSIC_INDEX].into_iter().collect()) + } + ), + ] + .into_iter() + .collect() + ); + + assert_eq!( + strip_extrinsic_index(&overlay.prospective.top), + vec![ + ( + vec![1], + OverlayedValue { + value: Some(vec![8]), + extrinsics: Some(vec![4].into_iter().collect()) + } + ), + ( + vec![3], + OverlayedValue { + value: Some(vec![7]), + extrinsics: Some(vec![3].into_iter().collect()) + } + ), + ] + .into_iter() + .collect() + ); + + overlay.commit_prospective(); + + assert_eq!( + strip_extrinsic_index(&overlay.committed.top), + vec![ + ( + vec![1], + OverlayedValue { + value: Some(vec![8]), + extrinsics: Some(vec![0, 2, 4].into_iter().collect()) + } + ), + ( + vec![3], + OverlayedValue { + value: Some(vec![7]), + extrinsics: Some(vec![1, 3].into_iter().collect()) + } + ), + ( + vec![100], + OverlayedValue { + value: Some(vec![101]), + extrinsics: Some(vec![NO_EXTRINSIC_INDEX].into_iter().collect()) + } + ), + ] + .into_iter() + .collect() + ); + + assert_eq!(overlay.prospective, Default::default()); + } } diff --git a/core/state-machine/src/proving_backend.rs b/core/state-machine/src/proving_backend.rs index 4d85791faf..91659ed026 100644 --- a/core/state-machine/src/proving_backend.rs +++ b/core/state-machine/src/proving_backend.rs @@ -16,256 +16,288 @@ //! Proving state machine backend. 
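In outline, the proving backend wraps a `TrieBackend`, records every trie node touched by a read, and hands the recorded nodes out as a proof that can be re-checked against the state root alone. A condensed sketch of the `proof_recorded_and_checked` test at the end of this file:

```rust
// Build a one-entry in-memory state and turn it into a trie backend.
let in_memory = InMemory::<Blake2Hasher>::default()
    .update(vec![(None, vec![42], Some(vec![42]))]);
let root = in_memory.storage_root(::std::iter::empty()).0;
let trie = in_memory.try_into_trie_backend().unwrap();

// Every read through the proving backend is recorded.
let proving = ProvingBackend::new(&trie);
assert_eq!(proving.storage(&[42]).unwrap().unwrap(), vec![42]);
let proof = proving.extract_proof();

// The root plus the recorded nodes are enough to re-check the read.
let checked = create_proof_check_backend::<Blake2Hasher>(root.into(), proof).unwrap();
assert_eq!(checked.storage(&[42]).unwrap().unwrap(), vec![42]);
```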
-use std::cell::RefCell; -use log::debug; -use hash_db::Hasher; -use heapsize::HeapSizeOf; -use hash_db::HashDB; -use trie::{Recorder, MemoryDB, PrefixedMemoryDB, TrieError, default_child_trie_root, read_trie_value_with, read_child_trie_value_with, record_all_keys}; use crate::trie_backend::TrieBackend; use crate::trie_backend_essence::{Ephemeral, TrieBackendEssence, TrieBackendStorage}; -use crate::{Error, ExecutionError, Backend}; +use crate::{Backend, Error, ExecutionError}; +use hash_db::HashDB; +use hash_db::Hasher; +use heapsize::HeapSizeOf; +use log::debug; +use std::cell::RefCell; +use trie::{ + default_child_trie_root, read_child_trie_value_with, read_trie_value_with, record_all_keys, + MemoryDB, PrefixedMemoryDB, Recorder, TrieError, +}; /// Patricia trie-based backend essence which also tracks all touched storage trie values. /// These can be sent to remote node and used as a proof of execution. pub struct ProvingBackendEssence<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { - pub(crate) backend: &'a TrieBackendEssence, - pub(crate) proof_recorder: &'a mut Recorder, + pub(crate) backend: &'a TrieBackendEssence, + pub(crate) proof_recorder: &'a mut Recorder, } impl<'a, S, H> ProvingBackendEssence<'a, S, H> - where - S: TrieBackendStorage, - H: Hasher, - H::Out: HeapSizeOf, +where + S: TrieBackendStorage, + H: Hasher, + H::Out: HeapSizeOf, { - pub fn storage(&mut self, key: &[u8]) -> Result>, String> { - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral::new( - self.backend.backend_storage(), - &mut read_overlay, - ); - - let map_e = |e| format!("Trie lookup error: {}", e); - - read_trie_value_with::>(&eph, self.backend.root(), key, &mut *self.proof_recorder).map_err(map_e) - } - - pub fn child_storage(&mut self, storage_key: &[u8], key: &[u8]) -> Result>, String> { - let root = self.storage(storage_key)?.unwrap_or(default_child_trie_root::(storage_key)); - - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral::new( - self.backend.backend_storage(), - &mut read_overlay, - ); - - let map_e = |e| format!("Trie lookup error: {}", e); - - read_child_trie_value_with(storage_key, &eph, &root, key, &mut *self.proof_recorder).map_err(map_e) - } - - pub fn record_all_keys(&mut self) { - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral::new( - self.backend.backend_storage(), - &mut read_overlay, - ); - - let mut iter = move || -> Result<(), Box>> { - let root = self.backend.root(); - record_all_keys::(&eph, root, &mut *self.proof_recorder) - }; - - if let Err(e) = iter() { - debug!(target: "trie", "Error while recording all keys: {}", e); - } - } + pub fn storage(&mut self, key: &[u8]) -> Result>, String> { + let mut read_overlay = S::Overlay::default(); + let eph = Ephemeral::new(self.backend.backend_storage(), &mut read_overlay); + + let map_e = |e| format!("Trie lookup error: {}", e); + + read_trie_value_with::>( + &eph, + self.backend.root(), + key, + &mut *self.proof_recorder, + ) + .map_err(map_e) + } + + pub fn child_storage( + &mut self, + storage_key: &[u8], + key: &[u8], + ) -> Result>, String> { + let root = self + .storage(storage_key)? 
+ .unwrap_or(default_child_trie_root::(storage_key)); + + let mut read_overlay = S::Overlay::default(); + let eph = Ephemeral::new(self.backend.backend_storage(), &mut read_overlay); + + let map_e = |e| format!("Trie lookup error: {}", e); + + read_child_trie_value_with(storage_key, &eph, &root, key, &mut *self.proof_recorder) + .map_err(map_e) + } + + pub fn record_all_keys(&mut self) { + let mut read_overlay = S::Overlay::default(); + let eph = Ephemeral::new(self.backend.backend_storage(), &mut read_overlay); + + let mut iter = move || -> Result<(), Box>> { + let root = self.backend.root(); + record_all_keys::(&eph, root, &mut *self.proof_recorder) + }; + + if let Err(e) = iter() { + debug!(target: "trie", "Error while recording all keys: {}", e); + } + } } /// Patricia trie-based backend which also tracks all touched storage trie values. /// These can be sent to remote node and used as a proof of execution. pub struct ProvingBackend<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { - backend: &'a TrieBackend, - proof_recorder: RefCell>, + backend: &'a TrieBackend, + proof_recorder: RefCell>, } impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> { - /// Create new proving backend. - pub fn new(backend: &'a TrieBackend) -> Self { - ProvingBackend { - backend, - proof_recorder: RefCell::new(Recorder::new()), - } - } - - /// Consume the backend, extracting the gathered proof in lexicographical order - /// by value. - pub fn extract_proof(self) -> Vec> { - self.proof_recorder.into_inner().drain() - .into_iter() - .map(|n| n.data.to_vec()) - .collect() - } + /// Create new proving backend. + pub fn new(backend: &'a TrieBackend) -> Self { + ProvingBackend { + backend, + proof_recorder: RefCell::new(Recorder::new()), + } + } + + /// Consume the backend, extracting the gathered proof in lexicographical order + /// by value. 
+ pub fn extract_proof(self) -> Vec> { + self.proof_recorder + .into_inner() + .drain() + .into_iter() + .map(|n| n.data.to_vec()) + .collect() + } } impl<'a, S, H> Backend for ProvingBackend<'a, S, H> - where - S: 'a + TrieBackendStorage, - H: 'a + Hasher, - H::Out: Ord + HeapSizeOf, +where + S: 'a + TrieBackendStorage, + H: 'a + Hasher, + H::Out: Ord + HeapSizeOf, { - type Error = String; - type Transaction = S::Overlay; - type TrieBackendStorage = PrefixedMemoryDB; - - fn storage(&self, key: &[u8]) -> Result>, Self::Error> { - ProvingBackendEssence { - backend: self.backend.essence(), - proof_recorder: &mut *self.proof_recorder.try_borrow_mut() - .expect("only fails when already borrowed; storage() is non-reentrant; qed"), - }.storage(key) - } - - fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result>, Self::Error> { - ProvingBackendEssence { - backend: self.backend.essence(), - proof_recorder: &mut *self.proof_recorder.try_borrow_mut() - .expect("only fails when already borrowed; child_storage() is non-reentrant; qed"), - }.child_storage(storage_key, key) - } - - fn for_keys_in_child_storage(&self, storage_key: &[u8], f: F) { - self.backend.for_keys_in_child_storage(storage_key, f) - } - - fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { - self.backend.for_keys_with_prefix(prefix, f) - } - - fn pairs(&self) -> Vec<(Vec, Vec)> { - self.backend.pairs() - } - - fn keys(&self, prefix: &Vec) -> Vec> { - self.backend.keys(prefix) - } - - fn storage_root(&self, delta: I) -> (H::Out, Self::Transaction) - where I: IntoIterator, Option>)> - { - self.backend.storage_root(delta) - } - - fn child_storage_root(&self, storage_key: &[u8], delta: I) -> (Vec, bool, Self::Transaction) - where - I: IntoIterator, Option>)>, - H::Out: Ord - { - self.backend.child_storage_root(storage_key, delta) - } - - fn try_into_trie_backend(self) -> Option> { - None - } + type Error = String; + type Transaction = S::Overlay; + type TrieBackendStorage = PrefixedMemoryDB; + + fn storage(&self, key: &[u8]) -> Result>, Self::Error> { + ProvingBackendEssence { + backend: self.backend.essence(), + proof_recorder: &mut *self + .proof_recorder + .try_borrow_mut() + .expect("only fails when already borrowed; storage() is non-reentrant; qed"), + } + .storage(key) + } + + fn child_storage( + &self, + storage_key: &[u8], + key: &[u8], + ) -> Result>, Self::Error> { + ProvingBackendEssence { + backend: self.backend.essence(), + proof_recorder: &mut *self + .proof_recorder + .try_borrow_mut() + .expect("only fails when already borrowed; child_storage() is non-reentrant; qed"), + } + .child_storage(storage_key, key) + } + + fn for_keys_in_child_storage(&self, storage_key: &[u8], f: F) { + self.backend.for_keys_in_child_storage(storage_key, f) + } + + fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { + self.backend.for_keys_with_prefix(prefix, f) + } + + fn pairs(&self) -> Vec<(Vec, Vec)> { + self.backend.pairs() + } + + fn keys(&self, prefix: &Vec) -> Vec> { + self.backend.keys(prefix) + } + + fn storage_root(&self, delta: I) -> (H::Out, Self::Transaction) + where + I: IntoIterator, Option>)>, + { + self.backend.storage_root(delta) + } + + fn child_storage_root( + &self, + storage_key: &[u8], + delta: I, + ) -> (Vec, bool, Self::Transaction) + where + I: IntoIterator, Option>)>, + H::Out: Ord, + { + self.backend.child_storage_root(storage_key, delta) + } + + fn try_into_trie_backend(self) -> Option> { + None + } } /// Create proof check backend. 
pub fn create_proof_check_backend( - root: H::Out, - proof: Vec> + root: H::Out, + proof: Vec>, ) -> Result, H>, Box> where - H: Hasher, - H::Out: HeapSizeOf, + H: Hasher, + H::Out: HeapSizeOf, { - let db = create_proof_check_backend_storage(proof); + let db = create_proof_check_backend_storage(proof); - if !db.contains(&root, &[]) { - return Err(Box::new(ExecutionError::InvalidProof) as Box); - } + if !db.contains(&root, &[]) { + return Err(Box::new(ExecutionError::InvalidProof) as Box); + } - Ok(TrieBackend::new(db, root)) + Ok(TrieBackend::new(db, root)) } /// Create in-memory storage of proof check backend. -pub fn create_proof_check_backend_storage( - proof: Vec> -) -> MemoryDB +pub fn create_proof_check_backend_storage(proof: Vec>) -> MemoryDB where - H: Hasher, - H::Out: HeapSizeOf, + H: Hasher, + H::Out: HeapSizeOf, { - let mut db = MemoryDB::default(); - for item in proof { - db.insert(&[], &item); - } - db + let mut db = MemoryDB::default(); + for item in proof { + db.insert(&[], &item); + } + db } #[cfg(test)] mod tests { - use crate::backend::{InMemory}; - use crate::trie_backend::tests::test_trie; - use super::*; - use primitives::{Blake2Hasher}; - - fn test_proving<'a>(trie_backend: &'a TrieBackend, Blake2Hasher>) -> ProvingBackend<'a, PrefixedMemoryDB, Blake2Hasher> { - ProvingBackend::new(trie_backend) - } - - #[test] - fn proof_is_empty_until_value_is_read() { - let trie_backend = test_trie(); - assert!(test_proving(&trie_backend).extract_proof().is_empty()); - } - - #[test] - fn proof_is_non_empty_after_value_is_read() { - let trie_backend = test_trie(); - let backend = test_proving(&trie_backend); - assert_eq!(backend.storage(b"key").unwrap(), Some(b"value".to_vec())); - assert!(!backend.extract_proof().is_empty()); - } - - #[test] - fn proof_is_invalid_when_does_not_contains_root() { - use primitives::H256; - assert!(create_proof_check_backend::(H256::from_low_u64_be(1), vec![]).is_err()); - } - - #[test] - fn passes_throgh_backend_calls() { - let trie_backend = test_trie(); - let proving_backend = test_proving(&trie_backend); - assert_eq!(trie_backend.storage(b"key").unwrap(), proving_backend.storage(b"key").unwrap()); - assert_eq!(trie_backend.pairs(), proving_backend.pairs()); - - let (trie_root, mut trie_mdb) = trie_backend.storage_root(::std::iter::empty()); - let (proving_root, mut proving_mdb) = proving_backend.storage_root(::std::iter::empty()); - assert_eq!(trie_root, proving_root); - assert_eq!(trie_mdb.drain(), proving_mdb.drain()); - } - - #[test] - fn proof_recorded_and_checked() { - let contents = (0..64).map(|i| (None, vec![i], Some(vec![i]))).collect::>(); - let in_memory = InMemory::::default(); - let in_memory = in_memory.update(contents); - let in_memory_root = in_memory.storage_root(::std::iter::empty()).0; - (0..64).for_each(|i| assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i])); - - let trie = in_memory.try_into_trie_backend().unwrap(); - let trie_root = trie.storage_root(::std::iter::empty()).0; - assert_eq!(in_memory_root, trie_root); - (0..64).for_each(|i| assert_eq!(trie.storage(&[i]).unwrap().unwrap(), vec![i])); - - let proving = ProvingBackend::new(&trie); - assert_eq!(proving.storage(&[42]).unwrap().unwrap(), vec![42]); - - let proof = proving.extract_proof(); - - let proof_check = create_proof_check_backend::(in_memory_root.into(), proof).unwrap(); - assert_eq!(proof_check.storage(&[42]).unwrap().unwrap(), vec![42]); - } + use super::*; + use crate::backend::InMemory; + use crate::trie_backend::tests::test_trie; + use 
+
+    fn test_proving<'a>(
+        trie_backend: &'a TrieBackend<PrefixedMemoryDB<Blake2Hasher>, Blake2Hasher>,
+    ) -> ProvingBackend<'a, PrefixedMemoryDB<Blake2Hasher>, Blake2Hasher> {
+        ProvingBackend::new(trie_backend)
+    }
+
+    #[test]
+    fn proof_is_empty_until_value_is_read() {
+        let trie_backend = test_trie();
+        assert!(test_proving(&trie_backend).extract_proof().is_empty());
+    }
+
+    #[test]
+    fn proof_is_non_empty_after_value_is_read() {
+        let trie_backend = test_trie();
+        let backend = test_proving(&trie_backend);
+        assert_eq!(backend.storage(b"key").unwrap(), Some(b"value".to_vec()));
+        assert!(!backend.extract_proof().is_empty());
+    }
+
+    #[test]
+    fn proof_is_invalid_when_does_not_contains_root() {
+        use primitives::H256;
+        assert!(
+            create_proof_check_backend::<Blake2Hasher>(H256::from_low_u64_be(1), vec![]).is_err()
+        );
+    }
+
+    #[test]
+    fn passes_throgh_backend_calls() {
+        let trie_backend = test_trie();
+        let proving_backend = test_proving(&trie_backend);
+        assert_eq!(
+            trie_backend.storage(b"key").unwrap(),
+            proving_backend.storage(b"key").unwrap()
+        );
+        assert_eq!(trie_backend.pairs(), proving_backend.pairs());
+
+        let (trie_root, mut trie_mdb) = trie_backend.storage_root(::std::iter::empty());
+        let (proving_root, mut proving_mdb) = proving_backend.storage_root(::std::iter::empty());
+        assert_eq!(trie_root, proving_root);
+        assert_eq!(trie_mdb.drain(), proving_mdb.drain());
+    }
+
+    #[test]
+    fn proof_recorded_and_checked() {
+        let contents = (0..64)
+            .map(|i| (None, vec![i], Some(vec![i])))
+            .collect::<Vec<_>>();
+        let in_memory = InMemory::<Blake2Hasher>::default();
+        let in_memory = in_memory.update(contents);
+        let in_memory_root = in_memory.storage_root(::std::iter::empty()).0;
+        (0..64).for_each(|i| assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i]));
+
+        let trie = in_memory.try_into_trie_backend().unwrap();
+        let trie_root = trie.storage_root(::std::iter::empty()).0;
+        assert_eq!(in_memory_root, trie_root);
+        (0..64).for_each(|i| assert_eq!(trie.storage(&[i]).unwrap().unwrap(), vec![i]));
+
+        let proving = ProvingBackend::new(&trie);
+        assert_eq!(proving.storage(&[42]).unwrap().unwrap(), vec![42]);
+
+        let proof = proving.extract_proof();
+
+        let proof_check =
+            create_proof_check_backend::<Blake2Hasher>(in_memory_root.into(), proof).unwrap();
+        assert_eq!(proof_check.storage(&[42]).unwrap().unwrap(), vec![42]);
+    }
 }
diff --git a/core/state-machine/src/testing.rs b/core/state-machine/src/testing.rs
index d03cc8e76d..cf02f6077a 100644
--- a/core/state-machine/src/testing.rs
+++ b/core/state-machine/src/testing.rs
@@ -16,187 +16,233 @@
 //! Test implementation for Externalities.
 
-use std::collections::HashMap;
-use std::iter::FromIterator;
+use super::{Externalities, OverlayedChanges};
+use crate::backend::InMemory;
+use crate::changes_trie::{
+    compute_changes_trie_root, AnchorBlockId, InMemoryStorage as ChangesTrieInMemoryStorage,
+};
 use hash_db::Hasher;
 use heapsize::HeapSizeOf;
-use trie::trie_root;
-use crate::backend::InMemory;
-use crate::changes_trie::{compute_changes_trie_root, InMemoryStorage as ChangesTrieInMemoryStorage, AnchorBlockId};
-use primitives::storage::well_known_keys::{CHANGES_TRIE_CONFIG, CODE, HEAP_PAGES};
 use parity_codec::Encode;
-use super::{Externalities, OverlayedChanges};
+use primitives::storage::well_known_keys::{CHANGES_TRIE_CONFIG, CODE, HEAP_PAGES};
+use std::collections::HashMap;
+use std::iter::FromIterator;
+use trie::trie_root;
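The `testing.rs` hunk that follows reformats `TestExternalities`, the `HashMap`-backed `Externalities` used across this crate's tests. For orientation, a minimal sketch of how it is typically driven, mirroring the `commit_should_work` test further down in this file:

```rust
// Populate a test externalities instance and compute its storage root.
let mut ext = TestExternalities::<Blake2Hasher>::default();
ext.set_storage(b"doe".to_vec(), b"reindeer".to_vec());
ext.set_storage(b"dog".to_vec(), b"puppy".to_vec());
let root = ext.storage_root(); // trie root over the in-memory key/value map
```

 /// Simple HashMap-based Externalities impl.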
-pub struct TestExternalities where H::Out: HeapSizeOf { - inner: HashMap, Vec>, - changes_trie_storage: ChangesTrieInMemoryStorage, - changes: OverlayedChanges, - code: Option>, +pub struct TestExternalities +where + H::Out: HeapSizeOf, +{ + inner: HashMap, Vec>, + changes_trie_storage: ChangesTrieInMemoryStorage, + changes: OverlayedChanges, + code: Option>, } -impl TestExternalities where H::Out: HeapSizeOf { - /// Create a new instance of `TestExternalities` - pub fn new(inner: HashMap, Vec>) -> Self { - Self::new_with_code(&[], inner) - } - - /// Create a new instance of `TestExternalities` - pub fn new_with_code(code: &[u8], mut inner: HashMap, Vec>) -> Self { - let mut overlay = OverlayedChanges::default(); - super::set_changes_trie_config( - &mut overlay, - inner.get(&CHANGES_TRIE_CONFIG.to_vec()).cloned(), - false, - ).expect("changes trie configuration is correct in test env; qed"); - - inner.insert(HEAP_PAGES.to_vec(), 8u64.encode()); - - TestExternalities { - inner, - changes_trie_storage: ChangesTrieInMemoryStorage::new(), - changes: overlay, - code: Some(code.to_vec()), - } - } - - /// Insert key/value - pub fn insert(&mut self, k: Vec, v: Vec) -> Option> { - self.inner.insert(k, v) - } +impl TestExternalities +where + H::Out: HeapSizeOf, +{ + /// Create a new instance of `TestExternalities` + pub fn new(inner: HashMap, Vec>) -> Self { + Self::new_with_code(&[], inner) + } + + /// Create a new instance of `TestExternalities` + pub fn new_with_code(code: &[u8], mut inner: HashMap, Vec>) -> Self { + let mut overlay = OverlayedChanges::default(); + super::set_changes_trie_config( + &mut overlay, + inner.get(&CHANGES_TRIE_CONFIG.to_vec()).cloned(), + false, + ) + .expect("changes trie configuration is correct in test env; qed"); + + inner.insert(HEAP_PAGES.to_vec(), 8u64.encode()); + + TestExternalities { + inner, + changes_trie_storage: ChangesTrieInMemoryStorage::new(), + changes: overlay, + code: Some(code.to_vec()), + } + } + + /// Insert key/value + pub fn insert(&mut self, k: Vec, v: Vec) -> Option> { + self.inner.insert(k, v) + } } -impl ::std::fmt::Debug for TestExternalities where H::Out: HeapSizeOf { - fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { - write!(f, "{:?}", self.inner) - } +impl ::std::fmt::Debug for TestExternalities +where + H::Out: HeapSizeOf, +{ + fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { + write!(f, "{:?}", self.inner) + } } -impl PartialEq for TestExternalities where H::Out: HeapSizeOf { - fn eq(&self, other: &TestExternalities) -> bool { - self.inner.eq(&other.inner) - } +impl PartialEq for TestExternalities +where + H::Out: HeapSizeOf, +{ + fn eq(&self, other: &TestExternalities) -> bool { + self.inner.eq(&other.inner) + } } -impl FromIterator<(Vec, Vec)> for TestExternalities where H::Out: HeapSizeOf { - fn from_iter, Vec)>>(iter: I) -> Self { - let mut t = Self::new(Default::default()); - t.inner.extend(iter); - t - } +impl FromIterator<(Vec, Vec)> for TestExternalities +where + H::Out: HeapSizeOf, +{ + fn from_iter, Vec)>>(iter: I) -> Self { + let mut t = Self::new(Default::default()); + t.inner.extend(iter); + t + } } -impl Default for TestExternalities where H::Out: HeapSizeOf { - fn default() -> Self { Self::new(Default::default()) } +impl Default for TestExternalities +where + H::Out: HeapSizeOf, +{ + fn default() -> Self { + Self::new(Default::default()) + } } -impl From> for HashMap, Vec> where H::Out: HeapSizeOf { - fn from(tex: TestExternalities) -> Self { - tex.inner.into() - } +impl From> 
for HashMap, Vec> +where + H::Out: HeapSizeOf, +{ + fn from(tex: TestExternalities) -> Self { + tex.inner.into() + } } -impl From< HashMap, Vec> > for TestExternalities where H::Out: HeapSizeOf { - fn from(hashmap: HashMap, Vec>) -> Self { - TestExternalities { - inner: hashmap, - changes_trie_storage: ChangesTrieInMemoryStorage::new(), - changes: Default::default(), - code: None, - } - } +impl From, Vec>> for TestExternalities +where + H::Out: HeapSizeOf, +{ + fn from(hashmap: HashMap, Vec>) -> Self { + TestExternalities { + inner: hashmap, + changes_trie_storage: ChangesTrieInMemoryStorage::new(), + changes: Default::default(), + code: None, + } + } } // TODO child test primitives are currently limited to `changes` (for non child the way // things are defined seems utterly odd to (put changes in changes but never make them // available for read through inner) -impl Externalities for TestExternalities where H::Out: Ord + HeapSizeOf { - fn storage(&self, key: &[u8]) -> Option> { - match key { - CODE => self.code.clone(), - _ => self.inner.get(key).cloned(), - } - } - - fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Option> { - self.changes.child_storage(storage_key, key)?.map(Vec::from) - } - - fn place_storage(&mut self, key: Vec, maybe_value: Option>) { - self.changes.set_storage(key.clone(), maybe_value.clone()); - match key.as_ref() { - CODE => self.code = maybe_value, - _ => { - match maybe_value { - Some(value) => { self.inner.insert(key, value); } - None => { self.inner.remove(&key); } - } - } - } - } - - fn place_child_storage(&mut self, storage_key: Vec, key: Vec, value: Option>) -> bool { - self.changes.set_child_storage(storage_key, key, value); - // TODO place_child_storage and set_child_storage should always be valid (create child on set)? 
- true - } - - fn kill_child_storage(&mut self, storage_key: &[u8]) { - self.changes.clear_child_storage(storage_key); - } - - fn clear_prefix(&mut self, prefix: &[u8]) { - self.changes.clear_prefix(prefix); - self.inner.retain(|key, _| !key.starts_with(prefix)); - } - - fn chain_id(&self) -> u64 { 42 } - - fn storage_root(&mut self) -> H::Out { - trie_root::(self.inner.clone()) - } - - fn child_storage_root(&mut self, _storage_key: &[u8]) -> Option> { - None - } - - fn storage_changes_root(&mut self, parent: H::Out, parent_num: u64) -> Option { - compute_changes_trie_root::<_, _, H>( - &InMemory::default(), - Some(&self.changes_trie_storage), - &self.changes, - &AnchorBlockId { hash: parent, number: parent_num }, - ).map(|(root, _)| root.clone()) - } - - fn submit_extrinsic(&mut self, _extrinsic: Vec) -> Result<(), ()> { - unimplemented!() - } +impl Externalities for TestExternalities +where + H::Out: Ord + HeapSizeOf, +{ + fn storage(&self, key: &[u8]) -> Option> { + match key { + CODE => self.code.clone(), + _ => self.inner.get(key).cloned(), + } + } + + fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Option> { + self.changes.child_storage(storage_key, key)?.map(Vec::from) + } + + fn place_storage(&mut self, key: Vec, maybe_value: Option>) { + self.changes.set_storage(key.clone(), maybe_value.clone()); + match key.as_ref() { + CODE => self.code = maybe_value, + _ => match maybe_value { + Some(value) => { + self.inner.insert(key, value); + } + None => { + self.inner.remove(&key); + } + }, + } + } + + fn place_child_storage( + &mut self, + storage_key: Vec, + key: Vec, + value: Option>, + ) -> bool { + self.changes.set_child_storage(storage_key, key, value); + // TODO place_child_storage and set_child_storage should always be valid (create child on set)? 
+ true + } + + fn kill_child_storage(&mut self, storage_key: &[u8]) { + self.changes.clear_child_storage(storage_key); + } + + fn clear_prefix(&mut self, prefix: &[u8]) { + self.changes.clear_prefix(prefix); + self.inner.retain(|key, _| !key.starts_with(prefix)); + } + + fn chain_id(&self) -> u64 { + 42 + } + + fn storage_root(&mut self) -> H::Out { + trie_root::(self.inner.clone()) + } + + fn child_storage_root(&mut self, _storage_key: &[u8]) -> Option> { + None + } + + fn storage_changes_root(&mut self, parent: H::Out, parent_num: u64) -> Option { + compute_changes_trie_root::<_, _, H>( + &InMemory::default(), + Some(&self.changes_trie_storage), + &self.changes, + &AnchorBlockId { + hash: parent, + number: parent_num, + }, + ) + .map(|(root, _)| root.clone()) + } + + fn submit_extrinsic(&mut self, _extrinsic: Vec) -> Result<(), ()> { + unimplemented!() + } } #[cfg(test)] mod tests { - use super::*; - use primitives::{Blake2Hasher, H256}; - use hex_literal::{hex, hex_impl}; - - #[test] - fn commit_should_work() { - let mut ext = TestExternalities::::default(); - ext.set_storage(b"doe".to_vec(), b"reindeer".to_vec()); - ext.set_storage(b"dog".to_vec(), b"puppy".to_vec()); - ext.set_storage(b"dogglesworth".to_vec(), b"cat".to_vec()); - const ROOT: [u8; 32] = hex!("0b33ed94e74e0f8e92a55923bece1ed02d16cf424e124613ddebc53ac3eeeabe"); - assert_eq!(ext.storage_root(), H256::from(ROOT)); - } - - #[test] - fn set_and_retrieve_code() { - let mut ext = TestExternalities::::default(); - - let code = vec![1, 2, 3]; - ext.set_storage(CODE.to_vec(), code.clone()); - - assert_eq!(&ext.storage(CODE).unwrap(), &code); - } + use super::*; + use hex_literal::{hex, hex_impl}; + use primitives::{Blake2Hasher, H256}; + + #[test] + fn commit_should_work() { + let mut ext = TestExternalities::::default(); + ext.set_storage(b"doe".to_vec(), b"reindeer".to_vec()); + ext.set_storage(b"dog".to_vec(), b"puppy".to_vec()); + ext.set_storage(b"dogglesworth".to_vec(), b"cat".to_vec()); + const ROOT: [u8; 32] = + hex!("0b33ed94e74e0f8e92a55923bece1ed02d16cf424e124613ddebc53ac3eeeabe"); + assert_eq!(ext.storage_root(), H256::from(ROOT)); + } + + #[test] + fn set_and_retrieve_code() { + let mut ext = TestExternalities::::default(); + + let code = vec![1, 2, 3]; + ext.set_storage(CODE.to_vec(), code.clone()); + + assert_eq!(&ext.storage(CODE).unwrap(), &code); + } } diff --git a/core/state-machine/src/trie_backend.rs b/core/state-machine/src/trie_backend.rs index b152d7fea1..7113987ae9 100644 --- a/core/state-machine/src/trie_backend.rs +++ b/core/state-machine/src/trie_backend.rs @@ -16,256 +16,280 @@ //! Trie-based state machine backend. -use log::{warn, debug}; +use crate::trie_backend_essence::{Ephemeral, TrieBackendEssence, TrieBackendStorage}; +use crate::Backend; use hash_db::Hasher; use heapsize::HeapSizeOf; -use trie::{TrieDB, TrieError, Trie, delta_trie_root, default_child_trie_root, child_delta_trie_root}; -use crate::trie_backend_essence::{TrieBackendEssence, TrieBackendStorage, Ephemeral}; -use crate::Backend; +use log::{debug, warn}; +use trie::{ + child_delta_trie_root, default_child_trie_root, delta_trie_root, Trie, TrieDB, TrieError, +}; /// Patricia trie-based backend. Transaction type is an overlay of changes to commit. pub struct TrieBackend, H: Hasher> { - essence: TrieBackendEssence, + essence: TrieBackendEssence, } -impl, H: Hasher> TrieBackend where H::Out: HeapSizeOf { - /// Create new trie-based backend. 
- pub fn new(storage: S, root: H::Out) -> Self { - TrieBackend { - essence: TrieBackendEssence::new(storage, root), - } - } - - /// Get backend essence reference. - pub fn essence(&self) -> &TrieBackendEssence { - &self.essence - } - - /// Get backend storage reference. - pub fn backend_storage(&self) -> &S { - self.essence.backend_storage() - } - - /// Get trie root. - pub fn root(&self) -> &H::Out { - self.essence.root() - } - - /// Consumes self and returns underlying storage. - pub fn into_storage(self) -> S { - self.essence.into_storage() - } +impl, H: Hasher> TrieBackend +where + H::Out: HeapSizeOf, +{ + /// Create new trie-based backend. + pub fn new(storage: S, root: H::Out) -> Self { + TrieBackend { + essence: TrieBackendEssence::new(storage, root), + } + } + + /// Get backend essence reference. + pub fn essence(&self) -> &TrieBackendEssence { + &self.essence + } + + /// Get backend storage reference. + pub fn backend_storage(&self) -> &S { + self.essence.backend_storage() + } + + /// Get trie root. + pub fn root(&self) -> &H::Out { + self.essence.root() + } + + /// Consumes self and returns underlying storage. + pub fn into_storage(self) -> S { + self.essence.into_storage() + } } impl super::Error for String {} -impl, H: Hasher> Backend for TrieBackend where - H::Out: Ord + HeapSizeOf, +impl, H: Hasher> Backend for TrieBackend +where + H::Out: Ord + HeapSizeOf, { - type Error = String; - type Transaction = S::Overlay; - type TrieBackendStorage = S; - - fn storage(&self, key: &[u8]) -> Result>, Self::Error> { - self.essence.storage(key) - } - - fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result>, Self::Error> { - self.essence.child_storage(storage_key, key) - } - - fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { - self.essence.for_keys_with_prefix(prefix, f) - } - - fn for_keys_in_child_storage(&self, storage_key: &[u8], f: F) { - self.essence.for_keys_in_child_storage(storage_key, f) - } - - fn pairs(&self) -> Vec<(Vec, Vec)> { - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral::new(self.essence.backend_storage(), &mut read_overlay); - - let collect_all = || -> Result<_, Box>> { - let trie = TrieDB::::new(&eph, self.essence.root())?; - let mut v = Vec::new(); - for x in trie.iter()? { - let (key, value) = x?; - v.push((key.to_vec(), value.to_vec())); - } - - Ok(v) - }; - - match collect_all() { - Ok(v) => v, - Err(e) => { - debug!(target: "trie", "Error extracting trie values: {}", e); - Vec::new() - } - } - } - - fn keys(&self, prefix: &Vec) -> Vec> { - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral::new(self.essence.backend_storage(), &mut read_overlay); - - let collect_all = || -> Result<_, Box>> { - let trie = TrieDB::::new(&eph, self.essence.root())?; - let mut v = Vec::new(); - for x in trie.iter()? 
{ - let (key, _) = x?; - if key.starts_with(prefix) { - v.push(key.to_vec()); - } - } - - Ok(v) - }; - - collect_all().map_err(|e| debug!(target: "trie", "Error extracting trie keys: {}", e)).unwrap_or_default() - } - - fn storage_root(&self, delta: I) -> (H::Out, S::Overlay) - where I: IntoIterator, Option>)> - { - let mut write_overlay = S::Overlay::default(); - let mut root = *self.essence.root(); - - { - let mut eph = Ephemeral::new( - self.essence.backend_storage(), - &mut write_overlay, - ); - - match delta_trie_root::(&mut eph, root, delta) { - Ok(ret) => root = ret, - Err(e) => warn!(target: "trie", "Failed to write to trie: {}", e), - } - } - - (root, write_overlay) - } - - fn child_storage_root(&self, storage_key: &[u8], delta: I) -> (Vec, bool, Self::Transaction) - where - I: IntoIterator, Option>)>, - H::Out: Ord - { - let default_root = default_child_trie_root::(storage_key); - - let mut write_overlay = S::Overlay::default(); - let mut root = match self.storage(storage_key) { - Ok(value) => value.unwrap_or(default_child_trie_root::(storage_key)), - Err(e) => { - warn!(target: "trie", "Failed to read child storage root: {}", e); - default_root.clone() - }, - }; - - { - let mut eph = Ephemeral::new( - self.essence.backend_storage(), - &mut write_overlay, - ); - - match child_delta_trie_root::(storage_key, &mut eph, root.clone(), delta) { - Ok(ret) => root = ret, - Err(e) => warn!(target: "trie", "Failed to write to trie: {}", e), - } - } - - let is_default = root == default_root; - - (root, is_default, write_overlay) - } - - fn try_into_trie_backend(self) -> Option> { - Some(self) - } + type Error = String; + type Transaction = S::Overlay; + type TrieBackendStorage = S; + + fn storage(&self, key: &[u8]) -> Result>, Self::Error> { + self.essence.storage(key) + } + + fn child_storage( + &self, + storage_key: &[u8], + key: &[u8], + ) -> Result>, Self::Error> { + self.essence.child_storage(storage_key, key) + } + + fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { + self.essence.for_keys_with_prefix(prefix, f) + } + + fn for_keys_in_child_storage(&self, storage_key: &[u8], f: F) { + self.essence.for_keys_in_child_storage(storage_key, f) + } + + fn pairs(&self) -> Vec<(Vec, Vec)> { + let mut read_overlay = S::Overlay::default(); + let eph = Ephemeral::new(self.essence.backend_storage(), &mut read_overlay); + + let collect_all = || -> Result<_, Box>> { + let trie = TrieDB::::new(&eph, self.essence.root())?; + let mut v = Vec::new(); + for x in trie.iter()? { + let (key, value) = x?; + v.push((key.to_vec(), value.to_vec())); + } + + Ok(v) + }; + + match collect_all() { + Ok(v) => v, + Err(e) => { + debug!(target: "trie", "Error extracting trie values: {}", e); + Vec::new() + } + } + } + + fn keys(&self, prefix: &Vec) -> Vec> { + let mut read_overlay = S::Overlay::default(); + let eph = Ephemeral::new(self.essence.backend_storage(), &mut read_overlay); + + let collect_all = || -> Result<_, Box>> { + let trie = TrieDB::::new(&eph, self.essence.root())?; + let mut v = Vec::new(); + for x in trie.iter()? 
{ + let (key, _) = x?; + if key.starts_with(prefix) { + v.push(key.to_vec()); + } + } + + Ok(v) + }; + + collect_all() + .map_err(|e| debug!(target: "trie", "Error extracting trie keys: {}", e)) + .unwrap_or_default() + } + + fn storage_root(&self, delta: I) -> (H::Out, S::Overlay) + where + I: IntoIterator, Option>)>, + { + let mut write_overlay = S::Overlay::default(); + let mut root = *self.essence.root(); + + { + let mut eph = Ephemeral::new(self.essence.backend_storage(), &mut write_overlay); + + match delta_trie_root::(&mut eph, root, delta) { + Ok(ret) => root = ret, + Err(e) => warn!(target: "trie", "Failed to write to trie: {}", e), + } + } + + (root, write_overlay) + } + + fn child_storage_root( + &self, + storage_key: &[u8], + delta: I, + ) -> (Vec, bool, Self::Transaction) + where + I: IntoIterator, Option>)>, + H::Out: Ord, + { + let default_root = default_child_trie_root::(storage_key); + + let mut write_overlay = S::Overlay::default(); + let mut root = match self.storage(storage_key) { + Ok(value) => value.unwrap_or(default_child_trie_root::(storage_key)), + Err(e) => { + warn!(target: "trie", "Failed to read child storage root: {}", e); + default_root.clone() + } + }; + + { + let mut eph = Ephemeral::new(self.essence.backend_storage(), &mut write_overlay); + + match child_delta_trie_root::(storage_key, &mut eph, root.clone(), delta) + { + Ok(ret) => root = ret, + Err(e) => warn!(target: "trie", "Failed to write to trie: {}", e), + } + } + + let is_default = root == default_root; + + (root, is_default, write_overlay) + } + + fn try_into_trie_backend(self) -> Option> { + Some(self) + } } #[cfg(test)] pub mod tests { - use std::collections::HashSet; - use primitives::{Blake2Hasher, H256}; - use trie::{TrieMut, TrieDBMut, PrefixedMemoryDB}; - use super::*; - - fn test_db() -> (PrefixedMemoryDB, H256) { - let mut root = H256::default(); - let mut mdb = PrefixedMemoryDB::::default(); - { - let mut trie = TrieDBMut::new(&mut mdb, &mut root); - trie.insert(b"key", b"value").expect("insert failed"); - trie.insert(b"value1", &[42]).expect("insert failed"); - trie.insert(b"value2", &[24]).expect("insert failed"); - trie.insert(b":code", b"return 42").expect("insert failed"); - for i in 128u8..255u8 { - trie.insert(&[i], &[i]).unwrap(); - } - } - (mdb, root) - } - - pub(crate) fn test_trie() -> TrieBackend, Blake2Hasher> { - let (mdb, root) = test_db(); - TrieBackend::new(mdb, root) - } - - #[test] - fn read_from_storage_returns_some() { - assert_eq!(test_trie().storage(b"key").unwrap(), Some(b"value".to_vec())); - } - - #[test] - fn read_from_storage_returns_none() { - assert_eq!(test_trie().storage(b"non-existing-key").unwrap(), None); - } - - #[test] - fn pairs_are_not_empty_on_non_empty_storage() { - assert!(!test_trie().pairs().is_empty()); - } - - #[test] - fn pairs_are_empty_on_empty_storage() { - assert!(TrieBackend::, Blake2Hasher>::new( - PrefixedMemoryDB::default(), - Default::default(), - ).pairs().is_empty()); - } - - #[test] - fn storage_root_is_non_default() { - assert!(test_trie().storage_root(::std::iter::empty()).0 != H256::repeat_byte(0)); - } - - #[test] - fn storage_root_transaction_is_empty() { - assert!(test_trie().storage_root(::std::iter::empty()).1.drain().is_empty()); - } - - #[test] - fn storage_root_transaction_is_non_empty() { - let (new_root, mut tx) = test_trie().storage_root(vec![(b"new-key".to_vec(), Some(b"new-value".to_vec()))]); - assert!(!tx.drain().is_empty()); - assert!(new_root != test_trie().storage_root(::std::iter::empty()).0); - } - - 
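The `storage_root` calls in the surrounding tests highlight the method's contract: it consumes a delta iterator of `(key, Option<value>)` pairs and returns the prospective root plus a transaction overlay, without mutating the backend itself. A small sketch built on the `test_trie` helper defined in this module, echoing `storage_root_transaction_is_non_empty`:

```rust
// A root computed over a delta differs from the unmodified root.
let backend = test_trie();
let (old_root, _) = backend.storage_root(::std::iter::empty());
let delta = vec![(b"new-key".to_vec(), Some(b"new-value".to_vec()))];
let (new_root, mut tx) = backend.storage_root(delta);
assert!(new_root != old_root);
assert!(!tx.drain().is_empty()); // the overlay carries the nodes to persist
```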
#[test] - fn prefix_walking_works() { - let trie = test_trie(); - - let mut seen = HashSet::new(); - trie.for_keys_with_prefix(b"value", |key| { - let for_first_time = seen.insert(key.to_vec()); - assert!(for_first_time, "Seen key '{:?}' more than once", key); - }); - - let mut expected = HashSet::new(); - expected.insert(b"value1".to_vec()); - expected.insert(b"value2".to_vec()); - assert_eq!(seen, expected); - } + use super::*; + use primitives::{Blake2Hasher, H256}; + use std::collections::HashSet; + use trie::{PrefixedMemoryDB, TrieDBMut, TrieMut}; + + fn test_db() -> (PrefixedMemoryDB, H256) { + let mut root = H256::default(); + let mut mdb = PrefixedMemoryDB::::default(); + { + let mut trie = TrieDBMut::new(&mut mdb, &mut root); + trie.insert(b"key", b"value").expect("insert failed"); + trie.insert(b"value1", &[42]).expect("insert failed"); + trie.insert(b"value2", &[24]).expect("insert failed"); + trie.insert(b":code", b"return 42").expect("insert failed"); + for i in 128u8..255u8 { + trie.insert(&[i], &[i]).unwrap(); + } + } + (mdb, root) + } + + pub(crate) fn test_trie() -> TrieBackend, Blake2Hasher> { + let (mdb, root) = test_db(); + TrieBackend::new(mdb, root) + } + + #[test] + fn read_from_storage_returns_some() { + assert_eq!( + test_trie().storage(b"key").unwrap(), + Some(b"value".to_vec()) + ); + } + + #[test] + fn read_from_storage_returns_none() { + assert_eq!(test_trie().storage(b"non-existing-key").unwrap(), None); + } + + #[test] + fn pairs_are_not_empty_on_non_empty_storage() { + assert!(!test_trie().pairs().is_empty()); + } + + #[test] + fn pairs_are_empty_on_empty_storage() { + assert!( + TrieBackend::, Blake2Hasher>::new( + PrefixedMemoryDB::default(), + Default::default(), + ) + .pairs() + .is_empty() + ); + } + + #[test] + fn storage_root_is_non_default() { + assert!(test_trie().storage_root(::std::iter::empty()).0 != H256::repeat_byte(0)); + } + + #[test] + fn storage_root_transaction_is_empty() { + assert!(test_trie() + .storage_root(::std::iter::empty()) + .1 + .drain() + .is_empty()); + } + + #[test] + fn storage_root_transaction_is_non_empty() { + let (new_root, mut tx) = + test_trie().storage_root(vec![(b"new-key".to_vec(), Some(b"new-value".to_vec()))]); + assert!(!tx.drain().is_empty()); + assert!(new_root != test_trie().storage_root(::std::iter::empty()).0); + } + + #[test] + fn prefix_walking_works() { + let trie = test_trie(); + + let mut seen = HashSet::new(); + trie.for_keys_with_prefix(b"value", |key| { + let for_first_time = seen.insert(key.to_vec()); + assert!(for_first_time, "Seen key '{:?}' more than once", key); + }); + + let mut expected = HashSet::new(); + expected.insert(b"value1".to_vec()); + expected.insert(b"value2".to_vec()); + assert_eq!(seen, expected); + } } diff --git a/core/state-machine/src/trie_backend_essence.rs b/core/state-machine/src/trie_backend_essence.rs index 8101126c39..3c1f398f66 100644 --- a/core/state-machine/src/trie_backend_essence.rs +++ b/core/state-machine/src/trie_backend_essence.rs @@ -17,302 +17,312 @@ //! Trie-based state machine backend essence used to read values //! from storage. 
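`trie_backend_essence.rs`, reformatted next, holds the read-only core that `TrieBackend` wraps; `TrieBackend::essence()` (formatted in the previous file) exposes it directly. A minimal read through the essence, again assuming the `test_trie` helper from `trie_backend::tests`:

```rust
// Read a value through the essence rather than the full backend.
let backend = test_trie();
let essence = backend.essence(); // &TrieBackendEssence<PrefixedMemoryDB<Blake2Hasher>, Blake2Hasher>
assert_eq!(essence.storage(b"key").unwrap(), Some(b"value".to_vec()));
```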
-use std::ops::Deref; -use std::sync::Arc; -use log::{debug, warn}; +use crate::backend::Consolidate; +use crate::changes_trie::Storage as ChangesTrieStorage; use hash_db::{self, Hasher}; use heapsize::HeapSizeOf; -use trie::{TrieDB, Trie, MemoryDB, PrefixedMemoryDB, DBValue, TrieError, default_child_trie_root, read_trie_value, read_child_trie_value, for_keys_in_child_trie}; -use crate::changes_trie::Storage as ChangesTrieStorage; -use crate::backend::Consolidate; +use log::{debug, warn}; +use std::ops::Deref; +use std::sync::Arc; +use trie::{ + default_child_trie_root, for_keys_in_child_trie, read_child_trie_value, read_trie_value, + DBValue, MemoryDB, PrefixedMemoryDB, Trie, TrieDB, TrieError, +}; /// Patricia trie-based storage trait. pub trait Storage: Send + Sync { - /// Get a trie node. - fn get(&self, key: &H::Out, prefix: &[u8]) -> Result, String>; + /// Get a trie node. + fn get(&self, key: &H::Out, prefix: &[u8]) -> Result, String>; } /// Patricia trie-based pairs storage essence. pub struct TrieBackendEssence, H: Hasher> { - storage: S, - root: H::Out, + storage: S, + root: H::Out, } -impl, H: Hasher> TrieBackendEssence where H::Out: HeapSizeOf { - /// Create new trie-based backend. - pub fn new(storage: S, root: H::Out) -> Self { - TrieBackendEssence { - storage, - root, - } - } - - /// Get backend storage reference. - pub fn backend_storage(&self) -> &S { - &self.storage - } - - /// Get trie root. - pub fn root(&self) -> &H::Out { - &self.root - } - - /// Consumes self and returns underlying storage. - pub fn into_storage(self) -> S { - self.storage - } - - /// Get the value of storage at given key. - pub fn storage(&self, key: &[u8]) -> Result>, String> { - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral { - storage: &self.storage, - overlay: &mut read_overlay, - }; - - let map_e = |e| format!("Trie lookup error: {}", e); - - read_trie_value(&eph, &self.root, key).map_err(map_e) - } - - /// Get the value of child storage at given key. - pub fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result>, String> { - let root = self.storage(storage_key)?.unwrap_or(default_child_trie_root::(storage_key)); - - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral { - storage: &self.storage, - overlay: &mut read_overlay, - }; - - let map_e = |e| format!("Trie lookup error: {}", e); - - read_child_trie_value(storage_key, &eph, &root, key).map_err(map_e) - } - - /// Retrieve all entries keys of child storage and call `f` for each of those keys. - pub fn for_keys_in_child_storage(&self, storage_key: &[u8], f: F) { - let root = match self.storage(storage_key) { - Ok(v) => v.unwrap_or(default_child_trie_root::(storage_key)), - Err(e) => { - debug!(target: "trie", "Error while iterating child storage: {}", e); - return; - } - }; - - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral { - storage: &self.storage, - overlay: &mut read_overlay, - }; - - if let Err(e) = for_keys_in_child_trie::>(storage_key, &eph, &root, f) { - debug!(target: "trie", "Error while iterating child storage: {}", e); - } - } - - /// Execute given closure for all keys starting with prefix. 
- pub fn for_keys_with_prefix(&self, prefix: &[u8], mut f: F) { - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral { - storage: &self.storage, - overlay: &mut read_overlay, - }; - - let mut iter = move || -> Result<(), Box>> { - let trie = TrieDB::::new(&eph, &self.root)?; - let mut iter = trie.iter()?; - - iter.seek(prefix)?; - - for x in iter { - let (key, _) = x?; - - if !key.starts_with(prefix) { - break; - } - - f(&key); - } - - Ok(()) - }; - - if let Err(e) = iter() { - debug!(target: "trie", "Error while iterating by prefix: {}", e); - } - } +impl, H: Hasher> TrieBackendEssence +where + H::Out: HeapSizeOf, +{ + /// Create new trie-based backend. + pub fn new(storage: S, root: H::Out) -> Self { + TrieBackendEssence { storage, root } + } + + /// Get backend storage reference. + pub fn backend_storage(&self) -> &S { + &self.storage + } + + /// Get trie root. + pub fn root(&self) -> &H::Out { + &self.root + } + + /// Consumes self and returns underlying storage. + pub fn into_storage(self) -> S { + self.storage + } + + /// Get the value of storage at given key. + pub fn storage(&self, key: &[u8]) -> Result>, String> { + let mut read_overlay = S::Overlay::default(); + let eph = Ephemeral { + storage: &self.storage, + overlay: &mut read_overlay, + }; + + let map_e = |e| format!("Trie lookup error: {}", e); + + read_trie_value(&eph, &self.root, key).map_err(map_e) + } + + /// Get the value of child storage at given key. + pub fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result>, String> { + let root = self + .storage(storage_key)? + .unwrap_or(default_child_trie_root::(storage_key)); + + let mut read_overlay = S::Overlay::default(); + let eph = Ephemeral { + storage: &self.storage, + overlay: &mut read_overlay, + }; + + let map_e = |e| format!("Trie lookup error: {}", e); + + read_child_trie_value(storage_key, &eph, &root, key).map_err(map_e) + } + + /// Retrieve all entries keys of child storage and call `f` for each of those keys. + pub fn for_keys_in_child_storage(&self, storage_key: &[u8], f: F) { + let root = match self.storage(storage_key) { + Ok(v) => v.unwrap_or(default_child_trie_root::(storage_key)), + Err(e) => { + debug!(target: "trie", "Error while iterating child storage: {}", e); + return; + } + }; + + let mut read_overlay = S::Overlay::default(); + let eph = Ephemeral { + storage: &self.storage, + overlay: &mut read_overlay, + }; + + if let Err(e) = for_keys_in_child_trie::>(storage_key, &eph, &root, f) + { + debug!(target: "trie", "Error while iterating child storage: {}", e); + } + } + + /// Execute given closure for all keys starting with prefix. 
+ pub fn for_keys_with_prefix(&self, prefix: &[u8], mut f: F) { + let mut read_overlay = S::Overlay::default(); + let eph = Ephemeral { + storage: &self.storage, + overlay: &mut read_overlay, + }; + + let mut iter = move || -> Result<(), Box>> { + let trie = TrieDB::::new(&eph, &self.root)?; + let mut iter = trie.iter()?; + + iter.seek(prefix)?; + + for x in iter { + let (key, _) = x?; + + if !key.starts_with(prefix) { + break; + } + + f(&key); + } + + Ok(()) + }; + + if let Err(e) = iter() { + debug!(target: "trie", "Error while iterating by prefix: {}", e); + } + } } pub(crate) struct Ephemeral<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { - storage: &'a S, - overlay: &'a mut S::Overlay, + storage: &'a S, + overlay: &'a mut S::Overlay, } -impl<'a, - S: 'a + TrieBackendStorage, - H: 'a + Hasher -> hash_db::AsPlainDB - for Ephemeral<'a, S, H> - where H::Out: HeapSizeOf +impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> hash_db::AsPlainDB + for Ephemeral<'a, S, H> +where + H::Out: HeapSizeOf, { - fn as_plain_db<'b>(&'b self) -> &'b (hash_db::PlainDB + 'b) { self } - fn as_plain_db_mut<'b>(&'b mut self) -> &'b mut (hash_db::PlainDB + 'b) { self } + fn as_plain_db<'b>(&'b self) -> &'b (hash_db::PlainDB + 'b) { + self + } + fn as_plain_db_mut<'b>(&'b mut self) -> &'b mut (hash_db::PlainDB + 'b) { + self + } } -impl<'a, - S: 'a + TrieBackendStorage, - H: 'a + Hasher -> hash_db::AsHashDB - for Ephemeral<'a, S, H> - where H::Out: HeapSizeOf +impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> hash_db::AsHashDB + for Ephemeral<'a, S, H> +where + H::Out: HeapSizeOf, { - fn as_hash_db<'b>(&'b self) -> &'b (hash_db::HashDB + 'b) { self } - fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (hash_db::HashDB + 'b) { self } + fn as_hash_db<'b>(&'b self) -> &'b (hash_db::HashDB + 'b) { + self + } + fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (hash_db::HashDB + 'b) { + self + } } impl<'a, S: TrieBackendStorage, H: Hasher> Ephemeral<'a, S, H> { - pub fn new(storage: &'a S, overlay: &'a mut S::Overlay) -> Self { - Ephemeral { - storage, - overlay, - } - } + pub fn new(storage: &'a S, overlay: &'a mut S::Overlay) -> Self { + Ephemeral { storage, overlay } + } } -impl<'a, - S: 'a + TrieBackendStorage, - H: Hasher -> hash_db::PlainDB - for Ephemeral<'a, S, H> - where H::Out: HeapSizeOf +impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::PlainDB + for Ephemeral<'a, S, H> +where + H::Out: HeapSizeOf, { - fn get(&self, key: &H::Out) -> Option { - if let Some(val) = hash_db::HashDB::get(self.overlay, key, &[]) { - Some(val) - } else { - match self.storage.get(&key, &[]) { - Ok(x) => x, - Err(e) => { - warn!(target: "trie", "Failed to read from DB: {}", e); - None - }, - } - } - } - - fn contains(&self, key: &H::Out) -> bool { - hash_db::HashDB::get(self, key, &[]).is_some() - } - - fn emplace(&mut self, key: H::Out, value: DBValue) { - hash_db::HashDB::emplace(self.overlay, key, &[], value) - } - - fn remove(&mut self, key: &H::Out) { - hash_db::HashDB::remove(self.overlay, key, &[]) - } + fn get(&self, key: &H::Out) -> Option { + if let Some(val) = hash_db::HashDB::get(self.overlay, key, &[]) { + Some(val) + } else { + match self.storage.get(&key, &[]) { + Ok(x) => x, + Err(e) => { + warn!(target: "trie", "Failed to read from DB: {}", e); + None + } + } + } + } + + fn contains(&self, key: &H::Out) -> bool { + hash_db::HashDB::get(self, key, &[]).is_some() + } + + fn emplace(&mut self, key: H::Out, value: DBValue) { + hash_db::HashDB::emplace(self.overlay, key, &[], value) + } + + fn remove(&mut self, 
key: &H::Out) { + hash_db::HashDB::remove(self.overlay, key, &[]) + } } -impl<'a, - S: 'a + TrieBackendStorage, - H: Hasher -> hash_db::PlainDBRef - for Ephemeral<'a, S, H> - where H::Out: HeapSizeOf +impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::PlainDBRef + for Ephemeral<'a, S, H> +where + H::Out: HeapSizeOf, { - fn get(&self, key: &H::Out) -> Option { hash_db::PlainDB::get(self, key) } - fn contains(&self, key: &H::Out) -> bool { hash_db::PlainDB::contains(self, key) } + fn get(&self, key: &H::Out) -> Option { + hash_db::PlainDB::get(self, key) + } + fn contains(&self, key: &H::Out) -> bool { + hash_db::PlainDB::contains(self, key) + } } -impl<'a, - S: 'a + TrieBackendStorage, - H: Hasher -> hash_db::HashDB - for Ephemeral<'a, S, H> - where H::Out: HeapSizeOf +impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::HashDB + for Ephemeral<'a, S, H> +where + H::Out: HeapSizeOf, { - fn get(&self, key: &H::Out, prefix: &[u8]) -> Option { - if let Some(val) = hash_db::HashDB::get(self.overlay, key, prefix) { - Some(val) - } else { - match self.storage.get(&key, prefix) { - Ok(x) => x, - Err(e) => { - warn!(target: "trie", "Failed to read from DB: {}", e); - None - }, - } - } - } - - fn contains(&self, key: &H::Out, prefix: &[u8]) -> bool { - hash_db::HashDB::get(self, key, prefix).is_some() - } - - fn insert(&mut self, prefix: &[u8], value: &[u8]) -> H::Out { - hash_db::HashDB::insert(self.overlay, prefix, value) - } - - fn emplace(&mut self, key: H::Out, prefix: &[u8], value: DBValue) { - hash_db::HashDB::emplace(self.overlay, key, prefix, value) - } - - fn remove(&mut self, key: &H::Out, prefix: &[u8]) { - hash_db::HashDB::remove(self.overlay, key, prefix) - } + fn get(&self, key: &H::Out, prefix: &[u8]) -> Option { + if let Some(val) = hash_db::HashDB::get(self.overlay, key, prefix) { + Some(val) + } else { + match self.storage.get(&key, prefix) { + Ok(x) => x, + Err(e) => { + warn!(target: "trie", "Failed to read from DB: {}", e); + None + } + } + } + } + + fn contains(&self, key: &H::Out, prefix: &[u8]) -> bool { + hash_db::HashDB::get(self, key, prefix).is_some() + } + + fn insert(&mut self, prefix: &[u8], value: &[u8]) -> H::Out { + hash_db::HashDB::insert(self.overlay, prefix, value) + } + + fn emplace(&mut self, key: H::Out, prefix: &[u8], value: DBValue) { + hash_db::HashDB::emplace(self.overlay, key, prefix, value) + } + + fn remove(&mut self, key: &H::Out, prefix: &[u8]) { + hash_db::HashDB::remove(self.overlay, key, prefix) + } } -impl<'a, - S: 'a + TrieBackendStorage, - H: Hasher -> hash_db::HashDBRef - for Ephemeral<'a, S, H> - where H::Out: HeapSizeOf +impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::HashDBRef + for Ephemeral<'a, S, H> +where + H::Out: HeapSizeOf, { - fn get(&self, key: &H::Out, prefix: &[u8]) -> Option { hash_db::HashDB::get(self, key, prefix) } - fn contains(&self, key: &H::Out, prefix: &[u8]) -> bool { hash_db::HashDB::contains(self, key, prefix) } + fn get(&self, key: &H::Out, prefix: &[u8]) -> Option { + hash_db::HashDB::get(self, key, prefix) + } + fn contains(&self, key: &H::Out, prefix: &[u8]) -> bool { + hash_db::HashDB::contains(self, key, prefix) + } } /// Key-value pairs storage that is used by trie backend essence. pub trait TrieBackendStorage: Send + Sync { - /// Type of in-memory overlay. - type Overlay: hash_db::HashDB + Default + Consolidate; - /// Get the value stored at key. - fn get(&self, key: &H::Out, prefix: &[u8]) -> Result, String>; + /// Type of in-memory overlay. 
+    type Overlay: hash_db::HashDB<H, DBValue> + Default + Consolidate;
+    /// Get the value stored at key.
+    fn get(&self, key: &H::Out, prefix: &[u8]) -> Result<Option<DBValue>, String>;
 }
 
 // This implementation is used by normal storage trie clients.
 impl<H: Hasher> TrieBackendStorage<H> for Arc<Storage<H>> {
-	type Overlay = PrefixedMemoryDB<H>;
+    type Overlay = PrefixedMemoryDB<H>;
 
-	fn get(&self, key: &H::Out, prefix: &[u8]) -> Result<Option<DBValue>, String> {
-		Storage::<H>::get(self.deref(), key, prefix)
-	}
+    fn get(&self, key: &H::Out, prefix: &[u8]) -> Result<Option<DBValue>, String> {
+        Storage::<H>::get(self.deref(), key, prefix)
+    }
 }
 
 // This implementation is used by test storage trie clients.
 impl<H: Hasher> TrieBackendStorage<H> for PrefixedMemoryDB<H> {
-	type Overlay = PrefixedMemoryDB<H>;
+    type Overlay = PrefixedMemoryDB<H>;
 
-	fn get(&self, key: &H::Out, prefix: &[u8]) -> Result<Option<DBValue>, String> {
-		Ok(hash_db::HashDB::get(self, key, prefix))
-	}
+    fn get(&self, key: &H::Out, prefix: &[u8]) -> Result<Option<DBValue>, String> {
+        Ok(hash_db::HashDB::get(self, key, prefix))
+    }
 }
 
 impl<H: Hasher> TrieBackendStorage<H> for MemoryDB<H> {
-	type Overlay = MemoryDB<H>;
+    type Overlay = MemoryDB<H>;
 
-	fn get(&self, key: &H::Out, prefix: &[u8]) -> Result<Option<DBValue>, String> {
-		Ok(hash_db::HashDB::get(self, key, prefix))
-	}
+    fn get(&self, key: &H::Out, prefix: &[u8]) -> Result<Option<DBValue>, String> {
+        Ok(hash_db::HashDB::get(self, key, prefix))
+    }
 }
 
 // This implementation is used by changes trie clients.
-impl<'a, S, H: Hasher> TrieBackendStorage<H> for &'a S where S: ChangesTrieStorage<H> {
-	type Overlay = MemoryDB<H>;
+impl<'a, S, H: Hasher> TrieBackendStorage<H> for &'a S
+where
+    S: ChangesTrieStorage<H>,
+{
+    type Overlay = MemoryDB<H>;
 
-	fn get(&self, key: &H::Out, prefix: &[u8]) -> Result<Option<DBValue>, String> {
-		ChangesTrieStorage::<H>::get(*self, key, prefix)
-	}
+    fn get(&self, key: &H::Out, prefix: &[u8]) -> Result<Option<DBValue>, String> {
+        ChangesTrieStorage::<H>::get(*self, key, prefix)
+    }
 }
diff --git a/core/telemetry/src/lib.rs b/core/telemetry/src/lib.rs
index fba75c196a..49a572817e 100644
--- a/core/telemetry/src/lib.rs
+++ b/core/telemetry/src/lib.rs
@@ -21,26 +21,26 @@
 //! server (if there is one). We use the async drain adapter of `slog`
 //! so that the logging thread doesn't get held up at all.
 
-use std::{io, time, thread};
-use std::sync::Arc;
-use parking_lot::Mutex;
-use slog::{Drain, o};
+use core::result;
 use log::trace;
+use parking_lot::Mutex;
 use rand::{thread_rng, Rng};
-pub use slog_scope::with_logger;
+use serde_derive::{Deserialize, Serialize};
 pub use slog;
-use serde_derive::{Serialize, Deserialize};
 use slog::OwnedKVList;
 use slog::Record;
-use core::result;
+use slog::{o, Drain};
+pub use slog_scope::with_logger;
+use std::sync::Arc;
+use std::{io, thread, time};
 
 /// Configuration for telemetry.
 pub struct TelemetryConfig {
-	/// Collection of telemetry WebSocket servers with a corresponding verbosity level.
-	pub endpoints: TelemetryEndpoints,
-	/// What to do when we connect to the servers.
-	/// Note that this closure is executed each time we connect to a telemetry endpoint.
-	pub on_connect: Box<Fn() + Send + Sync + 'static>,
+    /// Collection of telemetry WebSocket servers with a corresponding verbosity level.
+    pub endpoints: TelemetryEndpoints,
+    /// What to do when we connect to the servers.
+    /// Note that this closure is executed each time we connect to a telemetry endpoint.
+    pub on_connect: Box<Fn() + Send + Sync + 'static>,
 }
 
 /// Telemetry service guard.
@@ -61,102 +61,116 @@ pub const CONSENSUS_INFO: &str = "3";
 
 /// Multiply logging to all drains. This is similar to `slog::Duplicate`, which is
 /// limited to two drains though and doesn't support dynamic nesting at runtime.
#[derive(Debug, Clone)] -pub struct Multiply (pub Vec>); +pub struct Multiply(pub Vec>); impl Multiply { - pub fn new(v: Vec>) -> Self { - Multiply(v) - } + pub fn new(v: Vec>) -> Self { + Multiply(v) + } } impl Drain for Multiply { - type Ok = Vec; - type Err = Vec; - - fn log(&self, record: &Record, logger_values: &OwnedKVList) -> result::Result { - let mut oks = Vec::new(); - let mut errs = Vec::new(); - - self.0.iter().for_each(|l| { - let res: Result<::Ok, ::Err> = (*l).log(record, logger_values); - match res { - Ok(o) => oks.push(o), - Err(e) => errs.push(e), - } - }); - - if !errs.is_empty() { - result::Result::Err(errs) - } else { - result::Result::Ok(oks) - } - } + type Ok = Vec; + type Err = Vec; + + fn log( + &self, + record: &Record, + logger_values: &OwnedKVList, + ) -> result::Result { + let mut oks = Vec::new(); + let mut errs = Vec::new(); + + self.0.iter().for_each(|l| { + let res: Result<::Ok, ::Err> = (*l).log(record, logger_values); + match res { + Ok(o) => oks.push(o), + Err(e) => errs.push(e), + } + }); + + if !errs.is_empty() { + result::Result::Err(errs) + } else { + result::Result::Ok(oks) + } + } } /// Initialize telemetry. pub fn init_telemetry(config: TelemetryConfig) -> slog_scope::GlobalLoggerGuard { - let mut endpoint_drains: Vec>> = Vec::new(); - let mut out_syncs = Vec::new(); - - // Set up a filter/drain for each endpoint - config.endpoints.0.iter().for_each(|(url, verbosity)| { - let writer = TelemetryWriter::new(Arc::new(url.to_owned())); - let out_sync = writer.out.clone(); - out_syncs.push(out_sync); - - let until_verbosity = *verbosity; - let filter = slog::Filter( - slog_json::Json::default(writer).fuse(), - move |rec| { - let tag = rec.tag().parse::() - .expect("`telemetry!` macro requires tag."); - tag <= until_verbosity - }); - - let filter = Box::new(filter) as Box>; - endpoint_drains.push(filter); - }); - - // Set up logging to all endpoints - let drain = slog_async::Async::new(Multiply::new(endpoint_drains).fuse()); - let root = slog::Logger::root(drain.chan_size(CHANNEL_SIZE) - .overflow_strategy(slog_async::OverflowStrategy::DropAndReport) - .build().fuse(), o!() - ); - let logger_guard = slog_scope::set_global_logger(root); - - // Spawn a thread for each endpoint - let on_connect = Arc::new(config.on_connect); - config.endpoints.0.into_iter().for_each(|(url, verbosity)| { - let inner_verbosity = Arc::new(verbosity.to_owned()); - let inner_on_connect = Arc::clone(&on_connect); - - let out_sync = out_syncs.remove(0); - let out_sync = Arc::clone(&out_sync); - - thread::spawn(move || { - loop { - let on_connect = Arc::clone(&inner_on_connect); - let out_sync = Arc::clone(&out_sync); - let verbosity = Arc::clone(&inner_verbosity); - - trace!(target: "telemetry", + let mut endpoint_drains: Vec>> = Vec::new(); + let mut out_syncs = Vec::new(); + + // Set up a filter/drain for each endpoint + config.endpoints.0.iter().for_each(|(url, verbosity)| { + let writer = TelemetryWriter::new(Arc::new(url.to_owned())); + let out_sync = writer.out.clone(); + out_syncs.push(out_sync); + + let until_verbosity = *verbosity; + let filter = slog::Filter(slog_json::Json::default(writer).fuse(), move |rec| { + let tag = rec + .tag() + .parse::() + .expect("`telemetry!` macro requires tag."); + tag <= until_verbosity + }); + + let filter = Box::new(filter) as Box>; + endpoint_drains.push(filter); + }); + + // Set up logging to all endpoints + let drain = slog_async::Async::new(Multiply::new(endpoint_drains).fuse()); + let root = slog::Logger::root( + drain + 
.chan_size(CHANNEL_SIZE) + .overflow_strategy(slog_async::OverflowStrategy::DropAndReport) + .build() + .fuse(), + o!(), + ); + let logger_guard = slog_scope::set_global_logger(root); + + // Spawn a thread for each endpoint + let on_connect = Arc::new(config.on_connect); + config.endpoints.0.into_iter().for_each(|(url, verbosity)| { + let inner_verbosity = Arc::new(verbosity.to_owned()); + let inner_on_connect = Arc::clone(&on_connect); + + let out_sync = out_syncs.remove(0); + let out_sync = Arc::clone(&out_sync); + + thread::spawn(move || { + loop { + let on_connect = Arc::clone(&inner_on_connect); + let out_sync = Arc::clone(&out_sync); + let verbosity = Arc::clone(&inner_verbosity); + + trace!(target: "telemetry", "Connecting to Telemetry at {} with verbosity {}", url, Arc::clone(&verbosity)); - let _ = ws::connect(url.to_owned(), - |out| { - Connection::new(out, Arc::clone(&out_sync), Arc::clone(&on_connect), url.clone()) - }); - - // Sleep for a random time between 5-10 secs. If there are general connection - // issues not all threads should be synchronized in their re-connection time. - let random_sleep = thread_rng().gen_range(0, 5); - thread::sleep(time::Duration::from_secs(5) + time::Duration::from_secs(random_sleep)); - } - }); - }); - - return logger_guard; + let _ = ws::connect(url.to_owned(), |out| { + Connection::new( + out, + Arc::clone(&out_sync), + Arc::clone(&on_connect), + url.clone(), + ) + }); + + // Sleep for a random time between 5-10 secs. If there are general connection + // issues not all threads should be synchronized in their re-connection time. + let random_sleep = thread_rng().gen_range(0, 5); + thread::sleep( + time::Duration::from_secs(5) + time::Duration::from_secs(random_sleep), + ); + } + }); + }); + + return logger_guard; } /// Translates to `slog_scope::info`, but contains an additional verbosity @@ -172,118 +186,120 @@ macro_rules! telemetry { } struct Connection { - out: ws::Sender, - out_sync: Arc>>, - on_connect: Arc>, - url: String, + out: ws::Sender, + out_sync: Arc>>, + on_connect: Arc>, + url: String, } impl Connection { - fn new( - out: ws::Sender, - out_sync: Arc>>, - on_connect: Arc>, - url: String - ) -> Self { - Connection { - out, - out_sync, - on_connect, - url, - } - } + fn new( + out: ws::Sender, + out_sync: Arc>>, + on_connect: Arc>, + url: String, + ) -> Self { + Connection { + out, + out_sync, + on_connect, + url, + } + } } impl ws::Handler for Connection { - fn on_open(&mut self, _: ws::Handshake) -> ws::Result<()> { - trace!(target: "telemetry", "Connected to {}!", self.url); + fn on_open(&mut self, _: ws::Handshake) -> ws::Result<()> { + trace!(target: "telemetry", "Connected to {}!", self.url); - *self.out_sync.lock() = Some(self.out.clone()); - (self.on_connect)(); - Ok(()) - } + *self.out_sync.lock() = Some(self.out.clone()); + (self.on_connect)(); + Ok(()) + } fn on_close(&mut self, code: ws::CloseCode, reason: &str) { - *self.out_sync.lock() = None; + *self.out_sync.lock() = None; - trace!(target: "telemetry", "Connection to {} closing due to ({:?}) {}", + trace!(target: "telemetry", "Connection to {} closing due to ({:?}) {}", self.url, code, reason); } - fn on_error(&mut self, _: ws::Error) { - *self.out_sync.lock() = None; + fn on_error(&mut self, _: ws::Error) { + *self.out_sync.lock() = None; - // Sleep to ensure that reconnecting isn't spamming logs. - // This happens in it's own thread so it won't block anything. 
- thread::sleep(time::Duration::from_millis(1000)); - } + // Sleep to ensure that reconnecting isn't spamming logs. + // This happens in it's own thread so it won't block anything. + thread::sleep(time::Duration::from_millis(1000)); + } } struct TelemetryWriter { - buffer: Vec, - out: Arc>>, - url: Arc, + buffer: Vec, + out: Arc>>, + url: Arc, } impl TelemetryWriter { - fn new(url: Arc) -> Self { - let out = Arc::new(Mutex::new(None)); - - TelemetryWriter { - buffer: Vec::new(), - out, - url, - } - } + fn new(url: Arc) -> Self { + let out = Arc::new(Mutex::new(None)); + + TelemetryWriter { + buffer: Vec::new(), + out, + url, + } + } } impl io::Write for TelemetryWriter { - fn write(&mut self, msg: &[u8]) -> io::Result { - let mut iter = msg.split(|x| *x == b'\n'); - let first = iter.next().expect("Split iterator always has at least one element; qed"); - - self.buffer.extend_from_slice(first); - - // Flush for each occurrence of new line character - for continued in iter { - let _ = self.flush(); - self.buffer.extend_from_slice(continued); - } - - Ok(msg.len()) - } - - fn flush(&mut self) -> io::Result<()> { - if self.buffer.is_empty() { - return Ok(()); - } - if let Ok(s) = ::std::str::from_utf8(&self.buffer[..]) { - let mut out = self.out.lock(); - - let error = if let Some(ref mut o) = *out { - let r = o.send(s); - trace!(target: "telemetry", "Sent to telemetry {}: {} -> {:?}", self.url, s, r); - - r.is_err() - } else { - trace!(target: "telemetry", "Telemetry socket closed to {}, failed to send: {}", self.url, s); - false - }; - - if error { - *out = None; - } - } - self.buffer.clear(); - Ok(()) - } + fn write(&mut self, msg: &[u8]) -> io::Result { + let mut iter = msg.split(|x| *x == b'\n'); + let first = iter + .next() + .expect("Split iterator always has at least one element; qed"); + + self.buffer.extend_from_slice(first); + + // Flush for each occurrence of new line character + for continued in iter { + let _ = self.flush(); + self.buffer.extend_from_slice(continued); + } + + Ok(msg.len()) + } + + fn flush(&mut self) -> io::Result<()> { + if self.buffer.is_empty() { + return Ok(()); + } + if let Ok(s) = ::std::str::from_utf8(&self.buffer[..]) { + let mut out = self.out.lock(); + + let error = if let Some(ref mut o) = *out { + let r = o.send(s); + trace!(target: "telemetry", "Sent to telemetry {}: {} -> {:?}", self.url, s, r); + + r.is_err() + } else { + trace!(target: "telemetry", "Telemetry socket closed to {}, failed to send: {}", self.url, s); + false + }; + + if error { + *out = None; + } + } + self.buffer.clear(); + Ok(()) + } } #[derive(Debug, Clone, Serialize, Deserialize)] -pub struct TelemetryEndpoints (Vec<(String, u8)>); +pub struct TelemetryEndpoints(Vec<(String, u8)>); impl TelemetryEndpoints { - pub fn new(endpoints: Vec<(String, u8)>) -> Self { - TelemetryEndpoints(endpoints) - } + pub fn new(endpoints: Vec<(String, u8)>) -> Self { + TelemetryEndpoints(endpoints) + } } diff --git a/core/test-client/src/block_builder_ext.rs b/core/test-client/src/block_builder_ext.rs index e427b57892..544fc4199c 100644 --- a/core/test-client/src/block_builder_ext.rs +++ b/core/test-client/src/block_builder_ext.rs @@ -16,30 +16,31 @@ //! Block Builder extensions for tests. -use client; use super::AccountKeyring; +use client; +use client::block_builder::api::BlockBuilder; use runtime; use runtime_primitives::traits::ProvideRuntimeApi; -use client::block_builder::api::BlockBuilder; /// Extension trait for test block builder. 
pub trait BlockBuilderExt { - /// Add transfer extrinsic to the block. - fn push_transfer(&mut self, transfer: runtime::Transfer) -> Result<(), client::error::Error>; + /// Add transfer extrinsic to the block. + fn push_transfer(&mut self, transfer: runtime::Transfer) -> Result<(), client::error::Error>; } -impl<'a, A> BlockBuilderExt for client::block_builder::BlockBuilder<'a, runtime::Block, A> where - A: ProvideRuntimeApi + client::blockchain::HeaderBackend + 'a, - A::Api: BlockBuilder +impl<'a, A> BlockBuilderExt for client::block_builder::BlockBuilder<'a, runtime::Block, A> +where + A: ProvideRuntimeApi + client::blockchain::HeaderBackend + 'a, + A::Api: BlockBuilder, { - fn push_transfer(&mut self, transfer: runtime::Transfer) -> Result<(), client::error::Error> { - self.push(sign_tx(transfer)) - } + fn push_transfer(&mut self, transfer: runtime::Transfer) -> Result<(), client::error::Error> { + self.push(sign_tx(transfer)) + } } fn sign_tx(transfer: runtime::Transfer) -> runtime::Extrinsic { - let signature = AccountKeyring::from_public(&transfer.from) - .unwrap() - .sign(&parity_codec::Encode::encode(&transfer)); - runtime::Extrinsic::Transfer(transfer, signature) + let signature = AccountKeyring::from_public(&transfer.from) + .unwrap() + .sign(&parity_codec::Encode::encode(&transfer)); + runtime::Extrinsic::Transfer(transfer, signature) } diff --git a/core/test-client/src/client_ext.rs b/core/test-client/src/client_ext.rs index 70e7feb078..c08076ac34 100644 --- a/core/test-client/src/client_ext.rs +++ b/core/test-client/src/client_ext.rs @@ -17,75 +17,89 @@ //! Client extension for tests. use client::{self, Client}; -use consensus::{ImportBlock, BlockImport, BlockOrigin, Error as ConsensusError, ForkChoiceStrategy}; -use runtime_primitives::Justification; -use runtime_primitives::generic::BlockId; +use consensus::{ + BlockImport, BlockOrigin, Error as ConsensusError, ForkChoiceStrategy, ImportBlock, +}; +use parity_codec::alloc::collections::hash_map::HashMap; use primitives::Blake2Hasher; use runtime; -use parity_codec::alloc::collections::hash_map::HashMap; +use runtime_primitives::generic::BlockId; +use runtime_primitives::Justification; /// Extension trait for a test client. pub trait TestClient: Sized { - /// Import block to the chain. No finality. - fn import(&self, origin: BlockOrigin, block: runtime::Block) - -> Result<(), ConsensusError>; + /// Import block to the chain. No finality. + fn import(&self, origin: BlockOrigin, block: runtime::Block) -> Result<(), ConsensusError>; - /// Import block with justification, finalizes block. - fn import_justified(&self, origin: BlockOrigin, block: runtime::Block, justification: Justification) - -> Result<(), ConsensusError>; + /// Import block with justification, finalizes block. + fn import_justified( + &self, + origin: BlockOrigin, + block: runtime::Block, + justification: Justification, + ) -> Result<(), ConsensusError>; - /// Finalize a block. - fn finalize_block(&self, id: BlockId, justification: Option) -> client::error::Result<()>; + /// Finalize a block. + fn finalize_block( + &self, + id: BlockId, + justification: Option, + ) -> client::error::Result<()>; - /// Returns hash of the genesis block. - fn genesis_hash(&self) -> runtime::Hash; + /// Returns hash of the genesis block. 
+ fn genesis_hash(&self) -> runtime::Hash; } impl TestClient for Client - where - B: client::backend::Backend, - E: client::CallExecutor, - Self: BlockImport, +where + B: client::backend::Backend, + E: client::CallExecutor, + Self: BlockImport, { - fn import(&self, origin: BlockOrigin, block: runtime::Block) - -> Result<(), ConsensusError> - { - let import = ImportBlock { - origin, - header: block.header, - justification: None, - post_digests: vec![], - body: Some(block.extrinsics), - finalized: false, - auxiliary: Vec::new(), - fork_choice: ForkChoiceStrategy::LongestChain, - }; + fn import(&self, origin: BlockOrigin, block: runtime::Block) -> Result<(), ConsensusError> { + let import = ImportBlock { + origin, + header: block.header, + justification: None, + post_digests: vec![], + body: Some(block.extrinsics), + finalized: false, + auxiliary: Vec::new(), + fork_choice: ForkChoiceStrategy::LongestChain, + }; - self.import_block(import, HashMap::new()).map(|_| ()) - } + self.import_block(import, HashMap::new()).map(|_| ()) + } - fn import_justified(&self, origin: BlockOrigin, block: runtime::Block, justification: Justification) - -> Result<(), ConsensusError> - { - let import = ImportBlock { - origin, - header: block.header, - justification: Some(justification), - post_digests: vec![], - body: Some(block.extrinsics), - finalized: true, - auxiliary: Vec::new(), - fork_choice: ForkChoiceStrategy::LongestChain, - }; + fn import_justified( + &self, + origin: BlockOrigin, + block: runtime::Block, + justification: Justification, + ) -> Result<(), ConsensusError> { + let import = ImportBlock { + origin, + header: block.header, + justification: Some(justification), + post_digests: vec![], + body: Some(block.extrinsics), + finalized: true, + auxiliary: Vec::new(), + fork_choice: ForkChoiceStrategy::LongestChain, + }; - self.import_block(import, HashMap::new()).map(|_| ()) - } + self.import_block(import, HashMap::new()).map(|_| ()) + } - fn finalize_block(&self, id: BlockId, justification: Option) -> client::error::Result<()> { - self.finalize_block(id, justification, true) - } + fn finalize_block( + &self, + id: BlockId, + justification: Option, + ) -> client::error::Result<()> { + self.finalize_block(id, justification, true) + } - fn genesis_hash(&self) -> runtime::Hash { - self.block_hash(0).unwrap().unwrap() - } + fn genesis_hash(&self) -> runtime::Hash { + self.block_hash(0).unwrap().unwrap() + } } diff --git a/core/test-client/src/lib.rs b/core/test-client/src/lib.rs index 4a99df65a5..48284fe399 100644 --- a/core/test-client/src/lib.rs +++ b/core/test-client/src/lib.rs @@ -18,36 +18,36 @@ #![warn(missing_docs)] +mod block_builder_ext; pub mod client_ext; pub mod trait_tests; -mod block_builder_ext; -pub use client_ext::TestClient; pub use block_builder_ext::BlockBuilderExt; pub use client; -pub use client::ExecutionStrategies; -pub use client::blockchain; pub use client::backend; +pub use client::blockchain; +pub use client::ExecutionStrategies; +pub use client_ext::TestClient; +pub use consensus; pub use executor::NativeExecutor; +pub use keyring::{AccountKeyring, AuthorityKeyring}; pub use runtime; -pub use consensus; -pub use keyring::{AuthorityKeyring, AccountKeyring}; -use std::sync::Arc; +use client::LocalCallExecutor; use futures::future::FutureResult; use primitives::Blake2Hasher; +use runtime::genesismap::{additional_storage_with_genesis, GenesisConfig}; +use runtime_primitives::traits::{Block as BlockT, Hash as HashT, Header as HeaderT, NumberFor}; use 
diff --git a/core/test-client/src/lib.rs b/core/test-client/src/lib.rs
index 4a99df65a5..48284fe399 100644
--- a/core/test-client/src/lib.rs
+++ b/core/test-client/src/lib.rs
@@ -18,36 +18,36 @@

 #![warn(missing_docs)]

+mod block_builder_ext;
 pub mod client_ext;
 pub mod trait_tests;
-mod block_builder_ext;

-pub use client_ext::TestClient;
 pub use block_builder_ext::BlockBuilderExt;
 pub use client;
-pub use client::ExecutionStrategies;
-pub use client::blockchain;
 pub use client::backend;
+pub use client::blockchain;
+pub use client::ExecutionStrategies;
+pub use client_ext::TestClient;
+pub use consensus;
 pub use executor::NativeExecutor;
+pub use keyring::{AccountKeyring, AuthorityKeyring};
 pub use runtime;
-pub use consensus;
-pub use keyring::{AuthorityKeyring, AccountKeyring};

-use std::sync::Arc;
+use client::LocalCallExecutor;
 use futures::future::FutureResult;
 use primitives::Blake2Hasher;
+use runtime::genesismap::{additional_storage_with_genesis, GenesisConfig};
+use runtime_primitives::traits::{Block as BlockT, Hash as HashT, Header as HeaderT, NumberFor};
 use runtime_primitives::StorageOverlay;
-use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, Hash as HashT, NumberFor};
-use runtime::genesismap::{GenesisConfig, additional_storage_with_genesis};
 use state_machine::ExecutionStrategy;
-use client::LocalCallExecutor;
+use std::sync::Arc;

 mod local_executor {
-    #![allow(missing_docs)]
-    use runtime;
-    use executor::native_executor_instance;
-    // FIXME #1576 change the macro and pass in the `BlakeHasher` that dispatch needs from here instead
-    native_executor_instance!(
+    #![allow(missing_docs)]
+    use executor::native_executor_instance;
+    use runtime;
+    // FIXME #1576 change the macro and pass in the `BlakeHasher` that dispatch needs from here instead
+    native_executor_instance!(
         pub LocalExecutor,
         runtime::api::dispatch,
         runtime::native_version,
@@ -62,16 +62,13 @@ pub use local_executor::LocalExecutor;
 pub type Backend = client_db::Backend<runtime::Block>;

 /// Test client executor.
-pub type Executor = client::LocalCallExecutor<
-    Backend,
-    executor::NativeExecutor<LocalExecutor>,
->;
+pub type Executor = client::LocalCallExecutor<Backend, executor::NativeExecutor<LocalExecutor>>;

 /// Test client light database backend.
 pub type LightBackend = client::light::backend::Backend<
-    client_db::light::LightStorage<runtime::Block>,
-    LightFetcher,
-    Blake2Hasher,
+    client_db::light::LightStorage<runtime::Block>,
+    LightFetcher,
+    Blake2Hasher,
 >;

 /// Test client light fetcher.
@@ -79,143 +76,165 @@ pub struct LightFetcher;

 /// Test client light executor.
 pub type LightExecutor = client::light::call_executor::RemoteOrLocalCallExecutor<
-    runtime::Block,
-    LightBackend,
-    client::light::call_executor::RemoteCallExecutor<
-        client::light::blockchain::Blockchain<
-            client_db::light::LightStorage<runtime::Block>,
-            LightFetcher
-        >,
-        LightFetcher
-    >,
-    client::LocalCallExecutor<
-        client::light::backend::Backend<
-            client_db::light::LightStorage<runtime::Block>,
-            LightFetcher,
-            Blake2Hasher
-        >,
-        executor::NativeExecutor<LocalExecutor>
-    >
+    runtime::Block,
+    LightBackend,
+    client::light::call_executor::RemoteCallExecutor<
+        client::light::blockchain::Blockchain<
+            client_db::light::LightStorage<runtime::Block>,
+            LightFetcher,
+        >,
+        LightFetcher,
+    >,
+    client::LocalCallExecutor<
+        client::light::backend::Backend<
+            client_db::light::LightStorage<runtime::Block>,
+            LightFetcher,
+            Blake2Hasher,
+        >,
+        executor::NativeExecutor<LocalExecutor>,
+    >,
 >;

 /// Creates new client instance used for tests.
 pub fn new() -> client::Client<Backend, Executor, runtime::Block, runtime::RuntimeApi> {
-    new_with_backend(Arc::new(Backend::new_test(::std::u32::MAX, ::std::u64::MAX)), false)
+    new_with_backend(
+        Arc::new(Backend::new_test(::std::u32::MAX, ::std::u64::MAX)),
+        false,
+    )
 }
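Taken together, the aliases above mean a fully wired test client is a single call; a minimal sketch (the concrete type is the `Client<Backend, Executor, runtime::Block, runtime::RuntimeApi>` spelled out in `new()`):

    // Default full client backed by the test database backend and the native LocalExecutor.
    let client = test_client::new();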
 /// Creates new light client instance used for tests.
-pub fn new_light() -> client::Client<LightBackend, LightExecutor, runtime::Block, runtime::RuntimeApi> {
-    let storage = client_db::light::LightStorage::new_test();
-    let blockchain = Arc::new(client::light::blockchain::Blockchain::new(storage));
-    let backend = Arc::new(LightBackend::new(blockchain.clone()));
-    let executor = NativeExecutor::new(None);
-    let fetcher = Arc::new(LightFetcher);
-    let remote_call_executor = client::light::call_executor::RemoteCallExecutor::new(blockchain.clone(), fetcher);
-    let local_call_executor = client::LocalCallExecutor::new(backend.clone(), executor);
-    let call_executor = LightExecutor::new(backend.clone(), remote_call_executor, local_call_executor);
-    client::Client::new(backend, call_executor, genesis_storage(false), Default::default()).unwrap()
+pub fn new_light(
+) -> client::Client<LightBackend, LightExecutor, runtime::Block, runtime::RuntimeApi> {
+    let storage = client_db::light::LightStorage::new_test();
+    let blockchain = Arc::new(client::light::blockchain::Blockchain::new(storage));
+    let backend = Arc::new(LightBackend::new(blockchain.clone()));
+    let executor = NativeExecutor::new(None);
+    let fetcher = Arc::new(LightFetcher);
+    let remote_call_executor =
+        client::light::call_executor::RemoteCallExecutor::new(blockchain.clone(), fetcher);
+    let local_call_executor = client::LocalCallExecutor::new(backend.clone(), executor);
+    let call_executor =
+        LightExecutor::new(backend.clone(), remote_call_executor, local_call_executor);
+    client::Client::new(
+        backend,
+        call_executor,
+        genesis_storage(false),
+        Default::default(),
+    )
+    .unwrap()
 }

 /// Creates new client instance used for tests with the given api execution strategy.
 pub fn new_with_execution_strategy(
-    execution_strategy: ExecutionStrategy
+    execution_strategy: ExecutionStrategy,
 ) -> client::Client<Backend, Executor, runtime::Block, runtime::RuntimeApi> {
-    let backend = Arc::new(Backend::new_test(::std::u32::MAX, ::std::u64::MAX));
-    let executor = NativeExecutor::new(None);
-    let executor = LocalCallExecutor::new(backend.clone(), executor);
-
-    let execution_strategies = ExecutionStrategies {
-        syncing: execution_strategy,
-        importing: execution_strategy,
-        block_construction: execution_strategy,
-        offchain_worker: execution_strategy,
-        other: execution_strategy,
-    };
-
-    client::Client::new(
-        backend,
-        executor,
-        genesis_storage(false),
-        execution_strategies
-    ).expect("Creates new client")
+    let backend = Arc::new(Backend::new_test(::std::u32::MAX, ::std::u64::MAX));
+    let executor = NativeExecutor::new(None);
+    let executor = LocalCallExecutor::new(backend.clone(), executor);
+
+    let execution_strategies = ExecutionStrategies {
+        syncing: execution_strategy,
+        importing: execution_strategy,
+        block_construction: execution_strategy,
+        offchain_worker: execution_strategy,
+        other: execution_strategy,
+    };
+
+    client::Client::new(
+        backend,
+        executor,
+        genesis_storage(false),
+        execution_strategies,
+    )
+    .expect("Creates new client")
 }

 /// Creates new test client instance that supports changes trie creation.
-pub fn new_with_changes_trie()
-    -> client::Client<Backend, Executor, runtime::Block, runtime::RuntimeApi>
-{
-    new_with_backend(Arc::new(Backend::new_test(::std::u32::MAX, ::std::u64::MAX)), true)
+pub fn new_with_changes_trie(
+) -> client::Client<Backend, Executor, runtime::Block, runtime::RuntimeApi> {
+    new_with_backend(
+        Arc::new(Backend::new_test(::std::u32::MAX, ::std::u64::MAX)),
+        true,
+    )
 }
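Because `new_with_execution_strategy` fans its one argument out to every `ExecutionStrategies` field, a test can force a single strategy for syncing, importing, block construction, and offchain work in one call. A hedged sketch (the `AlwaysWasm` variant is an assumption here, taken from `state_machine::ExecutionStrategy` as used in Substrate tests of this era):

    use state_machine::ExecutionStrategy;

    // Route every execution context through the wasm executor.
    let client = test_client::new_with_execution_strategy(ExecutionStrategy::AlwaysWasm);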
 /// Creates new client instance used for tests with an explicitly provided backend.
 /// This is useful for testing backend implementations.
 pub fn new_with_backend<B>(
-    backend: Arc<B>,
-    support_changes_trie: bool
+    backend: Arc<B>,
+    support_changes_trie: bool,
 ) -> client::Client<
-    B,
-    client::LocalCallExecutor<B, executor::NativeExecutor<LocalExecutor>>,
-    runtime::Block,
-    runtime::RuntimeApi
-> where B: backend::LocalBackend<runtime::Block, Blake2Hasher>
+    B,
+    client::LocalCallExecutor<B, executor::NativeExecutor<LocalExecutor>>,
+    runtime::Block,
+    runtime::RuntimeApi,
+>
+where
+    B: backend::LocalBackend<runtime::Block, Blake2Hasher>,
 {
-    let executor = NativeExecutor::new(None);
-    client::new_with_backend(backend, executor, genesis_storage(support_changes_trie)).unwrap()
+    let executor = NativeExecutor::new(None);
+    client::new_with_backend(backend, executor, genesis_storage(support_changes_trie)).unwrap()
 }

 fn genesis_config(support_changes_trie: bool) -> GenesisConfig {
-    GenesisConfig::new(support_changes_trie, vec![
-        AuthorityKeyring::Alice.into(),
-        AuthorityKeyring::Bob.into(),
-        AuthorityKeyring::Charlie.into(),
-    ], vec![
-        AccountKeyring::Alice.into(),
-        AccountKeyring::Bob.into(),
-        AccountKeyring::Charlie.into(),
-    ],
-    1000
-    )
+    GenesisConfig::new(
+        support_changes_trie,
+        vec![
+            AuthorityKeyring::Alice.into(),
+            AuthorityKeyring::Bob.into(),
+            AuthorityKeyring::Charlie.into(),
+        ],
+        vec![
+            AccountKeyring::Alice.into(),
+            AccountKeyring::Bob.into(),
+            AccountKeyring::Charlie.into(),
+        ],
+        1000,
+    )
 }

 fn genesis_storage(support_changes_trie: bool) -> StorageOverlay {
-    let mut storage = genesis_config(support_changes_trie).genesis_map();
-    let state_root = <<<runtime::Block as BlockT>::Header as HeaderT>::Hashing as HashT>::trie_root(storage.clone().into_iter());
-    let block: runtime::Block = client::genesis::construct_genesis_block(state_root);
-    storage.extend(additional_storage_with_genesis(&block));
-    storage
+    let mut storage = genesis_config(support_changes_trie).genesis_map();
+    let state_root = <<<runtime::Block as BlockT>::Header as HeaderT>::Hashing as HashT>::trie_root(
+        storage.clone().into_iter(),
+    );
+    let block: runtime::Block = client::genesis::construct_genesis_block(state_root);
+    storage.extend(additional_storage_with_genesis(&block));
+    storage
 }

 impl client::light::fetcher::Fetcher<runtime::Block> for LightFetcher {
-    type RemoteHeaderResult = FutureResult<runtime::Header, client::error::Error>;
-    type RemoteReadResult = FutureResult<Option<Vec<u8>>, client::error::Error>;
-    type RemoteCallResult = FutureResult<Vec<u8>, client::error::Error>;
-    type RemoteChangesResult = FutureResult<Vec<(NumberFor<runtime::Block>, u32)>, client::error::Error>;
-
-    fn remote_header(
-        &self,
-        _request: client::light::fetcher::RemoteHeaderRequest<runtime::Header>,
-    ) -> Self::RemoteHeaderResult {
-        unimplemented!("not (yet) used in tests")
-    }
-
-    fn remote_read(
-        &self,
-        _request: client::light::fetcher::RemoteReadRequest<runtime::Header>,
-    ) -> Self::RemoteReadResult {
-        unimplemented!("not (yet) used in tests")
-    }
-
-    fn remote_call(
-        &self,
-        _request: client::light::fetcher::RemoteCallRequest<runtime::Header>,
-    ) -> Self::RemoteCallResult {
-        unimplemented!("not (yet) used in tests")
-    }
-
-    fn remote_changes(
-        &self,
-        _request: client::light::fetcher::RemoteChangesRequest<runtime::Header>,
-    ) -> Self::RemoteChangesResult {
-        unimplemented!("not (yet) used in tests")
-    }
+    type RemoteHeaderResult = FutureResult<runtime::Header, client::error::Error>;
+    type RemoteReadResult = FutureResult<Option<Vec<u8>>, client::error::Error>;
+    type RemoteCallResult = FutureResult<Vec<u8>, client::error::Error>;
+    type RemoteChangesResult = FutureResult<Vec<(NumberFor<runtime::Block>, u32)>, client::error::Error>;
+
+    fn remote_header(
+        &self,
+        _request: client::light::fetcher::RemoteHeaderRequest<runtime::Header>,
+    ) -> Self::RemoteHeaderResult {
+        unimplemented!("not (yet) used in tests")
+    }
+
+    fn remote_read(
+        &self,
+        _request: client::light::fetcher::RemoteReadRequest<runtime::Header>,
+    ) -> Self::RemoteReadResult {
+        unimplemented!("not (yet) used in tests")
+    }
+
+    fn
remote_call( + &self, + _request: client::light::fetcher::RemoteCallRequest, + ) -> Self::RemoteCallResult { + unimplemented!("not (yet) used in tests") + } + + fn remote_changes( + &self, + _request: client::light::fetcher::RemoteChangesRequest, + ) -> Self::RemoteChangesResult { + unimplemented!("not (yet) used in tests") + } } diff --git a/core/test-client/src/trait_tests.rs b/core/test-client/src/trait_tests.rs index aa51f7d8bf..8849c6d834 100644 --- a/core/test-client/src/trait_tests.rs +++ b/core/test-client/src/trait_tests.rs @@ -19,312 +19,476 @@ #![allow(missing_docs)] -use std::sync::Arc; -use consensus::BlockOrigin; -use primitives::Blake2Hasher; -use crate::{TestClient, AccountKeyring}; -use runtime_primitives::traits::Block as BlockT; use crate::backend; use crate::blockchain::{Backend as BlockChainBackendT, HeaderBackend}; -use crate::{BlockBuilderExt, new_with_backend}; +use crate::{new_with_backend, BlockBuilderExt}; +use crate::{AccountKeyring, TestClient}; +use consensus::BlockOrigin; +use primitives::Blake2Hasher; use runtime::{self, Transfer}; use runtime_primitives::generic::BlockId; +use runtime_primitives::traits::Block as BlockT; +use std::sync::Arc; /// helper to test the `leaves` implementation for various backends -pub fn test_leaves_for_backend(backend: Arc) where - B: backend::LocalBackend, +pub fn test_leaves_for_backend(backend: Arc) +where + B: backend::LocalBackend, { - // block tree: - // G -> A1 -> A2 -> A3 -> A4 -> A5 - // A1 -> B2 -> B3 -> B4 - // B2 -> C3 - // A1 -> D2 - - let client = new_with_backend(backend.clone(), false); - - let genesis_hash = client.info().unwrap().chain.genesis_hash; - - assert_eq!( - client.backend().blockchain().leaves().unwrap(), - vec![genesis_hash]); - - // G -> A1 - let a1 = client.new_block().unwrap().bake().unwrap(); - client.import(BlockOrigin::Own, a1.clone()).unwrap(); - assert_eq!( - backend.blockchain().leaves().unwrap(), - vec![a1.hash()]); - - // A1 -> A2 - let a2 = client.new_block_at(&BlockId::Hash(a1.hash())).unwrap().bake().unwrap(); - client.import(BlockOrigin::Own, a2.clone()).unwrap(); - assert_eq!( - client.backend().blockchain().leaves().unwrap(), - vec![a2.hash()]); - - // A2 -> A3 - let a3 = client.new_block_at(&BlockId::Hash(a2.hash())).unwrap().bake().unwrap(); - client.import(BlockOrigin::Own, a3.clone()).unwrap(); - assert_eq!( - backend.blockchain().leaves().unwrap(), - vec![a3.hash()]); - - // A3 -> A4 - let a4 = client.new_block_at(&BlockId::Hash(a3.hash())).unwrap().bake().unwrap(); - client.import(BlockOrigin::Own, a4.clone()).unwrap(); - assert_eq!( - backend.blockchain().leaves().unwrap(), - vec![a4.hash()]); - - // A4 -> A5 - let a5 = client.new_block_at(&BlockId::Hash(a4.hash())).unwrap().bake().unwrap(); - client.import(BlockOrigin::Own, a5.clone()).unwrap(); - assert_eq!( - backend.blockchain().leaves().unwrap(), - vec![a5.hash()]); - - // A1 -> B2 - let mut builder = client.new_block_at(&BlockId::Hash(a1.hash())).unwrap(); - // this push is required as otherwise B2 has the same hash as A2 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 41, - nonce: 0, - }).unwrap(); - let b2 = builder.bake().unwrap(); - client.import(BlockOrigin::Own, b2.clone()).unwrap(); - assert_eq!( - backend.blockchain().leaves().unwrap(), - vec![a5.hash(), b2.hash()]); - - // B2 -> B3 - let b3 = client.new_block_at(&BlockId::Hash(b2.hash())).unwrap().bake().unwrap(); - client.import(BlockOrigin::Own, 
b3.clone()).unwrap(); - assert_eq!( - backend.blockchain().leaves().unwrap(), - vec![a5.hash(), b3.hash()]); - - // B3 -> B4 - let b4 = client.new_block_at(&BlockId::Hash(b3.hash())).unwrap().bake().unwrap(); - client.import(BlockOrigin::Own, b4.clone()).unwrap(); - assert_eq!( - backend.blockchain().leaves().unwrap(), - vec![a5.hash(), b4.hash()]); - - // // B2 -> C3 - let mut builder = client.new_block_at(&BlockId::Hash(b2.hash())).unwrap(); - // this push is required as otherwise C3 has the same hash as B3 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 1, - nonce: 1, - }).unwrap(); - let c3 = builder.bake().unwrap(); - client.import(BlockOrigin::Own, c3.clone()).unwrap(); - assert_eq!( - backend.blockchain().leaves().unwrap(), - vec![a5.hash(), b4.hash(), c3.hash()]); - - // A1 -> D2 - let mut builder = client.new_block_at(&BlockId::Hash(a1.hash())).unwrap(); - // this push is required as otherwise D2 has the same hash as B2 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 1, - nonce: 0, - }).unwrap(); - let d2 = builder.bake().unwrap(); - client.import(BlockOrigin::Own, d2.clone()).unwrap(); - assert_eq!( - backend.blockchain().leaves().unwrap(), - vec![a5.hash(), b4.hash(), c3.hash(), d2.hash()]); + // block tree: + // G -> A1 -> A2 -> A3 -> A4 -> A5 + // A1 -> B2 -> B3 -> B4 + // B2 -> C3 + // A1 -> D2 + + let client = new_with_backend(backend.clone(), false); + + let genesis_hash = client.info().unwrap().chain.genesis_hash; + + assert_eq!( + client.backend().blockchain().leaves().unwrap(), + vec![genesis_hash] + ); + + // G -> A1 + let a1 = client.new_block().unwrap().bake().unwrap(); + client.import(BlockOrigin::Own, a1.clone()).unwrap(); + assert_eq!(backend.blockchain().leaves().unwrap(), vec![a1.hash()]); + + // A1 -> A2 + let a2 = client + .new_block_at(&BlockId::Hash(a1.hash())) + .unwrap() + .bake() + .unwrap(); + client.import(BlockOrigin::Own, a2.clone()).unwrap(); + assert_eq!( + client.backend().blockchain().leaves().unwrap(), + vec![a2.hash()] + ); + + // A2 -> A3 + let a3 = client + .new_block_at(&BlockId::Hash(a2.hash())) + .unwrap() + .bake() + .unwrap(); + client.import(BlockOrigin::Own, a3.clone()).unwrap(); + assert_eq!(backend.blockchain().leaves().unwrap(), vec![a3.hash()]); + + // A3 -> A4 + let a4 = client + .new_block_at(&BlockId::Hash(a3.hash())) + .unwrap() + .bake() + .unwrap(); + client.import(BlockOrigin::Own, a4.clone()).unwrap(); + assert_eq!(backend.blockchain().leaves().unwrap(), vec![a4.hash()]); + + // A4 -> A5 + let a5 = client + .new_block_at(&BlockId::Hash(a4.hash())) + .unwrap() + .bake() + .unwrap(); + client.import(BlockOrigin::Own, a5.clone()).unwrap(); + assert_eq!(backend.blockchain().leaves().unwrap(), vec![a5.hash()]); + + // A1 -> B2 + let mut builder = client.new_block_at(&BlockId::Hash(a1.hash())).unwrap(); + // this push is required as otherwise B2 has the same hash as A2 and won't get imported + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 41, + nonce: 0, + }) + .unwrap(); + let b2 = builder.bake().unwrap(); + client.import(BlockOrigin::Own, b2.clone()).unwrap(); + assert_eq!( + backend.blockchain().leaves().unwrap(), + vec![a5.hash(), b2.hash()] + ); + + // B2 -> B3 + let b3 = client + .new_block_at(&BlockId::Hash(b2.hash())) + .unwrap() + .bake() + .unwrap(); + 
client.import(BlockOrigin::Own, b3.clone()).unwrap(); + assert_eq!( + backend.blockchain().leaves().unwrap(), + vec![a5.hash(), b3.hash()] + ); + + // B3 -> B4 + let b4 = client + .new_block_at(&BlockId::Hash(b3.hash())) + .unwrap() + .bake() + .unwrap(); + client.import(BlockOrigin::Own, b4.clone()).unwrap(); + assert_eq!( + backend.blockchain().leaves().unwrap(), + vec![a5.hash(), b4.hash()] + ); + + // // B2 -> C3 + let mut builder = client.new_block_at(&BlockId::Hash(b2.hash())).unwrap(); + // this push is required as otherwise C3 has the same hash as B3 and won't get imported + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 1, + }) + .unwrap(); + let c3 = builder.bake().unwrap(); + client.import(BlockOrigin::Own, c3.clone()).unwrap(); + assert_eq!( + backend.blockchain().leaves().unwrap(), + vec![a5.hash(), b4.hash(), c3.hash()] + ); + + // A1 -> D2 + let mut builder = client.new_block_at(&BlockId::Hash(a1.hash())).unwrap(); + // this push is required as otherwise D2 has the same hash as B2 and won't get imported + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 0, + }) + .unwrap(); + let d2 = builder.bake().unwrap(); + client.import(BlockOrigin::Own, d2.clone()).unwrap(); + assert_eq!( + backend.blockchain().leaves().unwrap(), + vec![a5.hash(), b4.hash(), c3.hash(), d2.hash()] + ); } /// helper to test the `children` implementation for various backends -pub fn test_children_for_backend(backend: Arc) where - B: backend::LocalBackend, +pub fn test_children_for_backend(backend: Arc) +where + B: backend::LocalBackend, { - // block tree: - // G -> A1 -> A2 -> A3 -> A4 -> A5 - // A1 -> B2 -> B3 -> B4 - // B2 -> C3 - // A1 -> D2 - - let client = new_with_backend(backend.clone(), false); - - // G -> A1 - let a1 = client.new_block().unwrap().bake().unwrap(); - client.import(BlockOrigin::Own, a1.clone()).unwrap(); - - // A1 -> A2 - let a2 = client.new_block_at(&BlockId::Hash(a1.hash())).unwrap().bake().unwrap(); - client.import(BlockOrigin::Own, a2.clone()).unwrap(); - - // A2 -> A3 - let a3 = client.new_block_at(&BlockId::Hash(a2.hash())).unwrap().bake().unwrap(); - client.import(BlockOrigin::Own, a3.clone()).unwrap(); - - // A3 -> A4 - let a4 = client.new_block_at(&BlockId::Hash(a3.hash())).unwrap().bake().unwrap(); - client.import(BlockOrigin::Own, a4.clone()).unwrap(); - - // A4 -> A5 - let a5 = client.new_block_at(&BlockId::Hash(a4.hash())).unwrap().bake().unwrap(); - client.import(BlockOrigin::Own, a5.clone()).unwrap(); - - // A1 -> B2 - let mut builder = client.new_block_at(&BlockId::Hash(a1.hash())).unwrap(); - // this push is required as otherwise B2 has the same hash as A2 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 41, - nonce: 0, - }).unwrap(); - let b2 = builder.bake().unwrap(); - client.import(BlockOrigin::Own, b2.clone()).unwrap(); - - // B2 -> B3 - let b3 = client.new_block_at(&BlockId::Hash(b2.hash())).unwrap().bake().unwrap(); - client.import(BlockOrigin::Own, b3.clone()).unwrap(); - - // B3 -> B4 - let b4 = client.new_block_at(&BlockId::Hash(b3.hash())).unwrap().bake().unwrap(); - client.import(BlockOrigin::Own, b4.clone()).unwrap(); - - // // B2 -> C3 - let mut builder = client.new_block_at(&BlockId::Hash(b2.hash())).unwrap(); - // this push is required as otherwise C3 has the same hash as B3 and won't get 
imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 1, - nonce: 1, - }).unwrap(); - let c3 = builder.bake().unwrap(); - client.import(BlockOrigin::Own, c3.clone()).unwrap(); - - // A1 -> D2 - let mut builder = client.new_block_at(&BlockId::Hash(a1.hash())).unwrap(); - // this push is required as otherwise D2 has the same hash as B2 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 1, - nonce: 0, - }).unwrap(); - let d2 = builder.bake().unwrap(); - client.import(BlockOrigin::Own, d2.clone()).unwrap(); - - let genesis_hash = client.info().unwrap().chain.genesis_hash; - - let children1 = backend.blockchain().children(a4.hash()).unwrap(); - assert_eq!(vec![a5.hash()], children1); - - let children2 = backend.blockchain().children(a1.hash()).unwrap(); - assert_eq!(vec![a2.hash(), b2.hash(), d2.hash()], children2); - - let children3 = backend.blockchain().children(genesis_hash).unwrap(); - assert_eq!(vec![a1.hash()], children3); - - let children4 = backend.blockchain().children(b2.hash()).unwrap(); - assert_eq!(vec![b3.hash(), c3.hash()], children4); + // block tree: + // G -> A1 -> A2 -> A3 -> A4 -> A5 + // A1 -> B2 -> B3 -> B4 + // B2 -> C3 + // A1 -> D2 + + let client = new_with_backend(backend.clone(), false); + + // G -> A1 + let a1 = client.new_block().unwrap().bake().unwrap(); + client.import(BlockOrigin::Own, a1.clone()).unwrap(); + + // A1 -> A2 + let a2 = client + .new_block_at(&BlockId::Hash(a1.hash())) + .unwrap() + .bake() + .unwrap(); + client.import(BlockOrigin::Own, a2.clone()).unwrap(); + + // A2 -> A3 + let a3 = client + .new_block_at(&BlockId::Hash(a2.hash())) + .unwrap() + .bake() + .unwrap(); + client.import(BlockOrigin::Own, a3.clone()).unwrap(); + + // A3 -> A4 + let a4 = client + .new_block_at(&BlockId::Hash(a3.hash())) + .unwrap() + .bake() + .unwrap(); + client.import(BlockOrigin::Own, a4.clone()).unwrap(); + + // A4 -> A5 + let a5 = client + .new_block_at(&BlockId::Hash(a4.hash())) + .unwrap() + .bake() + .unwrap(); + client.import(BlockOrigin::Own, a5.clone()).unwrap(); + + // A1 -> B2 + let mut builder = client.new_block_at(&BlockId::Hash(a1.hash())).unwrap(); + // this push is required as otherwise B2 has the same hash as A2 and won't get imported + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 41, + nonce: 0, + }) + .unwrap(); + let b2 = builder.bake().unwrap(); + client.import(BlockOrigin::Own, b2.clone()).unwrap(); + + // B2 -> B3 + let b3 = client + .new_block_at(&BlockId::Hash(b2.hash())) + .unwrap() + .bake() + .unwrap(); + client.import(BlockOrigin::Own, b3.clone()).unwrap(); + + // B3 -> B4 + let b4 = client + .new_block_at(&BlockId::Hash(b3.hash())) + .unwrap() + .bake() + .unwrap(); + client.import(BlockOrigin::Own, b4.clone()).unwrap(); + + // // B2 -> C3 + let mut builder = client.new_block_at(&BlockId::Hash(b2.hash())).unwrap(); + // this push is required as otherwise C3 has the same hash as B3 and won't get imported + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 1, + }) + .unwrap(); + let c3 = builder.bake().unwrap(); + client.import(BlockOrigin::Own, c3.clone()).unwrap(); + + // A1 -> D2 + let mut builder = client.new_block_at(&BlockId::Hash(a1.hash())).unwrap(); + // this push is required as otherwise D2 has the same hash as 
B2 and won't get imported + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 0, + }) + .unwrap(); + let d2 = builder.bake().unwrap(); + client.import(BlockOrigin::Own, d2.clone()).unwrap(); + + let genesis_hash = client.info().unwrap().chain.genesis_hash; + + let children1 = backend.blockchain().children(a4.hash()).unwrap(); + assert_eq!(vec![a5.hash()], children1); + + let children2 = backend.blockchain().children(a1.hash()).unwrap(); + assert_eq!(vec![a2.hash(), b2.hash(), d2.hash()], children2); + + let children3 = backend.blockchain().children(genesis_hash).unwrap(); + assert_eq!(vec![a1.hash()], children3); + + let children4 = backend.blockchain().children(b2.hash()).unwrap(); + assert_eq!(vec![b3.hash(), c3.hash()], children4); } -pub fn test_blockchain_query_by_number_gets_canonical(backend: Arc) where - B: backend::LocalBackend, +pub fn test_blockchain_query_by_number_gets_canonical(backend: Arc) +where + B: backend::LocalBackend, { - // block tree: - // G -> A1 -> A2 -> A3 -> A4 -> A5 - // A1 -> B2 -> B3 -> B4 - // B2 -> C3 - // A1 -> D2 - let client = new_with_backend(backend, false); - - // G -> A1 - let a1 = client.new_block().unwrap().bake().unwrap(); - client.import(BlockOrigin::Own, a1.clone()).unwrap(); - - // A1 -> A2 - let a2 = client.new_block_at(&BlockId::Hash(a1.hash())).unwrap().bake().unwrap(); - client.import(BlockOrigin::Own, a2.clone()).unwrap(); - - // A2 -> A3 - let a3 = client.new_block_at(&BlockId::Hash(a2.hash())).unwrap().bake().unwrap(); - client.import(BlockOrigin::Own, a3.clone()).unwrap(); - - // A3 -> A4 - let a4 = client.new_block_at(&BlockId::Hash(a3.hash())).unwrap().bake().unwrap(); - client.import(BlockOrigin::Own, a4.clone()).unwrap(); - - // A4 -> A5 - let a5 = client.new_block_at(&BlockId::Hash(a4.hash())).unwrap().bake().unwrap(); - client.import(BlockOrigin::Own, a5.clone()).unwrap(); - - // A1 -> B2 - let mut builder = client.new_block_at(&BlockId::Hash(a1.hash())).unwrap(); - // this push is required as otherwise B2 has the same hash as A2 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 41, - nonce: 0, - }).unwrap(); - let b2 = builder.bake().unwrap(); - client.import(BlockOrigin::Own, b2.clone()).unwrap(); - - // B2 -> B3 - let b3 = client.new_block_at(&BlockId::Hash(b2.hash())).unwrap().bake().unwrap(); - client.import(BlockOrigin::Own, b3.clone()).unwrap(); - - // B3 -> B4 - let b4 = client.new_block_at(&BlockId::Hash(b3.hash())).unwrap().bake().unwrap(); - client.import(BlockOrigin::Own, b4.clone()).unwrap(); - - // // B2 -> C3 - let mut builder = client.new_block_at(&BlockId::Hash(b2.hash())).unwrap(); - // this push is required as otherwise C3 has the same hash as B3 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 1, - nonce: 1, - }).unwrap(); - let c3 = builder.bake().unwrap(); - client.import(BlockOrigin::Own, c3.clone()).unwrap(); - - // A1 -> D2 - let mut builder = client.new_block_at(&BlockId::Hash(a1.hash())).unwrap(); - // this push is required as otherwise D2 has the same hash as B2 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 1, - nonce: 0, - }).unwrap(); - let d2 = builder.bake().unwrap(); - client.import(BlockOrigin::Own, d2.clone()).unwrap(); - - let 
genesis_hash = client.info().unwrap().chain.genesis_hash; - - assert_eq!(client.backend().blockchain().header(BlockId::Number(0)).unwrap().unwrap().hash(), genesis_hash); - assert_eq!(client.backend().blockchain().hash(0).unwrap().unwrap(), genesis_hash); - - assert_eq!(client.backend().blockchain().header(BlockId::Number(1)).unwrap().unwrap().hash(), a1.hash()); - assert_eq!(client.backend().blockchain().hash(1).unwrap().unwrap(), a1.hash()); - - assert_eq!(client.backend().blockchain().header(BlockId::Number(2)).unwrap().unwrap().hash(), a2.hash()); - assert_eq!(client.backend().blockchain().hash(2).unwrap().unwrap(), a2.hash()); - - assert_eq!(client.backend().blockchain().header(BlockId::Number(3)).unwrap().unwrap().hash(), a3.hash()); - assert_eq!(client.backend().blockchain().hash(3).unwrap().unwrap(), a3.hash()); - - assert_eq!(client.backend().blockchain().header(BlockId::Number(4)).unwrap().unwrap().hash(), a4.hash()); - assert_eq!(client.backend().blockchain().hash(4).unwrap().unwrap(), a4.hash()); - - assert_eq!(client.backend().blockchain().header(BlockId::Number(5)).unwrap().unwrap().hash(), a5.hash()); - assert_eq!(client.backend().blockchain().hash(5).unwrap().unwrap(), a5.hash()); + // block tree: + // G -> A1 -> A2 -> A3 -> A4 -> A5 + // A1 -> B2 -> B3 -> B4 + // B2 -> C3 + // A1 -> D2 + let client = new_with_backend(backend, false); + + // G -> A1 + let a1 = client.new_block().unwrap().bake().unwrap(); + client.import(BlockOrigin::Own, a1.clone()).unwrap(); + + // A1 -> A2 + let a2 = client + .new_block_at(&BlockId::Hash(a1.hash())) + .unwrap() + .bake() + .unwrap(); + client.import(BlockOrigin::Own, a2.clone()).unwrap(); + + // A2 -> A3 + let a3 = client + .new_block_at(&BlockId::Hash(a2.hash())) + .unwrap() + .bake() + .unwrap(); + client.import(BlockOrigin::Own, a3.clone()).unwrap(); + + // A3 -> A4 + let a4 = client + .new_block_at(&BlockId::Hash(a3.hash())) + .unwrap() + .bake() + .unwrap(); + client.import(BlockOrigin::Own, a4.clone()).unwrap(); + + // A4 -> A5 + let a5 = client + .new_block_at(&BlockId::Hash(a4.hash())) + .unwrap() + .bake() + .unwrap(); + client.import(BlockOrigin::Own, a5.clone()).unwrap(); + + // A1 -> B2 + let mut builder = client.new_block_at(&BlockId::Hash(a1.hash())).unwrap(); + // this push is required as otherwise B2 has the same hash as A2 and won't get imported + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 41, + nonce: 0, + }) + .unwrap(); + let b2 = builder.bake().unwrap(); + client.import(BlockOrigin::Own, b2.clone()).unwrap(); + + // B2 -> B3 + let b3 = client + .new_block_at(&BlockId::Hash(b2.hash())) + .unwrap() + .bake() + .unwrap(); + client.import(BlockOrigin::Own, b3.clone()).unwrap(); + + // B3 -> B4 + let b4 = client + .new_block_at(&BlockId::Hash(b3.hash())) + .unwrap() + .bake() + .unwrap(); + client.import(BlockOrigin::Own, b4.clone()).unwrap(); + + // // B2 -> C3 + let mut builder = client.new_block_at(&BlockId::Hash(b2.hash())).unwrap(); + // this push is required as otherwise C3 has the same hash as B3 and won't get imported + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 1, + }) + .unwrap(); + let c3 = builder.bake().unwrap(); + client.import(BlockOrigin::Own, c3.clone()).unwrap(); + + // A1 -> D2 + let mut builder = client.new_block_at(&BlockId::Hash(a1.hash())).unwrap(); + // this push is required as otherwise D2 has the same hash as B2 and won't get 
imported + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 0, + }) + .unwrap(); + let d2 = builder.bake().unwrap(); + client.import(BlockOrigin::Own, d2.clone()).unwrap(); + + let genesis_hash = client.info().unwrap().chain.genesis_hash; + + assert_eq!( + client + .backend() + .blockchain() + .header(BlockId::Number(0)) + .unwrap() + .unwrap() + .hash(), + genesis_hash + ); + assert_eq!( + client.backend().blockchain().hash(0).unwrap().unwrap(), + genesis_hash + ); + + assert_eq!( + client + .backend() + .blockchain() + .header(BlockId::Number(1)) + .unwrap() + .unwrap() + .hash(), + a1.hash() + ); + assert_eq!( + client.backend().blockchain().hash(1).unwrap().unwrap(), + a1.hash() + ); + + assert_eq!( + client + .backend() + .blockchain() + .header(BlockId::Number(2)) + .unwrap() + .unwrap() + .hash(), + a2.hash() + ); + assert_eq!( + client.backend().blockchain().hash(2).unwrap().unwrap(), + a2.hash() + ); + + assert_eq!( + client + .backend() + .blockchain() + .header(BlockId::Number(3)) + .unwrap() + .unwrap() + .hash(), + a3.hash() + ); + assert_eq!( + client.backend().blockchain().hash(3).unwrap().unwrap(), + a3.hash() + ); + + assert_eq!( + client + .backend() + .blockchain() + .header(BlockId::Number(4)) + .unwrap() + .unwrap() + .hash(), + a4.hash() + ); + assert_eq!( + client.backend().blockchain().hash(4).unwrap().unwrap(), + a4.hash() + ); + + assert_eq!( + client + .backend() + .blockchain() + .header(BlockId::Number(5)) + .unwrap() + .unwrap() + .hash(), + a5.hash() + ); + assert_eq!( + client.backend().blockchain().hash(5).unwrap().unwrap(), + a5.hash() + ); } diff --git a/core/test-runtime/src/genesismap.rs b/core/test-runtime/src/genesismap.rs index 13e9e5ec9a..5dfa38d63a 100644 --- a/core/test-runtime/src/genesismap.rs +++ b/core/test-runtime/src/genesismap.rs @@ -16,57 +16,80 @@ //! Tool for creating the genesis block. -use std::collections::HashMap; -use runtime_io::twox_128; use super::AccountId; -use parity_codec::{Encode, KeyedVec, Joiner}; -use primitives::{ChangesTrieConfiguration, map, storage::well_known_keys}; -use runtime_primitives::traits::Block; +use parity_codec::{Encode, Joiner, KeyedVec}; use primitives::ed25519::Public as AuthorityId; +use primitives::{map, storage::well_known_keys, ChangesTrieConfiguration}; +use runtime_io::twox_128; +use runtime_primitives::traits::Block; +use std::collections::HashMap; /// Configuration of a general Substrate test genesis block. 
 pub struct GenesisConfig {
-    pub changes_trie_config: Option<ChangesTrieConfiguration>,
-    pub authorities: Vec<AuthorityId>,
-    pub balances: Vec<(AccountId, u64)>,
+    pub changes_trie_config: Option<ChangesTrieConfiguration>,
+    pub authorities: Vec<AuthorityId>,
+    pub balances: Vec<(AccountId, u64)>,
 }

 impl GenesisConfig {
-    pub fn new(support_changes_trie: bool, authorities: Vec<AuthorityId>, endowed_accounts: Vec<AccountId>, balance: u64) -> Self {
-        GenesisConfig {
-            changes_trie_config: match support_changes_trie {
-                true => Some(super::changes_trie_config()),
-                false => None,
-            },
-            authorities: authorities.clone(),
-            balances: endowed_accounts.into_iter().map(|a| (a, balance)).collect(),
-        }
-    }
+    pub fn new(
+        support_changes_trie: bool,
+        authorities: Vec<AuthorityId>,
+        endowed_accounts: Vec<AccountId>,
+        balance: u64,
+    ) -> Self {
+        GenesisConfig {
+            changes_trie_config: match support_changes_trie {
+                true => Some(super::changes_trie_config()),
+                false => None,
+            },
+            authorities: authorities.clone(),
+            balances: endowed_accounts.into_iter().map(|a| (a, balance)).collect(),
+        }
+    }

-    pub fn genesis_map(&self) -> HashMap<Vec<u8>, Vec<u8>> {
-        let wasm_runtime = include_bytes!("../wasm/target/wasm32-unknown-unknown/release/substrate_test_runtime.compact.wasm").to_vec();
-        let mut map: HashMap<Vec<u8>, Vec<u8>> = self.balances.iter()
-            .map(|&(ref account, balance)| (account.to_keyed_vec(b"balance:"), vec![].and(&balance)))
-            .map(|(k, v)| (twox_128(&k[..])[..].to_vec(), v.to_vec()))
-            .chain(vec![
-                (well_known_keys::CODE.into(), wasm_runtime),
-                (well_known_keys::HEAP_PAGES.into(), vec![].and(&(16 as u64))),
-                (well_known_keys::AUTHORITY_COUNT.into(), vec![].and(&(self.authorities.len() as u32))),
-            ].into_iter())
-            .chain(self.authorities.iter()
-                .enumerate()
-                .map(|(i, account)| ((i as u32).to_keyed_vec(well_known_keys::AUTHORITY_PREFIX), vec![].and(account)))
-            )
-            .collect();
-        if let Some(ref changes_trie_config) = self.changes_trie_config {
-            map.insert(well_known_keys::CHANGES_TRIE_CONFIG.to_vec(), changes_trie_config.encode());
-        }
-        map
-    }
+    pub fn genesis_map(&self) -> HashMap<Vec<u8>, Vec<u8>> {
+        let wasm_runtime = include_bytes!(
+            "../wasm/target/wasm32-unknown-unknown/release/substrate_test_runtime.compact.wasm"
+        )
+        .to_vec();
+        let mut map: HashMap<Vec<u8>, Vec<u8>> = self
+            .balances
+            .iter()
+            .map(|&(ref account, balance)| {
+                (account.to_keyed_vec(b"balance:"), vec![].and(&balance))
+            })
+            .map(|(k, v)| (twox_128(&k[..])[..].to_vec(), v.to_vec()))
+            .chain(
+                vec![
+                    (well_known_keys::CODE.into(), wasm_runtime),
+                    (well_known_keys::HEAP_PAGES.into(), vec![].and(&(16 as u64))),
+                    (
+                        well_known_keys::AUTHORITY_COUNT.into(),
+                        vec![].and(&(self.authorities.len() as u32)),
+                    ),
+                ]
+                .into_iter(),
+            )
+            .chain(self.authorities.iter().enumerate().map(|(i, account)| {
+                (
+                    (i as u32).to_keyed_vec(well_known_keys::AUTHORITY_PREFIX),
+                    vec![].and(account),
+                )
+            }))
+            .collect();
+        if let Some(ref changes_trie_config) = self.changes_trie_config {
+            map.insert(
+                well_known_keys::CHANGES_TRIE_CONFIG.to_vec(),
+                changes_trie_config.encode(),
+            );
+        }
+        map
+    }
 }

 pub fn additional_storage_with_genesis(genesis_block: &crate::Block) -> HashMap<Vec<u8>, Vec<u8>> {
-    map![
-        twox_128(&b"latest"[..]).to_vec() => genesis_block.hash().as_fixed_bytes().to_vec()
-    ]
+    map![
+        twox_128(&b"latest"[..]).to_vec() => genesis_block.hash().as_fixed_bytes().to_vec()
+    ]
 }
diff --git a/core/test-runtime/src/lib.rs b/core/test-runtime/src/lib.rs
index 028bb7ba52..2c6b6b7e78 100644
--- a/core/test-runtime/src/lib.rs
+++ b/core/test-runtime/src/lib.rs
@@ -22,120 +22,125 @@

 pub mod genesismap;
 pub mod system;

-use rstd::{prelude::*, marker::PhantomData};
-use
parity_codec::{Encode, Decode, Input}; +use parity_codec::{Decode, Encode, Input}; +use rstd::{marker::PhantomData, prelude::*}; -use substrate_client::{ - runtime_api as client_api, block_builder::api as block_builder_api, decl_runtime_apis, - impl_runtime_apis, -}; -use runtime_primitives::{ - ApplyResult, transaction_validity::TransactionValidity, - create_runtime_str, - traits::{ - BlindCheckable, BlakeTwo256, Block as BlockT, Extrinsic as ExtrinsicT, - GetNodeBlockType, GetRuntimeBlockType, AuthorityIdFor, - }, -}; -use runtime_version::RuntimeVersion; +use cfg_if::cfg_if; +use inherents::{CheckInherentsResult, InherentData}; pub use primitives::hash::H256; use primitives::{ed25519, sr25519, OpaqueMetadata}; +use runtime_primitives::{ + create_runtime_str, + traits::{ + AuthorityIdFor, BlakeTwo256, BlindCheckable, Block as BlockT, Extrinsic as ExtrinsicT, + GetNodeBlockType, GetRuntimeBlockType, + }, + transaction_validity::TransactionValidity, + ApplyResult, +}; #[cfg(any(feature = "std", test))] use runtime_version::NativeVersion; -use inherents::{CheckInherentsResult, InherentData}; -use cfg_if::cfg_if; +use runtime_version::RuntimeVersion; +use substrate_client::{ + block_builder::api as block_builder_api, decl_runtime_apis, impl_runtime_apis, + runtime_api as client_api, +}; /// Test runtime version. pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("test"), - impl_name: create_runtime_str!("parity-test"), - authoring_version: 1, - spec_version: 1, - impl_version: 1, - apis: RUNTIME_API_VERSIONS, + spec_name: create_runtime_str!("test"), + impl_name: create_runtime_str!("parity-test"), + authoring_version: 1, + spec_version: 1, + impl_version: 1, + apis: RUNTIME_API_VERSIONS, }; fn version() -> RuntimeVersion { - VERSION + VERSION } /// Native version. #[cfg(any(feature = "std", test))] pub fn native_version() -> NativeVersion { - NativeVersion { - runtime_version: VERSION, - can_author_with: Default::default(), - } + NativeVersion { + runtime_version: VERSION, + can_author_with: Default::default(), + } } /// Calls in transactions. #[derive(Clone, PartialEq, Eq, Encode, Decode)] #[cfg_attr(feature = "std", derive(Debug))] pub struct Transfer { - pub from: AccountId, - pub to: AccountId, - pub amount: u64, - pub nonce: u64, + pub from: AccountId, + pub to: AccountId, + pub amount: u64, + pub nonce: u64, } impl Transfer { - /// Convert into a signed extrinsic. - #[cfg(feature = "std")] - pub fn into_signed_tx(self) -> Extrinsic { - let signature = keyring::AccountKeyring::from_public(&self.from) - .expect("Creates keyring from public key.").sign(&self.encode()).into(); - Extrinsic::Transfer(self, signature) - } + /// Convert into a signed extrinsic. + #[cfg(feature = "std")] + pub fn into_signed_tx(self) -> Extrinsic { + let signature = keyring::AccountKeyring::from_public(&self.from) + .expect("Creates keyring from public key.") + .sign(&self.encode()) + .into(); + Extrinsic::Transfer(self, signature) + } } /// Extrinsic for test-runtime. 
#[derive(Clone, PartialEq, Eq, Encode, Decode)] #[cfg_attr(feature = "std", derive(Debug))] pub enum Extrinsic { - AuthoritiesChange(Vec), - Transfer(Transfer, AccountSignature), - IncludeData(Vec), + AuthoritiesChange(Vec), + Transfer(Transfer, AccountSignature), + IncludeData(Vec), } #[cfg(feature = "std")] -impl serde::Serialize for Extrinsic -{ - fn serialize(&self, seq: S) -> Result where S: ::serde::Serializer { - self.using_encoded(|bytes| seq.serialize_bytes(bytes)) - } +impl serde::Serialize for Extrinsic { + fn serialize(&self, seq: S) -> Result + where + S: ::serde::Serializer, + { + self.using_encoded(|bytes| seq.serialize_bytes(bytes)) + } } impl BlindCheckable for Extrinsic { - type Checked = Self; - - fn check(self) -> Result { - match self { - Extrinsic::AuthoritiesChange(new_auth) => Ok(Extrinsic::AuthoritiesChange(new_auth)), - Extrinsic::Transfer(transfer, signature) => { - if runtime_primitives::verify_encoded_lazy(&signature, &transfer, &transfer.from) { - Ok(Extrinsic::Transfer(transfer, signature)) - } else { - Err(runtime_primitives::BAD_SIGNATURE) - } - }, - Extrinsic::IncludeData(data) => Ok(Extrinsic::IncludeData(data)), - } - } + type Checked = Self; + + fn check(self) -> Result { + match self { + Extrinsic::AuthoritiesChange(new_auth) => Ok(Extrinsic::AuthoritiesChange(new_auth)), + Extrinsic::Transfer(transfer, signature) => { + if runtime_primitives::verify_encoded_lazy(&signature, &transfer, &transfer.from) { + Ok(Extrinsic::Transfer(transfer, signature)) + } else { + Err(runtime_primitives::BAD_SIGNATURE) + } + } + Extrinsic::IncludeData(data) => Ok(Extrinsic::IncludeData(data)), + } + } } impl ExtrinsicT for Extrinsic { - fn is_signed(&self) -> Option { - Some(true) - } + fn is_signed(&self) -> Option { + Some(true) + } } impl Extrinsic { - pub fn transfer(&self) -> &Transfer { - match self { - Extrinsic::Transfer(ref transfer, _) => transfer, - _ => panic!("cannot convert to transfer ref"), - } - } + pub fn transfer(&self) -> &Transfer { + match self { + Extrinsic::Transfer(ref transfer, _) => transfer, + _ => panic!("cannot convert to transfer ref"), + } + } } // The identity type used by authorities. @@ -153,7 +158,8 @@ pub type BlockNumber = u64; /// Index of a transaction. pub type Index = u64; /// The item of a block digest. -pub type DigestItem = runtime_primitives::generic::DigestItem; +pub type DigestItem = + runtime_primitives::generic::DigestItem; /// The digest of a block. pub type Digest = runtime_primitives::generic::Digest; /// A test block. @@ -163,343 +169,348 @@ pub type Header = runtime_primitives::generic::Header Vec { - use runtime_io::print; - - print("run_tests..."); - let block = Block::decode(&mut input).unwrap(); - print("deserialized block."); - let stxs = block.extrinsics.iter().map(Encode::encode).collect::>(); - print("reserialized transactions."); - [stxs.len() as u8].encode() + use runtime_io::print; + + print("run_tests..."); + let block = Block::decode(&mut input).unwrap(); + print("deserialized block."); + let stxs = block + .extrinsics + .iter() + .map(Encode::encode) + .collect::>(); + print("reserialized transactions."); + [stxs.len() as u8].encode() } /// Changes trie configuration (optionally) used in tests. pub fn changes_trie_config() -> primitives::ChangesTrieConfiguration { - primitives::ChangesTrieConfiguration { - digest_interval: 4, - digest_levels: 2, - } + primitives::ChangesTrieConfiguration { + digest_interval: 4, + digest_levels: 2, + } } /// A type that can not be decoded. 
#[derive(PartialEq)] pub struct DecodeFails { - _phantom: PhantomData, + _phantom: PhantomData, } impl Encode for DecodeFails { - fn encode(&self) -> Vec { - Vec::new() - } + fn encode(&self) -> Vec { + Vec::new() + } } impl DecodeFails { - /// Create a new instance. - pub fn new() -> DecodeFails { - DecodeFails { - _phantom: Default::default(), - } - } + /// Create a new instance. + pub fn new() -> DecodeFails { + DecodeFails { + _phantom: Default::default(), + } + } } impl Decode for DecodeFails { - fn decode(_: &mut I) -> Option { - // decoding always fails - None - } + fn decode(_: &mut I) -> Option { + // decoding always fails + None + } } cfg_if! { - if #[cfg(feature = "std")] { - decl_runtime_apis! { - #[api_version(2)] - pub trait TestAPI { - /// Return the balance of the given account id. - fn balance_of(id: AccountId) -> u64; - /// A benchmark function that adds one to the given value and returns the result. - fn benchmark_add_one(val: &u64) -> u64; - /// A benchmark function that adds one to each value in the given vector and returns the - /// result. - fn benchmark_vector_add_one(vec: &Vec) -> Vec; - /// A function that always fails to convert a parameter between runtime and node. - fn fail_convert_parameter(param: DecodeFails); - /// A function that always fails to convert its return value between runtime and node. - fn fail_convert_return_value() -> DecodeFails; - /// A function for that the signature changed in version `2`. - #[changed_in(2)] - fn function_signature_changed() -> Vec; - /// The new signature. - fn function_signature_changed() -> u64; - fn fail_on_native() -> u64; - fn fail_on_wasm() -> u64; - fn benchmark_indirect_call() -> u64; - fn benchmark_direct_call() -> u64; - } - } - } else { - decl_runtime_apis! { - pub trait TestAPI { - /// Return the balance of the given account id. - fn balance_of(id: AccountId) -> u64; - /// A benchmark function that adds one to the given value and returns the result. - fn benchmark_add_one(val: &u64) -> u64; - /// A benchmark function that adds one to each value in the given vector and returns the - /// result. - fn benchmark_vector_add_one(vec: &Vec) -> Vec; - /// A function that always fails to convert a parameter between runtime and node. - fn fail_convert_parameter(param: DecodeFails); - /// A function that always fails to convert its return value between runtime and node. - fn fail_convert_return_value() -> DecodeFails; - /// In wasm we just emulate the old behavior. - fn function_signature_changed() -> Vec; - fn fail_on_native() -> u64; - fn fail_on_wasm() -> u64; - fn benchmark_indirect_call() -> u64; - fn benchmark_direct_call() -> u64; - } - } - } + if #[cfg(feature = "std")] { + decl_runtime_apis! { + #[api_version(2)] + pub trait TestAPI { + /// Return the balance of the given account id. + fn balance_of(id: AccountId) -> u64; + /// A benchmark function that adds one to the given value and returns the result. + fn benchmark_add_one(val: &u64) -> u64; + /// A benchmark function that adds one to each value in the given vector and returns the + /// result. + fn benchmark_vector_add_one(vec: &Vec) -> Vec; + /// A function that always fails to convert a parameter between runtime and node. + fn fail_convert_parameter(param: DecodeFails); + /// A function that always fails to convert its return value between runtime and node. + fn fail_convert_return_value() -> DecodeFails; + /// A function for that the signature changed in version `2`. + #[changed_in(2)] + fn function_signature_changed() -> Vec; + /// The new signature. 
+ fn function_signature_changed() -> u64; + fn fail_on_native() -> u64; + fn fail_on_wasm() -> u64; + fn benchmark_indirect_call() -> u64; + fn benchmark_direct_call() -> u64; + } + } + } else { + decl_runtime_apis! { + pub trait TestAPI { + /// Return the balance of the given account id. + fn balance_of(id: AccountId) -> u64; + /// A benchmark function that adds one to the given value and returns the result. + fn benchmark_add_one(val: &u64) -> u64; + /// A benchmark function that adds one to each value in the given vector and returns the + /// result. + fn benchmark_vector_add_one(vec: &Vec) -> Vec; + /// A function that always fails to convert a parameter between runtime and node. + fn fail_convert_parameter(param: DecodeFails); + /// A function that always fails to convert its return value between runtime and node. + fn fail_convert_return_value() -> DecodeFails; + /// In wasm we just emulate the old behavior. + fn function_signature_changed() -> Vec; + fn fail_on_native() -> u64; + fn fail_on_wasm() -> u64; + fn benchmark_indirect_call() -> u64; + fn benchmark_direct_call() -> u64; + } + } + } } pub struct Runtime; impl GetNodeBlockType for Runtime { - type NodeBlock = Block; + type NodeBlock = Block; } impl GetRuntimeBlockType for Runtime { - type RuntimeBlock = Block; + type RuntimeBlock = Block; } /// Adds one to the given input and returns the final result. #[inline(never)] fn benchmark_add_one(i: u64) -> u64 { - i + 1 + i + 1 } /// The `benchmark_add_one` function as function pointer. #[cfg(not(feature = "std"))] -static BENCHMARK_ADD_ONE: runtime_io::ExchangeableFunction u64> = runtime_io::ExchangeableFunction::new(benchmark_add_one); +static BENCHMARK_ADD_ONE: runtime_io::ExchangeableFunction u64> = + runtime_io::ExchangeableFunction::new(benchmark_add_one); cfg_if! { - if #[cfg(feature = "std")] { - impl_runtime_apis! 
{ - impl client_api::Core for Runtime { - fn version() -> RuntimeVersion { - version() - } - - fn execute_block(block: Block) { - system::execute_block(block) - } - - fn initialize_block(header: &::Header) { - system::initialize_block(header) - } - } - - impl client_api::Metadata for Runtime { - fn metadata() -> OpaqueMetadata { - unimplemented!() - } - } - - impl client_api::TaggedTransactionQueue for Runtime { - fn validate_transaction(utx: ::Extrinsic) -> TransactionValidity { - system::validate_transaction(utx) - } - } - - impl block_builder_api::BlockBuilder for Runtime { - fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyResult { - system::execute_transaction(extrinsic) - } - - fn finalize_block() -> ::Header { - system::finalize_block() - } - - fn inherent_extrinsics(_data: InherentData) -> Vec<::Extrinsic> { - vec![] - } - - fn check_inherents(_block: Block, _data: InherentData) -> CheckInherentsResult { - CheckInherentsResult::new() - } - - fn random_seed() -> ::Hash { - unimplemented!() - } - } - - impl self::TestAPI for Runtime { - fn balance_of(id: AccountId) -> u64 { - system::balance_of(id) - } - - fn benchmark_add_one(val: &u64) -> u64 { - val + 1 - } - - fn benchmark_vector_add_one(vec: &Vec) -> Vec { - let mut vec = vec.clone(); - vec.iter_mut().for_each(|v| *v += 1); - vec - } - - fn fail_convert_parameter(_: DecodeFails) {} - - fn fail_convert_return_value() -> DecodeFails { - DecodeFails::new() - } - - fn function_signature_changed() -> u64 { - 1 - } - - fn fail_on_native() -> u64 { - panic!("Failing because we are on native") - } - fn fail_on_wasm() -> u64 { - 1 - } - fn benchmark_indirect_call() -> u64 { - let function = benchmark_add_one; - (0..1000).fold(0, |p, i| p + function(i)) - } - fn benchmark_direct_call() -> u64 { - (0..1000).fold(0, |p, i| p + benchmark_add_one(i)) - } - } - - impl consensus_aura::AuraApi for Runtime { - fn slot_duration() -> u64 { 1 } - } - - impl offchain_primitives::OffchainWorkerApi for Runtime { - fn offchain_worker(block: u64) { - let ex = Extrinsic::IncludeData(block.encode()); - runtime_io::submit_extrinsic(&ex) - } - } - - impl consensus_authorities::AuthoritiesApi for Runtime { - fn authorities() -> Vec> { - crate::system::authorities() - } - } - } - } else { - impl_runtime_apis! 
{ - impl client_api::Core for Runtime { - fn version() -> RuntimeVersion { - version() - } - - fn execute_block(block: Block) { - system::execute_block(block) - } - - fn initialize_block(header: &::Header) { - system::initialize_block(header) - } - } - - impl client_api::Metadata for Runtime { - fn metadata() -> OpaqueMetadata { - unimplemented!() - } - } - - impl client_api::TaggedTransactionQueue for Runtime { - fn validate_transaction(utx: ::Extrinsic) -> TransactionValidity { - system::validate_transaction(utx) - } - } - - impl block_builder_api::BlockBuilder for Runtime { - fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyResult { - system::execute_transaction(extrinsic) - } - - fn finalize_block() -> ::Header { - system::finalize_block() - } - - fn inherent_extrinsics(_data: InherentData) -> Vec<::Extrinsic> { - vec![] - } - - fn check_inherents(_block: Block, _data: InherentData) -> CheckInherentsResult { - CheckInherentsResult::new() - } - - fn random_seed() -> ::Hash { - unimplemented!() - } - } - - impl self::TestAPI for Runtime { - fn balance_of(id: AccountId) -> u64 { - system::balance_of(id) - } - - fn benchmark_add_one(val: &u64) -> u64 { - val + 1 - } - - fn benchmark_vector_add_one(vec: &Vec) -> Vec { - let mut vec = vec.clone(); - vec.iter_mut().for_each(|v| *v += 1); - vec - } - - fn fail_convert_parameter(_: DecodeFails) {} - - fn fail_convert_return_value() -> DecodeFails { - DecodeFails::new() - } - - fn function_signature_changed() -> Vec { - let mut vec = Vec::new(); - vec.push(1); - vec.push(2); - vec - } - - fn fail_on_native() -> u64 { - 1 - } - - fn fail_on_wasm() -> u64 { - panic!("Failing because we are on wasm") - } - - fn benchmark_indirect_call() -> u64 { - (0..10000).fold(0, |p, i| p + BENCHMARK_ADD_ONE.get()(i)) - } - - fn benchmark_direct_call() -> u64 { - (0..10000).fold(0, |p, i| p + benchmark_add_one(i)) - } - } - - impl consensus_aura::AuraApi for Runtime { - fn slot_duration() -> u64 { 1 } - } - - impl offchain_primitives::OffchainWorkerApi for Runtime { - fn offchain_worker(block: u64) { - let ex = Extrinsic::IncludeData(block.encode()); - runtime_io::submit_extrinsic(&ex) - } - } - - impl consensus_authorities::AuthoritiesApi for Runtime { - fn authorities() -> Vec> { - crate::system::authorities() - } - } - } - } -} \ No newline at end of file + if #[cfg(feature = "std")] { + impl_runtime_apis! 
{ + impl client_api::Core for Runtime { + fn version() -> RuntimeVersion { + version() + } + + fn execute_block(block: Block) { + system::execute_block(block) + } + + fn initialize_block(header: &::Header) { + system::initialize_block(header) + } + } + + impl client_api::Metadata for Runtime { + fn metadata() -> OpaqueMetadata { + unimplemented!() + } + } + + impl client_api::TaggedTransactionQueue for Runtime { + fn validate_transaction(utx: ::Extrinsic) -> TransactionValidity { + system::validate_transaction(utx) + } + } + + impl block_builder_api::BlockBuilder for Runtime { + fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyResult { + system::execute_transaction(extrinsic) + } + + fn finalize_block() -> ::Header { + system::finalize_block() + } + + fn inherent_extrinsics(_data: InherentData) -> Vec<::Extrinsic> { + vec![] + } + + fn check_inherents(_block: Block, _data: InherentData) -> CheckInherentsResult { + CheckInherentsResult::new() + } + + fn random_seed() -> ::Hash { + unimplemented!() + } + } + + impl self::TestAPI for Runtime { + fn balance_of(id: AccountId) -> u64 { + system::balance_of(id) + } + + fn benchmark_add_one(val: &u64) -> u64 { + val + 1 + } + + fn benchmark_vector_add_one(vec: &Vec) -> Vec { + let mut vec = vec.clone(); + vec.iter_mut().for_each(|v| *v += 1); + vec + } + + fn fail_convert_parameter(_: DecodeFails) {} + + fn fail_convert_return_value() -> DecodeFails { + DecodeFails::new() + } + + fn function_signature_changed() -> u64 { + 1 + } + + fn fail_on_native() -> u64 { + panic!("Failing because we are on native") + } + fn fail_on_wasm() -> u64 { + 1 + } + fn benchmark_indirect_call() -> u64 { + let function = benchmark_add_one; + (0..1000).fold(0, |p, i| p + function(i)) + } + fn benchmark_direct_call() -> u64 { + (0..1000).fold(0, |p, i| p + benchmark_add_one(i)) + } + } + + impl consensus_aura::AuraApi for Runtime { + fn slot_duration() -> u64 { 1 } + } + + impl offchain_primitives::OffchainWorkerApi for Runtime { + fn offchain_worker(block: u64) { + let ex = Extrinsic::IncludeData(block.encode()); + runtime_io::submit_extrinsic(&ex) + } + } + + impl consensus_authorities::AuthoritiesApi for Runtime { + fn authorities() -> Vec> { + crate::system::authorities() + } + } + } + } else { + impl_runtime_apis! 
{ + impl client_api::Core for Runtime { + fn version() -> RuntimeVersion { + version() + } + + fn execute_block(block: Block) { + system::execute_block(block) + } + + fn initialize_block(header: &::Header) { + system::initialize_block(header) + } + } + + impl client_api::Metadata for Runtime { + fn metadata() -> OpaqueMetadata { + unimplemented!() + } + } + + impl client_api::TaggedTransactionQueue for Runtime { + fn validate_transaction(utx: ::Extrinsic) -> TransactionValidity { + system::validate_transaction(utx) + } + } + + impl block_builder_api::BlockBuilder for Runtime { + fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyResult { + system::execute_transaction(extrinsic) + } + + fn finalize_block() -> ::Header { + system::finalize_block() + } + + fn inherent_extrinsics(_data: InherentData) -> Vec<::Extrinsic> { + vec![] + } + + fn check_inherents(_block: Block, _data: InherentData) -> CheckInherentsResult { + CheckInherentsResult::new() + } + + fn random_seed() -> ::Hash { + unimplemented!() + } + } + + impl self::TestAPI for Runtime { + fn balance_of(id: AccountId) -> u64 { + system::balance_of(id) + } + + fn benchmark_add_one(val: &u64) -> u64 { + val + 1 + } + + fn benchmark_vector_add_one(vec: &Vec) -> Vec { + let mut vec = vec.clone(); + vec.iter_mut().for_each(|v| *v += 1); + vec + } + + fn fail_convert_parameter(_: DecodeFails) {} + + fn fail_convert_return_value() -> DecodeFails { + DecodeFails::new() + } + + fn function_signature_changed() -> Vec { + let mut vec = Vec::new(); + vec.push(1); + vec.push(2); + vec + } + + fn fail_on_native() -> u64 { + 1 + } + + fn fail_on_wasm() -> u64 { + panic!("Failing because we are on wasm") + } + + fn benchmark_indirect_call() -> u64 { + (0..10000).fold(0, |p, i| p + BENCHMARK_ADD_ONE.get()(i)) + } + + fn benchmark_direct_call() -> u64 { + (0..10000).fold(0, |p, i| p + benchmark_add_one(i)) + } + } + + impl consensus_aura::AuraApi for Runtime { + fn slot_duration() -> u64 { 1 } + } + + impl offchain_primitives::OffchainWorkerApi for Runtime { + fn offchain_worker(block: u64) { + let ex = Extrinsic::IncludeData(block.encode()); + runtime_io::submit_extrinsic(&ex) + } + } + + impl consensus_authorities::AuthoritiesApi for Runtime { + fn authorities() -> Vec> { + crate::system::authorities() + } + } + } + } +} diff --git a/core/test-runtime/src/system.rs b/core/test-runtime/src/system.rs index a119c8b62d..3ee47e4e67 100644 --- a/core/test-runtime/src/system.rs +++ b/core/test-runtime/src/system.rs @@ -17,433 +17,479 @@ //! System manager: Handles all of the top-level stuff; executing block/transaction, setting code //! and depositing logs. 
+use super::{AccountId, Block, BlockNumber, Digest, Extrinsic, Header, Transfer, H256 as Hash}; +use parity_codec::{Encode, KeyedVec}; +use primitives::ed25519::Public as AuthorityId; +use primitives::{storage::well_known_keys, Blake2Hasher}; use rstd::prelude::*; -use runtime_io::{storage_root, enumerated_trie_root, storage_changes_root, twox_128}; -use runtime_support::storage::{self, StorageValue, StorageMap}; -use runtime_support::storage_items; -use runtime_primitives::traits::{Hash as HashT, BlakeTwo256, Digest as DigestT, NumberFor, Block as BlockT}; +use runtime_io::{enumerated_trie_root, storage_changes_root, storage_root, twox_128}; use runtime_primitives::generic; -use runtime_primitives::{ApplyError, ApplyOutcome, ApplyResult, transaction_validity::TransactionValidity}; -use parity_codec::{KeyedVec, Encode}; -use super::{AccountId, BlockNumber, Extrinsic, Transfer, H256 as Hash, Block, Header, Digest}; -use primitives::{Blake2Hasher, storage::well_known_keys}; -use primitives::ed25519::Public as AuthorityId; +use runtime_primitives::traits::{ + BlakeTwo256, Block as BlockT, Digest as DigestT, Hash as HashT, NumberFor, +}; +use runtime_primitives::{ + transaction_validity::TransactionValidity, ApplyError, ApplyOutcome, ApplyResult, +}; +use runtime_support::storage::{self, StorageMap, StorageValue}; +use runtime_support::storage_items; const NONCE_OF: &[u8] = b"nonce:"; const BALANCE_OF: &[u8] = b"balance:"; storage_items! { - ExtrinsicData: b"sys:xtd" => required map [ u32 => Vec<u8> ]; - // The current block number being processed. Set by `execute_block`. - Number: b"sys:num" => required BlockNumber; - ParentHash: b"sys:pha" => required Hash; - NewAuthorities: b"sys:new_auth" => Vec<AuthorityId>; + ExtrinsicData: b"sys:xtd" => required map [ u32 => Vec<u8> ]; + // The current block number being processed. Set by `execute_block`. + Number: b"sys:num" => required BlockNumber; + ParentHash: b"sys:pha" => required Hash; + NewAuthorities: b"sys:new_auth" => Vec<AuthorityId>; } pub fn balance_of_key(who: AccountId) -> Vec<u8> { - who.to_keyed_vec(BALANCE_OF) + who.to_keyed_vec(BALANCE_OF) } pub fn balance_of(who: AccountId) -> u64 { - storage::get_or(&balance_of_key(who), 0) + storage::get_or(&balance_of_key(who), 0) } pub fn nonce_of(who: AccountId) -> u64 { - storage::get_or(&who.to_keyed_vec(NONCE_OF), 0) + storage::get_or(&who.to_keyed_vec(NONCE_OF), 0) } /// Get authorities at given block. pub fn authorities() -> Vec<AuthorityId> { - let len: u32 = storage::unhashed::get(well_known_keys::AUTHORITY_COUNT) - .expect("There are always authorities in test-runtime"); - (0..len) - .map(|i| storage::unhashed::get(&i.to_keyed_vec(well_known_keys::AUTHORITY_PREFIX)) - .expect("Authority is properly encoded in test-runtime") - ) - .collect() + let len: u32 = storage::unhashed::get(well_known_keys::AUTHORITY_COUNT) + .expect("There are always authorities in test-runtime"); + (0..len) + .map(|i| { + storage::unhashed::get(&i.to_keyed_vec(well_known_keys::AUTHORITY_PREFIX)) + .expect("Authority is properly encoded in test-runtime") + }) + .collect() } pub fn initialize_block(header: &Header) { - // populate environment. - <Number>::put(&header.number); - <ParentHash>::put(&header.parent_hash); - storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &0u32); + // populate environment.
+ <Number>::put(&header.number); + <ParentHash>::put(&header.parent_hash); + storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &0u32); } fn execute_extrinsics_without_checks(extrinsics: Vec<<Block as BlockT>::Extrinsic>) { - // execute transactions - extrinsics.into_iter().enumerate().for_each(|(i, e)| { - storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &(i as u32)); - execute_transaction_backend(&e).unwrap_or_else(|_| panic!("Invalid transaction")); - storage::unhashed::kill(well_known_keys::EXTRINSIC_INDEX); - }); + // execute transactions + extrinsics.into_iter().enumerate().for_each(|(i, e)| { + storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &(i as u32)); + execute_transaction_backend(&e).unwrap_or_else(|_| panic!("Invalid transaction")); + storage::unhashed::kill(well_known_keys::EXTRINSIC_INDEX); + }); } /// Actually execute all transitioning for `block`. pub fn polish_block(block: &mut Block) { - let header = &mut block.header; - - // check transaction trie root represents the transactions. - let txs = block.extrinsics.iter().map(Encode::encode).collect::<Vec<_>>(); - let txs = txs.iter().map(Vec::as_slice).collect::<Vec<_>>(); - let txs_root = enumerated_trie_root::<Blake2Hasher>(&txs).into(); - info_expect_equal_hash(&txs_root, &header.extrinsics_root); - header.extrinsics_root = txs_root; - - // execute transactions - block.extrinsics.iter().enumerate().for_each(|(i, e)| { - storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &(i as u32)); - execute_transaction_backend(e).unwrap_or_else(|_| panic!("Invalid transaction")); - storage::unhashed::kill(well_known_keys::EXTRINSIC_INDEX); - }); - - header.state_root = storage_root().into(); - - // check digest - let mut digest = Digest::default(); - if let Some(storage_changes_root) = storage_changes_root(header.parent_hash.into(), header.number - 1) { - digest.push(generic::DigestItem::ChangesTrieRoot(storage_changes_root.into())); - } - if let Some(new_authorities) = <NewAuthorities>::take() { - digest.push(generic::DigestItem::AuthoritiesChange(new_authorities)); - } - header.digest = digest; + let header = &mut block.header; + + // check transaction trie root represents the transactions. + let txs = block + .extrinsics + .iter() + .map(Encode::encode) + .collect::<Vec<_>>(); + let txs = txs.iter().map(Vec::as_slice).collect::<Vec<_>>(); + let txs_root = enumerated_trie_root::<Blake2Hasher>(&txs).into(); + info_expect_equal_hash(&txs_root, &header.extrinsics_root); + header.extrinsics_root = txs_root; + + // execute transactions + block.extrinsics.iter().enumerate().for_each(|(i, e)| { + storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &(i as u32)); + execute_transaction_backend(e).unwrap_or_else(|_| panic!("Invalid transaction")); + storage::unhashed::kill(well_known_keys::EXTRINSIC_INDEX); + }); + + header.state_root = storage_root().into(); + + // check digest + let mut digest = Digest::default(); + if let Some(storage_changes_root) = + storage_changes_root(header.parent_hash.into(), header.number - 1) + { + digest.push(generic::DigestItem::ChangesTrieRoot( + storage_changes_root.into(), + )); + } + if let Some(new_authorities) = <NewAuthorities>::take() { + digest.push(generic::DigestItem::AuthoritiesChange(new_authorities)); + } + header.digest = digest; } pub fn execute_block(block: Block) { - let ref header = block.header; - - // check transaction trie root represents the transactions.
- let txs = block.extrinsics.iter().map(Encode::encode).collect::<Vec<_>>(); - let txs = txs.iter().map(Vec::as_slice).collect::<Vec<_>>(); - let txs_root = enumerated_trie_root::<Blake2Hasher>(&txs).into(); - info_expect_equal_hash(&txs_root, &header.extrinsics_root); - assert!(txs_root == header.extrinsics_root, "Transaction trie root must be valid."); - - execute_extrinsics_without_checks(block.extrinsics); - - // check storage root. - let storage_root = storage_root().into(); - info_expect_equal_hash(&storage_root, &header.state_root); - assert!(storage_root == header.state_root, "Storage root must match that calculated."); - - // check digest - let mut digest = Digest::default(); - if let Some(storage_changes_root) = storage_changes_root(header.parent_hash.into(), header.number - 1) { - digest.push(generic::DigestItem::ChangesTrieRoot(storage_changes_root.into())); - } - if let Some(new_authorities) = <NewAuthorities>::take() { - digest.push(generic::DigestItem::AuthoritiesChange(new_authorities)); - } - assert!(digest == header.digest, "Header digest items must match that calculated."); + let ref header = block.header; + + // check transaction trie root represents the transactions. + let txs = block + .extrinsics + .iter() + .map(Encode::encode) + .collect::<Vec<_>>(); + let txs = txs.iter().map(Vec::as_slice).collect::<Vec<_>>(); + let txs_root = enumerated_trie_root::<Blake2Hasher>(&txs).into(); + info_expect_equal_hash(&txs_root, &header.extrinsics_root); + assert!( + txs_root == header.extrinsics_root, + "Transaction trie root must be valid." + ); + + execute_extrinsics_without_checks(block.extrinsics); + + // check storage root. + let storage_root = storage_root().into(); + info_expect_equal_hash(&storage_root, &header.state_root); + assert!( + storage_root == header.state_root, + "Storage root must match that calculated." + ); + + // check digest + let mut digest = Digest::default(); + if let Some(storage_changes_root) = + storage_changes_root(header.parent_hash.into(), header.number - 1) + { + digest.push(generic::DigestItem::ChangesTrieRoot( + storage_changes_root.into(), + )); + } + if let Some(new_authorities) = <NewAuthorities>::take() { + digest.push(generic::DigestItem::AuthoritiesChange(new_authorities)); + } + assert!( + digest == header.digest, + "Header digest items must match that calculated." + ); } /// The block executor. pub struct BlockExecutor; impl executive::ExecuteBlock<Block> for BlockExecutor { - fn execute_block(block: Block) { - execute_block(block); - } - - fn execute_extrinsics_without_checks(_: NumberFor<Block>, extrinsics: Vec<<Block as BlockT>::Extrinsic>) { - execute_extrinsics_without_checks(extrinsics); - } + fn execute_block(block: Block) { + execute_block(block); + } + + fn execute_extrinsics_without_checks( + _: NumberFor<Block>, + extrinsics: Vec<<Block as BlockT>::Extrinsic>, + ) { + execute_extrinsics_without_checks(extrinsics); + } } /// Execute a transaction outside of the block execution function. /// This doesn't attempt to validate anything regarding the block.
pub fn validate_transaction(utx: Extrinsic) -> TransactionValidity { - if check_signature(&utx).is_err() { - return TransactionValidity::Invalid(ApplyError::BadSignature as i8); - } - - let tx = utx.transfer(); - let nonce_key = tx.from.to_keyed_vec(NONCE_OF); - let expected_nonce: u64 = storage::get_or(&nonce_key, 0); - if tx.nonce < expected_nonce { - return TransactionValidity::Invalid(ApplyError::Stale as i8); - } - if tx.nonce > expected_nonce + 64 { - return TransactionValidity::Unknown(ApplyError::Future as i8); - } - - let hash = |from: &AccountId, nonce: u64| { - twox_128(&nonce.to_keyed_vec(&from.encode())).to_vec() - }; - let requires = if tx.nonce != expected_nonce && tx.nonce > 0 { - let mut deps = Vec::new(); - deps.push(hash(&tx.from, tx.nonce - 1)); - deps - } else { Vec::new() }; - - let provides = { - let mut p = Vec::new(); - p.push(hash(&tx.from, tx.nonce)); - p - }; - - TransactionValidity::Valid { - priority: tx.amount, - requires, - provides, - longevity: 64, - } + if check_signature(&utx).is_err() { + return TransactionValidity::Invalid(ApplyError::BadSignature as i8); + } + + let tx = utx.transfer(); + let nonce_key = tx.from.to_keyed_vec(NONCE_OF); + let expected_nonce: u64 = storage::get_or(&nonce_key, 0); + if tx.nonce < expected_nonce { + return TransactionValidity::Invalid(ApplyError::Stale as i8); + } + if tx.nonce > expected_nonce + 64 { + return TransactionValidity::Unknown(ApplyError::Future as i8); + } + + let hash = + |from: &AccountId, nonce: u64| twox_128(&nonce.to_keyed_vec(&from.encode())).to_vec(); + let requires = if tx.nonce != expected_nonce && tx.nonce > 0 { + let mut deps = Vec::new(); + deps.push(hash(&tx.from, tx.nonce - 1)); + deps + } else { + Vec::new() + }; + + let provides = { + let mut p = Vec::new(); + p.push(hash(&tx.from, tx.nonce)); + p + }; + + TransactionValidity::Valid { + priority: tx.amount, + requires, + provides, + longevity: 64, + } } /// Execute a transaction outside of the block execution function. /// This doesn't attempt to validate anything regarding the block. pub fn execute_transaction(utx: Extrinsic) -> ApplyResult { - let extrinsic_index: u32 = storage::unhashed::get(well_known_keys::EXTRINSIC_INDEX).unwrap(); - let result = execute_transaction_backend(&utx); - ExtrinsicData::insert(extrinsic_index, utx.encode()); - storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &(extrinsic_index + 1)); - result + let extrinsic_index: u32 = storage::unhashed::get(well_known_keys::EXTRINSIC_INDEX).unwrap(); + let result = execute_transaction_backend(&utx); + ExtrinsicData::insert(extrinsic_index, utx.encode()); + storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &(extrinsic_index + 1)); + result } /// Finalize the block. 
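For reference, the tag scheme in `validate_transaction` above chains a sender's transfers by nonce: a transfer with nonce `n` provides the tag `twox_128(&n.to_keyed_vec(&from.encode()))`, and when it arrives ahead of the expected nonce it requires the same tag for `n - 1`. A minimal standalone sketch of that chaining follows; the `hash_tag` helper and the `u8` account stand-in are illustrative assumptions, not code from this patch:

fn hash_tag(from: u8, nonce: u64) -> Vec<u8> {
    // illustrative stand-in for twox_128(&nonce.to_keyed_vec(&from.encode())).to_vec()
    let mut tag = vec![from];
    tag.extend_from_slice(&nonce.to_le_bytes());
    tag
}

fn main() {
    let from = 7u8; // stand-in for an AccountId
    let expected_nonce = 5u64;
    for nonce in expected_nonce..expected_nonce + 3 {
        // a transaction ahead of the expected nonce requires its predecessor's tag...
        let requires: Vec<Vec<u8>> = if nonce != expected_nonce && nonce > 0 {
            vec![hash_tag(from, nonce - 1)]
        } else {
            vec![]
        };
        // ...and every transaction provides its own tag, so the next nonce can follow.
        let provides = vec![hash_tag(from, nonce)];
        println!("nonce {}: requires {:?}, provides {:?}", nonce, requires, provides);
    }
}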
pub fn finalize_block() -> Header { - let extrinsic_index: u32 = storage::unhashed::take(well_known_keys::EXTRINSIC_INDEX).unwrap(); - let txs: Vec<_> = (0..extrinsic_index).map(ExtrinsicData::take).collect(); - let txs = txs.iter().map(Vec::as_slice).collect::<Vec<_>>(); - let extrinsics_root = enumerated_trie_root::<Blake2Hasher>(&txs).into(); - - let number = <Number>::take(); - let parent_hash = <ParentHash>::take(); - let storage_root = BlakeTwo256::storage_root(); - let storage_changes_root = BlakeTwo256::storage_changes_root(parent_hash, number - 1); - - let mut digest = Digest::default(); - if let Some(storage_changes_root) = storage_changes_root { - digest.push(generic::DigestItem::ChangesTrieRoot(storage_changes_root)); - } - if let Some(new_authorities) = <NewAuthorities>::take() { - digest.push(generic::DigestItem::AuthoritiesChange(new_authorities)); - } - - Header { - number, - extrinsics_root, - state_root: storage_root, - parent_hash, - digest: digest, - } + let extrinsic_index: u32 = storage::unhashed::take(well_known_keys::EXTRINSIC_INDEX).unwrap(); + let txs: Vec<_> = (0..extrinsic_index).map(ExtrinsicData::take).collect(); + let txs = txs.iter().map(Vec::as_slice).collect::<Vec<_>>(); + let extrinsics_root = enumerated_trie_root::<Blake2Hasher>(&txs).into(); + + let number = <Number>::take(); + let parent_hash = <ParentHash>::take(); + let storage_root = BlakeTwo256::storage_root(); + let storage_changes_root = BlakeTwo256::storage_changes_root(parent_hash, number - 1); + + let mut digest = Digest::default(); + if let Some(storage_changes_root) = storage_changes_root { + digest.push(generic::DigestItem::ChangesTrieRoot(storage_changes_root)); + } + if let Some(new_authorities) = <NewAuthorities>::take() { + digest.push(generic::DigestItem::AuthoritiesChange(new_authorities)); + } + + Header { + number, + extrinsics_root, + state_root: storage_root, + parent_hash, + digest: digest, + } } #[inline(always)] fn check_signature(utx: &Extrinsic) -> Result<(), ApplyError> { - use runtime_primitives::traits::BlindCheckable; - utx.clone().check().map_err(|_| ApplyError::BadSignature)?; - Ok(()) + use runtime_primitives::traits::BlindCheckable; + utx.clone().check().map_err(|_| ApplyError::BadSignature)?; + Ok(()) } fn execute_transaction_backend(utx: &Extrinsic) -> ApplyResult { - check_signature(utx)?; - match utx { - Extrinsic::Transfer(ref transfer, _) => execute_transfer_backend(transfer), - Extrinsic::AuthoritiesChange(ref new_auth) => execute_new_authorities_backend(new_auth), - Extrinsic::IncludeData(_) => Ok(ApplyOutcome::Success), - } + check_signature(utx)?; + match utx { + Extrinsic::Transfer(ref transfer, _) => execute_transfer_backend(transfer), + Extrinsic::AuthoritiesChange(ref new_auth) => execute_new_authorities_backend(new_auth), + Extrinsic::IncludeData(_) => Ok(ApplyOutcome::Success), + } } fn execute_transfer_backend(tx: &Transfer) -> ApplyResult { - // check nonce - let nonce_key = tx.from.to_keyed_vec(NONCE_OF); - let expected_nonce: u64 = storage::get_or(&nonce_key, 0); - if !(tx.nonce == expected_nonce) { - return Err(ApplyError::Stale) - } - - // increment nonce in storage - storage::put(&nonce_key, &(expected_nonce + 1)); - - // check sender balance - let from_balance_key = tx.from.to_keyed_vec(BALANCE_OF); - let from_balance: u64 = storage::get_or(&from_balance_key, 0); - - // enact transfer - if !(tx.amount <= from_balance) { - return Err(ApplyError::CantPay) - } - let to_balance_key = tx.to.to_keyed_vec(BALANCE_OF); - let to_balance: u64 = storage::get_or(&to_balance_key, 0); - storage::put(&from_balance_key, &(from_balance - tx.amount)); -
storage::put(&to_balance_key, &(to_balance + tx.amount)); - Ok(ApplyOutcome::Success) + // check nonce + let nonce_key = tx.from.to_keyed_vec(NONCE_OF); + let expected_nonce: u64 = storage::get_or(&nonce_key, 0); + if !(tx.nonce == expected_nonce) { + return Err(ApplyError::Stale); + } + + // increment nonce in storage + storage::put(&nonce_key, &(expected_nonce + 1)); + + // check sender balance + let from_balance_key = tx.from.to_keyed_vec(BALANCE_OF); + let from_balance: u64 = storage::get_or(&from_balance_key, 0); + + // enact transfer + if !(tx.amount <= from_balance) { + return Err(ApplyError::CantPay); + } + let to_balance_key = tx.to.to_keyed_vec(BALANCE_OF); + let to_balance: u64 = storage::get_or(&to_balance_key, 0); + storage::put(&from_balance_key, &(from_balance - tx.amount)); + storage::put(&to_balance_key, &(to_balance + tx.amount)); + Ok(ApplyOutcome::Success) } fn execute_new_authorities_backend(new_authorities: &[AuthorityId]) -> ApplyResult { - let new_authorities: Vec<AuthorityId> = new_authorities.iter().cloned().collect(); - <NewAuthorities>::put(new_authorities); - Ok(ApplyOutcome::Success) + let new_authorities: Vec<AuthorityId> = new_authorities.iter().cloned().collect(); + <NewAuthorities>::put(new_authorities); + Ok(ApplyOutcome::Success) } #[cfg(feature = "std")] fn info_expect_equal_hash(given: &Hash, expected: &Hash) { - use primitives::hexdisplay::HexDisplay; - if given != expected { - println!( - "Hash: given={}, expected={}", - HexDisplay::from(given.as_fixed_bytes()), - HexDisplay::from(expected.as_fixed_bytes()) - ); - } + use primitives::hexdisplay::HexDisplay; + if given != expected { + println!( + "Hash: given={}, expected={}", + HexDisplay::from(given.as_fixed_bytes()), + HexDisplay::from(expected.as_fixed_bytes()) + ); + } } #[cfg(not(feature = "std"))] fn info_expect_equal_hash(given: &Hash, expected: &Hash) { - if given != expected { - ::runtime_io::print("Hash not equal"); - ::runtime_io::print(given.as_bytes()); - ::runtime_io::print(expected.as_bytes()); - } + if given != expected { + ::runtime_io::print("Hash not equal"); + ::runtime_io::print(given.as_bytes()); + ::runtime_io::print(expected.as_bytes()); + } } #[cfg(test)] mod tests { - use super::*; - - use runtime_io::{with_externalities, twox_128, TestExternalities}; - use parity_codec::{Joiner, KeyedVec}; - use substrate_test_client::{AuthorityKeyring, AccountKeyring}; - use crate::{Header, Transfer}; - use primitives::{Blake2Hasher, map}; - use primitives::storage::well_known_keys; - use substrate_executor::WasmExecutor; - - const WASM_CODE: &'static [u8] = - include_bytes!("../wasm/target/wasm32-unknown-unknown/release/substrate_test_runtime.compact.wasm"); - - fn new_test_ext() -> TestExternalities<Blake2Hasher> { - TestExternalities::new(map![ - twox_128(b"latest").to_vec() => vec![69u8; 32], - twox_128(well_known_keys::AUTHORITY_COUNT).to_vec() => vec![].and(&3u32), - twox_128(&0u32.to_keyed_vec(well_known_keys::AUTHORITY_PREFIX)).to_vec() => AuthorityKeyring::Alice.to_raw_public().to_vec(), - twox_128(&1u32.to_keyed_vec(well_known_keys::AUTHORITY_PREFIX)).to_vec() => AuthorityKeyring::Bob.to_raw_public().to_vec(), - twox_128(&2u32.to_keyed_vec(well_known_keys::AUTHORITY_PREFIX)).to_vec() => AuthorityKeyring::Charlie.to_raw_public().to_vec(), - twox_128(&AccountKeyring::Alice.to_raw_public().to_keyed_vec(b"balance:")).to_vec() => vec![111u8, 0, 0, 0, 0, 0, 0, 0] - ]) - } - - fn block_import_works<F>(block_executor: F) where F: Fn(Block, &mut TestExternalities<Blake2Hasher>) { - let h = Header { - parent_hash: [69u8; 32].into(), - number: 1, - state_root:
Default::default(), - extrinsics_root: Default::default(), - digest: Default::default(), - }; - let mut b = Block { - header: h, - extrinsics: vec![], - }; - - with_externalities(&mut new_test_ext(), || polish_block(&mut b)); - - block_executor(b, &mut new_test_ext()); - } - - #[test] - fn block_import_works_native() { - block_import_works(|b, ext| { - with_externalities(ext, || { - execute_block(b); - }); - }); - } - - #[test] - fn block_import_works_wasm() { - block_import_works(|b, ext| { - WasmExecutor::new().call(ext, 8, &WASM_CODE, "Core_execute_block", &b.encode()).unwrap(); - }) - } - - fn block_import_with_transaction_works(block_executor: F) where F: Fn(Block, &mut TestExternalities) { - let mut b1 = Block { - header: Header { - parent_hash: [69u8; 32].into(), - number: 1, - state_root: Default::default(), - extrinsics_root: Default::default(), - digest: Default::default(), - }, - extrinsics: vec![ - Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Bob.into(), - amount: 69, - nonce: 0, - }.into_signed_tx() - ], - }; - - let mut dummy_ext = new_test_ext(); - with_externalities(&mut dummy_ext, || polish_block(&mut b1)); - - let mut b2 = Block { - header: Header { - parent_hash: b1.header.hash(), - number: 2, - state_root: Default::default(), - extrinsics_root: Default::default(), - digest: Default::default(), - }, - extrinsics: vec![ - Transfer { - from: AccountKeyring::Bob.into(), - to: AccountKeyring::Alice.into(), - amount: 27, - nonce: 0, - }.into_signed_tx(), - Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Charlie.into(), - amount: 69, - nonce: 1, - }.into_signed_tx(), - ], - }; - - with_externalities(&mut dummy_ext, || polish_block(&mut b2)); - drop(dummy_ext); - - let mut t = new_test_ext(); - - with_externalities(&mut t, || { - assert_eq!(balance_of(AccountKeyring::Alice.into()), 111); - assert_eq!(balance_of(AccountKeyring::Bob.into()), 0); - }); - - block_executor(b1, &mut t); - - with_externalities(&mut t, || { - assert_eq!(balance_of(AccountKeyring::Alice.into()), 42); - assert_eq!(balance_of(AccountKeyring::Bob.into()), 69); - }); - - block_executor(b2, &mut t); - - with_externalities(&mut t, || { - assert_eq!(balance_of(AccountKeyring::Alice.into()), 0); - assert_eq!(balance_of(AccountKeyring::Bob.into()), 42); - assert_eq!(balance_of(AccountKeyring::Charlie.into()), 69); - }); - } - - #[test] - fn block_import_with_transaction_works_native() { - block_import_with_transaction_works(|b, ext| { - with_externalities(ext, || { - execute_block(b); - }); - }); - } - - #[test] - fn block_import_with_transaction_works_wasm() { - block_import_with_transaction_works(|b, ext| { - WasmExecutor::new().call(ext, 8, &WASM_CODE, "Core_execute_block", &b.encode()).unwrap(); - }) - } + use super::*; + + use crate::{Header, Transfer}; + use parity_codec::{Joiner, KeyedVec}; + use primitives::storage::well_known_keys; + use primitives::{map, Blake2Hasher}; + use runtime_io::{twox_128, with_externalities, TestExternalities}; + use substrate_executor::WasmExecutor; + use substrate_test_client::{AccountKeyring, AuthorityKeyring}; + + const WASM_CODE: &'static [u8] = include_bytes!( + "../wasm/target/wasm32-unknown-unknown/release/substrate_test_runtime.compact.wasm" + ); + + fn new_test_ext() -> TestExternalities { + TestExternalities::new(map![ + twox_128(b"latest").to_vec() => vec![69u8; 32], + twox_128(well_known_keys::AUTHORITY_COUNT).to_vec() => vec![].and(&3u32), + twox_128(&0u32.to_keyed_vec(well_known_keys::AUTHORITY_PREFIX)).to_vec() 
=> AuthorityKeyring::Alice.to_raw_public().to_vec(), + twox_128(&1u32.to_keyed_vec(well_known_keys::AUTHORITY_PREFIX)).to_vec() => AuthorityKeyring::Bob.to_raw_public().to_vec(), + twox_128(&2u32.to_keyed_vec(well_known_keys::AUTHORITY_PREFIX)).to_vec() => AuthorityKeyring::Charlie.to_raw_public().to_vec(), + twox_128(&AccountKeyring::Alice.to_raw_public().to_keyed_vec(b"balance:")).to_vec() => vec![111u8, 0, 0, 0, 0, 0, 0, 0] + ]) + } + + fn block_import_works(block_executor: F) + where + F: Fn(Block, &mut TestExternalities), + { + let h = Header { + parent_hash: [69u8; 32].into(), + number: 1, + state_root: Default::default(), + extrinsics_root: Default::default(), + digest: Default::default(), + }; + let mut b = Block { + header: h, + extrinsics: vec![], + }; + + with_externalities(&mut new_test_ext(), || polish_block(&mut b)); + + block_executor(b, &mut new_test_ext()); + } + + #[test] + fn block_import_works_native() { + block_import_works(|b, ext| { + with_externalities(ext, || { + execute_block(b); + }); + }); + } + + #[test] + fn block_import_works_wasm() { + block_import_works(|b, ext| { + WasmExecutor::new() + .call(ext, 8, &WASM_CODE, "Core_execute_block", &b.encode()) + .unwrap(); + }) + } + + fn block_import_with_transaction_works(block_executor: F) + where + F: Fn(Block, &mut TestExternalities), + { + let mut b1 = Block { + header: Header { + parent_hash: [69u8; 32].into(), + number: 1, + state_root: Default::default(), + extrinsics_root: Default::default(), + digest: Default::default(), + }, + extrinsics: vec![Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Bob.into(), + amount: 69, + nonce: 0, + } + .into_signed_tx()], + }; + + let mut dummy_ext = new_test_ext(); + with_externalities(&mut dummy_ext, || polish_block(&mut b1)); + + let mut b2 = Block { + header: Header { + parent_hash: b1.header.hash(), + number: 2, + state_root: Default::default(), + extrinsics_root: Default::default(), + digest: Default::default(), + }, + extrinsics: vec![ + Transfer { + from: AccountKeyring::Bob.into(), + to: AccountKeyring::Alice.into(), + amount: 27, + nonce: 0, + } + .into_signed_tx(), + Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Charlie.into(), + amount: 69, + nonce: 1, + } + .into_signed_tx(), + ], + }; + + with_externalities(&mut dummy_ext, || polish_block(&mut b2)); + drop(dummy_ext); + + let mut t = new_test_ext(); + + with_externalities(&mut t, || { + assert_eq!(balance_of(AccountKeyring::Alice.into()), 111); + assert_eq!(balance_of(AccountKeyring::Bob.into()), 0); + }); + + block_executor(b1, &mut t); + + with_externalities(&mut t, || { + assert_eq!(balance_of(AccountKeyring::Alice.into()), 42); + assert_eq!(balance_of(AccountKeyring::Bob.into()), 69); + }); + + block_executor(b2, &mut t); + + with_externalities(&mut t, || { + assert_eq!(balance_of(AccountKeyring::Alice.into()), 0); + assert_eq!(balance_of(AccountKeyring::Bob.into()), 42); + assert_eq!(balance_of(AccountKeyring::Charlie.into()), 69); + }); + } + + #[test] + fn block_import_with_transaction_works_native() { + block_import_with_transaction_works(|b, ext| { + with_externalities(ext, || { + execute_block(b); + }); + }); + } + + #[test] + fn block_import_with_transaction_works_wasm() { + block_import_with_transaction_works(|b, ext| { + WasmExecutor::new() + .call(ext, 8, &WASM_CODE, "Core_execute_block", &b.encode()) + .unwrap(); + }) + } } diff --git a/core/transaction-pool/graph/src/base_pool.rs b/core/transaction-pool/graph/src/base_pool.rs index 
ad434e57d4..85f6810792 100644 --- a/core/transaction-pool/graph/src/base_pool.rs +++ b/core/transaction-pool/graph/src/base_pool.rs @@ -18,23 +18,16 @@ //! //! For a more full-featured pool, have a look at the `pool` module. -use std::{ - collections::HashSet, - fmt, - hash, - sync::Arc, -}; +use std::{collections::HashSet, fmt, hash, sync::Arc}; use error_chain::bail; -use log::{trace, debug, warn}; +use log::{debug, trace, warn}; use serde::Serialize; -use substrate_primitives::hexdisplay::HexDisplay; use sr_primitives::traits::Member; use sr_primitives::transaction_validity::{ - TransactionTag as Tag, - TransactionLongevity as Longevity, - TransactionPriority as Priority, + TransactionLongevity as Longevity, TransactionPriority as Priority, TransactionTag as Tag, }; +use substrate_primitives::hexdisplay::HexDisplay; use crate::error; use crate::future::{FutureTransactions, WaitingTransaction}; @@ -43,96 +36,97 @@ use crate::ready::ReadyTransactions; /// Successful import result. #[derive(Debug, PartialEq, Eq)] pub enum Imported<Hash, Ex> { - /// Transaction was successfully imported to Ready queue. - Ready { - /// Hash of transaction that was successfully imported. - hash: Hash, - /// Transactions that got promoted from the Future queue. - promoted: Vec<Hash>, - /// Transactions that failed to be promoted from the Future queue and are now discarded. - failed: Vec<Hash>, - /// Transactions removed from the Ready pool (replaced). - removed: Vec<Arc<Transaction<Hash, Ex>>>, - }, - /// Transaction was successfully imported to Future queue. - Future { - /// Hash of transaction that was successfully imported. - hash: Hash, - } + /// Transaction was successfully imported to Ready queue. + Ready { + /// Hash of transaction that was successfully imported. + hash: Hash, + /// Transactions that got promoted from the Future queue. + promoted: Vec<Hash>, + /// Transactions that failed to be promoted from the Future queue and are now discarded. + failed: Vec<Hash>, + /// Transactions removed from the Ready pool (replaced). + removed: Vec<Arc<Transaction<Hash, Ex>>>, + }, + /// Transaction was successfully imported to Future queue. + Future { + /// Hash of transaction that was successfully imported. + hash: Hash, + }, } impl<Hash, Ex> Imported<Hash, Ex> { - /// Returns the hash of imported transaction. - pub fn hash(&self) -> &Hash { - use self::Imported::*; - match *self { - Ready { ref hash, .. } => hash, - Future { ref hash, .. } => hash, - } - } + /// Returns the hash of imported transaction. + pub fn hash(&self) -> &Hash { + use self::Imported::*; + match *self { + Ready { ref hash, .. } => hash, + Future { ref hash, .. } => hash, + } + } } /// Status of pruning the queue. #[derive(Debug)] pub struct PruneStatus<Hash, Ex> { - /// A list of imports that satisfying the tag triggered. - pub promoted: Vec<Imported<Hash, Ex>>, - /// A list of transactions that failed to be promoted and now are discarded. - pub failed: Vec<Hash>, - /// A list of transactions that got pruned from the ready queue. - pub pruned: Vec<Arc<Transaction<Hash, Ex>>>, + /// A list of imports that satisfying the tag triggered. + pub promoted: Vec<Imported<Hash, Ex>>, + /// A list of transactions that failed to be promoted and now are discarded. + pub failed: Vec<Hash>, + /// A list of transactions that got pruned from the ready queue. + pub pruned: Vec<Arc<Transaction<Hash, Ex>>>, } /// Immutable transaction #[cfg_attr(test, derive(Clone))] #[derive(PartialEq, Eq)] pub struct Transaction<Hash, Extrinsic> { - /// Raw extrinsic representing that transaction. - pub data: Extrinsic, - /// Number of bytes encoding of the transaction requires.
- pub bytes: usize, - /// Transaction hash (unique) - pub hash: Hash, - /// Transaction priority (higher = better) - pub priority: Priority, - /// At which block the transaction becomes invalid? - pub valid_till: Longevity, - /// Tags required by the transaction. - pub requires: Vec<Tag>, - /// Tags that this transaction provides. - pub provides: Vec<Tag>, + /// Raw extrinsic representing that transaction. + pub data: Extrinsic, + /// Number of bytes encoding of the transaction requires. + pub bytes: usize, + /// Transaction hash (unique) + pub hash: Hash, + /// Transaction priority (higher = better) + pub priority: Priority, + /// At which block the transaction becomes invalid? + pub valid_till: Longevity, + /// Tags required by the transaction. + pub requires: Vec<Tag>, + /// Tags that this transaction provides. + pub provides: Vec<Tag>, } -impl<Hash, Extrinsic> fmt::Debug for Transaction<Hash, Extrinsic> where - Hash: fmt::Debug, - Extrinsic: fmt::Debug, +impl<Hash, Extrinsic> fmt::Debug for Transaction<Hash, Extrinsic> +where + Hash: fmt::Debug, + Extrinsic: fmt::Debug, { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fn print_tags(fmt: &mut fmt::Formatter, tags: &[Tag]) -> fmt::Result { - let mut it = tags.iter(); - if let Some(t) = it.next() { - write!(fmt, "{}", HexDisplay::from(t))?; - } - for t in it { - write!(fmt, ",{}", HexDisplay::from(t))?; - } - Ok(()) - } - - write!(fmt, "Transaction {{ ")?; - write!(fmt, "hash: {:?}, ", &self.hash)?; - write!(fmt, "priority: {:?}, ", &self.priority)?; - write!(fmt, "valid_till: {:?}, ", &self.valid_till)?; - write!(fmt, "bytes: {:?}, ", &self.bytes)?; - write!(fmt, "requires: [")?; - print_tags(fmt, &self.requires)?; - write!(fmt, "], provides: [")?; - print_tags(fmt, &self.provides)?; - write!(fmt, "], ")?; - write!(fmt, "data: {:?}", &self.data)?; - write!(fmt, "}}")?; - Ok(()) - } + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fn print_tags(fmt: &mut fmt::Formatter, tags: &[Tag]) -> fmt::Result { + let mut it = tags.iter(); + if let Some(t) = it.next() { + write!(fmt, "{}", HexDisplay::from(t))?; + } + for t in it { + write!(fmt, ",{}", HexDisplay::from(t))?; + } + Ok(()) + } + + write!(fmt, "Transaction {{ ")?; + write!(fmt, "hash: {:?}, ", &self.hash)?; + write!(fmt, "priority: {:?}, ", &self.priority)?; + write!(fmt, "valid_till: {:?}, ", &self.valid_till)?; + write!(fmt, "bytes: {:?}, ", &self.bytes)?; + write!(fmt, "requires: [")?; + print_tags(fmt, &self.requires)?; + write!(fmt, "], provides: [")?; + print_tags(fmt, &self.provides)?; + write!(fmt, "], ")?; + write!(fmt, "data: {:?}", &self.data)?; + write!(fmt, "}}")?; + Ok(()) + } } /// Store last pruned tags for given number of invocations. @@ -150,750 +144,786 @@ const RECENTLY_PRUNED_TAGS: usize = 2; /// required tags. #[derive(Debug)] pub struct BasePool<Hash: hash::Hash + Eq, Ex> { - future: FutureTransactions<Hash, Ex>, - ready: ReadyTransactions<Hash, Ex>, - /// Store recently pruned tags (for last two invocations). - /// - /// This is used to make sure we don't accidentally put - /// transactions to future in case they were just stuck in verification. - recently_pruned: [HashSet<Tag>; RECENTLY_PRUNED_TAGS], - recently_pruned_index: usize, + future: FutureTransactions<Hash, Ex>, + ready: ReadyTransactions<Hash, Ex>, + /// Store recently pruned tags (for last two invocations). + /// + /// This is used to make sure we don't accidentally put + /// transactions to future in case they were just stuck in verification.
+ recently_pruned: [HashSet<Tag>; RECENTLY_PRUNED_TAGS], + recently_pruned_index: usize, } impl<Hash: hash::Hash + Eq, Ex> Default for BasePool<Hash, Ex> { - fn default() -> Self { - BasePool { - future: Default::default(), - ready: Default::default(), - recently_pruned: Default::default(), - recently_pruned_index: 0, - } - } + fn default() -> Self { + BasePool { + future: Default::default(), + ready: Default::default(), + recently_pruned: Default::default(), + recently_pruned_index: 0, + } + } } impl<Hash: hash::Hash + Member + Serialize, Ex: ::std::fmt::Debug> BasePool<Hash, Ex> { - /// Imports transaction to the pool. - /// - /// The pool consists of two parts: Future and Ready. - /// The former contains transactions that require some tags that are not yet provided by - /// other transactions in the pool. - /// The latter contains transactions that have all the requirements satisfied and are - /// ready to be included in the block. - pub fn import( - &mut self, - tx: Transaction<Hash, Ex>, - ) -> error::Result<Imported<Hash, Ex>> { - if self.future.contains(&tx.hash) || self.ready.contains(&tx.hash) { - bail!(error::ErrorKind::AlreadyImported(Box::new(tx.hash.clone()))) - } - - let tx = WaitingTransaction::new( - tx, - self.ready.provided_tags(), - &self.recently_pruned, - ); - trace!(target: "txpool", "[{:?}] {:?}", tx.transaction.hash, tx); - debug!(target: "txpool", "[{:?}] Importing to {}", tx.transaction.hash, if tx.is_ready() { "ready" } else { "future" }); - - // If all tags are not satisfied import to future. - if !tx.is_ready() { - let hash = tx.transaction.hash.clone(); - self.future.import(tx); - return Ok(Imported::Future { hash }); - } - - self.import_to_ready(tx) - } - - /// Imports transaction to ready queue. - /// - /// NOTE the transaction has to have all requirements satisfied. - fn import_to_ready(&mut self, tx: WaitingTransaction<Hash, Ex>) -> error::Result<Imported<Hash, Ex>> { - let hash = tx.transaction.hash.clone(); - let mut promoted = vec![]; - let mut failed = vec![]; - let mut removed = vec![]; - - let mut first = true; - let mut to_import = vec![tx]; - - loop { - // take first transaction from the list - let tx = match to_import.pop() { - Some(tx) => tx, - None => break, - }; - - // find transactions in Future that it unlocks - to_import.append(&mut self.future.satisfy_tags(&tx.transaction.provides)); - - // import this transaction - let current_hash = tx.transaction.hash.clone(); - match self.ready.import(tx) { - Ok(mut replaced) => { - if !first { - promoted.push(current_hash); - } - // The transactions were removed from the ready pool. We might attempt to re-import them. - removed.append(&mut replaced); - }, - // transaction failed to be imported. - Err(e) => if first { - debug!(target: "txpool", "[{:?}] Error importing: {:?}", current_hash, e); - return Err(e) - } else { - failed.push(current_hash); - }, - } - first = false; - } - - // An edge case when importing transaction caused - // some future transactions to be imported and that - // future transactions pushed out current transaction. - // This means that there is a cycle and the transactions should - // be moved back to future, since we can't resolve it. - if removed.iter().any(|tx| tx.hash == hash) { - // We still need to remove all transactions that we promoted - // since they depend on each other and will never get to the best iterator. - self.ready.remove_invalid(&promoted); - - debug!(target: "txpool", "[{:?}] Cycle detected, bailing.", hash); - bail!(error::ErrorKind::CycleDetected) - } - - Ok(Imported::Ready { - hash, - promoted, - failed, - removed, - }) - } - - /// Returns an iterator over ready transactions in the pool.
- pub fn ready(&self) -> impl Iterator>> { - self.ready.get() - } - - /// Returns an iterator over future transactions in the pool. - pub fn futures(&self) -> impl Iterator> { - self.future.all() - } - - /// Returns pool transactions given list of hashes. - /// - /// Includes both ready and future pool. For every hash in the `hashes` - /// iterator an `Option` is produced (so the resulting `Vec` always have the same length). - pub fn by_hash(&self, hashes: &[Hash]) -> Vec>>> { - let ready = self.ready.by_hash(hashes); - let future = self.future.by_hash(hashes); - - ready - .into_iter() - .zip(future) - .map(|(a, b)| a.or(b)) - .collect() - } - - /// Makes sure that the transactions in the queues stay within provided limits. - /// - /// Removes and returns worst transactions from the queues and all transactions that depend on them. - /// Technically the worst transaction should be evaluated by computing the entire pending set. - /// We use a simplified approach to remove the transaction that occupies the pool for the longest time. - pub fn enforce_limits(&mut self, ready: &Limit, future: &Limit) -> Vec>> { - let mut removed = vec![]; - - while ready.is_exceeded(self.ready.len(), self.ready.bytes()) { - // find the worst transaction - let minimal = self.ready - .fold(|minimal, current| { - let transaction = ¤t.transaction; - match minimal { - None => Some(transaction.clone()), - Some(ref tx) if tx.insertion_id > transaction.insertion_id => { - Some(transaction.clone()) - }, - other => other, - } - }); - - if let Some(minimal) = minimal { - removed.append(&mut self.remove_invalid(&[minimal.transaction.hash.clone()])) - } else { - break; - } - } - - while future.is_exceeded(self.future.len(), self.future.bytes()) { - // find the worst transaction - let minimal = self.future - .fold(|minimal, current| { - match minimal { - None => Some(current.clone()), - Some(ref tx) if tx.imported_at > current.imported_at => { - Some(current.clone()) - }, - other => other, - } - }); - - if let Some(minimal) = minimal { - removed.append(&mut self.remove_invalid(&[minimal.transaction.hash.clone()])) - } else { - break; - } - } - - removed - } - - /// Removes all transactions represented by the hashes and all other transactions - /// that depend on them. - /// - /// Returns a list of actually removed transactions. - /// NOTE some transactions might still be valid, but were just removed because - /// they were part of a chain, you may attempt to re-import them later. - /// NOTE If you want to remove ready transactions that were already used - /// and you don't want them to be stored in the pool use `prune_tags` method. - pub fn remove_invalid(&mut self, hashes: &[Hash]) -> Vec>> { - let mut removed = self.ready.remove_invalid(hashes); - removed.extend(self.future.remove(hashes)); - removed - } - - /// Prunes transactions that provide given list of tags. - /// - /// This will cause all transactions that provide these tags to be removed from the pool, - /// but unlike `remove_invalid`, dependent transactions are not touched. - /// Additional transactions from future queue might be promoted to ready if you satisfy tags - /// that the pool didn't previously know about. 
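The `enforce_limits` loop above evicts purely by residency: whichever queue exceeds its `Limit` keeps dropping its longest-resident transaction (together with dependants) until both the count and the byte total fit. A rough self-contained sketch of that loop, with a simplified queue standing in for `ReadyTransactions`; `SimpleQueue` and its fields are illustrative assumptions, not types from this crate:

// Illustrative sketch of the enforce_limits eviction loop.
struct Limit { count: usize, total_bytes: usize }

impl Limit {
    fn is_exceeded(&self, count: usize, bytes: usize) -> bool {
        self.count < count || self.total_bytes < bytes
    }
}

// stand-in for ReadyTransactions: (insertion_id, encoded size) pairs
struct SimpleQueue { txs: Vec<(u64, usize)> }

impl SimpleQueue {
    fn bytes(&self) -> usize { self.txs.iter().map(|t| t.1).sum() }
    fn evict_oldest(&mut self) -> Option<u64> {
        // the "worst" transaction is simply the longest-resident one
        let oldest = self.txs.iter().enumerate().min_by_key(|(_, t)| t.0)?.0;
        Some(self.txs.remove(oldest).0)
    }
}

fn main() {
    let limit = Limit { count: 2, total_bytes: 100 };
    let mut ready = SimpleQueue { txs: vec![(1, 40), (2, 40), (3, 40)] };
    let mut removed = vec![];
    while limit.is_exceeded(ready.txs.len(), ready.bytes()) {
        match ready.evict_oldest() {
            Some(id) => removed.push(id),
            None => break,
        }
    }
    // dropping tx 1 brings the queue to 2 txs / 80 bytes, within both limits
    assert_eq!(removed, vec![1]);
}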
- pub fn prune_tags(&mut self, tags: impl IntoIterator) -> PruneStatus { - let mut to_import = vec![]; - let mut pruned = vec![]; - let recently_pruned = &mut self.recently_pruned[self.recently_pruned_index]; - self.recently_pruned_index = (self.recently_pruned_index + 1) % RECENTLY_PRUNED_TAGS; - recently_pruned.clear(); - - for tag in tags { - // make sure to promote any future transactions that could be unlocked - to_import.append(&mut self.future.satisfy_tags(::std::iter::once(&tag))); - // and actually prune transactions in ready queue - pruned.append(&mut self.ready.prune_tags(tag.clone())); - // store the tags for next submission - recently_pruned.insert(tag); - } - - let mut promoted = vec![]; - let mut failed = vec![]; - for tx in to_import { - let hash = tx.transaction.hash.clone(); - match self.import_to_ready(tx) { - Ok(res) => promoted.push(res), - Err(e) => { - warn!(target: "txpool", "[{:?}] Failed to promote during pruning: {:?}", hash, e); - failed.push(hash) - }, - } - } - - PruneStatus { - pruned, - failed, - promoted, - } - } - - /// Get pool status. - pub fn status(&self) -> Status { - Status { - ready: self.ready.len(), - ready_bytes: self.ready.bytes(), - future: self.future.len(), - future_bytes: self.future.bytes(), - } - } + /// Imports transaction to the pool. + /// + /// The pool consists of two parts: Future and Ready. + /// The former contains transactions that require some tags that are not yet provided by + /// other transactions in the pool. + /// The latter contains transactions that have all the requirements satisfied and are + /// ready to be included in the block. + pub fn import(&mut self, tx: Transaction) -> error::Result> { + if self.future.contains(&tx.hash) || self.ready.contains(&tx.hash) { + bail!(error::ErrorKind::AlreadyImported(Box::new(tx.hash.clone()))) + } + + let tx = WaitingTransaction::new(tx, self.ready.provided_tags(), &self.recently_pruned); + trace!(target: "txpool", "[{:?}] {:?}", tx.transaction.hash, tx); + debug!(target: "txpool", "[{:?}] Importing to {}", tx.transaction.hash, if tx.is_ready() { "ready" } else { "future" }); + + // If all tags are not satisfied import to future. + if !tx.is_ready() { + let hash = tx.transaction.hash.clone(); + self.future.import(tx); + return Ok(Imported::Future { hash }); + } + + self.import_to_ready(tx) + } + + /// Imports transaction to ready queue. + /// + /// NOTE the transaction has to have all requirements satisfied. + fn import_to_ready( + &mut self, + tx: WaitingTransaction, + ) -> error::Result> { + let hash = tx.transaction.hash.clone(); + let mut promoted = vec![]; + let mut failed = vec![]; + let mut removed = vec![]; + + let mut first = true; + let mut to_import = vec![tx]; + + loop { + // take first transaction from the list + let tx = match to_import.pop() { + Some(tx) => tx, + None => break, + }; + + // find transactions in Future that it unlocks + to_import.append(&mut self.future.satisfy_tags(&tx.transaction.provides)); + + // import this transaction + let current_hash = tx.transaction.hash.clone(); + match self.ready.import(tx) { + Ok(mut replaced) => { + if !first { + promoted.push(current_hash); + } + // The transactions were removed from the ready pool. We might attempt to re-import them. + removed.append(&mut replaced); + } + // transaction failed to be imported. 
+ Err(e) => { + if first { + debug!(target: "txpool", "[{:?}] Error importing: {:?}", current_hash, e); + return Err(e); + } else { + failed.push(current_hash); + } + } + } + first = false; + } + + // An edge case when importing transaction caused + // some future transactions to be imported and that + // future transactions pushed out current transaction. + // This means that there is a cycle and the transactions should + // be moved back to future, since we can't resolve it. + if removed.iter().any(|tx| tx.hash == hash) { + // We still need to remove all transactions that we promoted + // since they depend on each other and will never get to the best iterator. + self.ready.remove_invalid(&promoted); + + debug!(target: "txpool", "[{:?}] Cycle detected, bailing.", hash); + bail!(error::ErrorKind::CycleDetected) + } + + Ok(Imported::Ready { + hash, + promoted, + failed, + removed, + }) + } + + /// Returns an iterator over ready transactions in the pool. + pub fn ready(&self) -> impl Iterator>> { + self.ready.get() + } + + /// Returns an iterator over future transactions in the pool. + pub fn futures(&self) -> impl Iterator> { + self.future.all() + } + + /// Returns pool transactions given list of hashes. + /// + /// Includes both ready and future pool. For every hash in the `hashes` + /// iterator an `Option` is produced (so the resulting `Vec` always have the same length). + pub fn by_hash(&self, hashes: &[Hash]) -> Vec>>> { + let ready = self.ready.by_hash(hashes); + let future = self.future.by_hash(hashes); + + ready + .into_iter() + .zip(future) + .map(|(a, b)| a.or(b)) + .collect() + } + + /// Makes sure that the transactions in the queues stay within provided limits. + /// + /// Removes and returns worst transactions from the queues and all transactions that depend on them. + /// Technically the worst transaction should be evaluated by computing the entire pending set. + /// We use a simplified approach to remove the transaction that occupies the pool for the longest time. + pub fn enforce_limits( + &mut self, + ready: &Limit, + future: &Limit, + ) -> Vec>> { + let mut removed = vec![]; + + while ready.is_exceeded(self.ready.len(), self.ready.bytes()) { + // find the worst transaction + let minimal = self.ready.fold(|minimal, current| { + let transaction = ¤t.transaction; + match minimal { + None => Some(transaction.clone()), + Some(ref tx) if tx.insertion_id > transaction.insertion_id => { + Some(transaction.clone()) + } + other => other, + } + }); + + if let Some(minimal) = minimal { + removed.append(&mut self.remove_invalid(&[minimal.transaction.hash.clone()])) + } else { + break; + } + } + + while future.is_exceeded(self.future.len(), self.future.bytes()) { + // find the worst transaction + let minimal = self.future.fold(|minimal, current| match minimal { + None => Some(current.clone()), + Some(ref tx) if tx.imported_at > current.imported_at => Some(current.clone()), + other => other, + }); + + if let Some(minimal) = minimal { + removed.append(&mut self.remove_invalid(&[minimal.transaction.hash.clone()])) + } else { + break; + } + } + + removed + } + + /// Removes all transactions represented by the hashes and all other transactions + /// that depend on them. + /// + /// Returns a list of actually removed transactions. + /// NOTE some transactions might still be valid, but were just removed because + /// they were part of a chain, you may attempt to re-import them later. 
+ /// NOTE If you want to remove ready transactions that were already used + /// and you don't want them to be stored in the pool use `prune_tags` method. + pub fn remove_invalid(&mut self, hashes: &[Hash]) -> Vec>> { + let mut removed = self.ready.remove_invalid(hashes); + removed.extend(self.future.remove(hashes)); + removed + } + + /// Prunes transactions that provide given list of tags. + /// + /// This will cause all transactions that provide these tags to be removed from the pool, + /// but unlike `remove_invalid`, dependent transactions are not touched. + /// Additional transactions from future queue might be promoted to ready if you satisfy tags + /// that the pool didn't previously know about. + pub fn prune_tags(&mut self, tags: impl IntoIterator) -> PruneStatus { + let mut to_import = vec![]; + let mut pruned = vec![]; + let recently_pruned = &mut self.recently_pruned[self.recently_pruned_index]; + self.recently_pruned_index = (self.recently_pruned_index + 1) % RECENTLY_PRUNED_TAGS; + recently_pruned.clear(); + + for tag in tags { + // make sure to promote any future transactions that could be unlocked + to_import.append(&mut self.future.satisfy_tags(::std::iter::once(&tag))); + // and actually prune transactions in ready queue + pruned.append(&mut self.ready.prune_tags(tag.clone())); + // store the tags for next submission + recently_pruned.insert(tag); + } + + let mut promoted = vec![]; + let mut failed = vec![]; + for tx in to_import { + let hash = tx.transaction.hash.clone(); + match self.import_to_ready(tx) { + Ok(res) => promoted.push(res), + Err(e) => { + warn!(target: "txpool", "[{:?}] Failed to promote during pruning: {:?}", hash, e); + failed.push(hash) + } + } + } + + PruneStatus { + pruned, + failed, + promoted, + } + } + + /// Get pool status. + pub fn status(&self) -> Status { + Status { + ready: self.ready.len(), + ready_bytes: self.ready.bytes(), + future: self.future.len(), + future_bytes: self.future.bytes(), + } + } } /// Pool status #[derive(Debug)] pub struct Status { - /// Number of transactions in the ready queue. - pub ready: usize, - /// Sum of bytes of ready transaction encodings. - pub ready_bytes: usize, - /// Number of transactions in the future queue. - pub future: usize, - /// Sum of bytes of ready transaction encodings. - pub future_bytes: usize, + /// Number of transactions in the ready queue. + pub ready: usize, + /// Sum of bytes of ready transaction encodings. + pub ready_bytes: usize, + /// Number of transactions in the future queue. + pub future: usize, + /// Sum of bytes of ready transaction encodings. + pub future_bytes: usize, } impl Status { - /// Returns true if the are no transactions in the pool. - pub fn is_empty(&self) -> bool { - self.ready == 0 && self.future == 0 - } + /// Returns true if the are no transactions in the pool. + pub fn is_empty(&self) -> bool { + self.ready == 0 && self.future == 0 + } } /// Queue limits #[derive(Debug, Clone)] pub struct Limit { - /// Maximal number of transactions in the queue. - pub count: usize, - /// Maximal size of encodings of all transactions in the queue. - pub total_bytes: usize, + /// Maximal number of transactions in the queue. + pub count: usize, + /// Maximal size of encodings of all transactions in the queue. + pub total_bytes: usize, } impl Limit { - /// Returns true if any of the provided values exceeds the limit. 
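To make the `prune_tags` semantics above concrete, here is a hedged usage sketch in the style of the tests that follow (u64 hashes, `Vec<u8>` extrinsics, one-byte tags). It assumes the `BasePool` and `Transaction` definitions from this file are in scope; the behaviour asserted matches the doc comments, not any new guarantee:

fn prune_example() {
    let mut pool: BasePool<u64, Vec<u8>> = BasePool::default();
    // tx 1 provides tag [0]; tx 2 consumes [0] and provides [1].
    pool.import(Transaction {
        data: vec![1u8], bytes: 1, hash: 1, priority: 5u64, valid_till: 64u64,
        requires: vec![], provides: vec![vec![0]],
    }).unwrap();
    pool.import(Transaction {
        data: vec![2u8], bytes: 1, hash: 2, priority: 5u64, valid_till: 64u64,
        requires: vec![vec![0]], provides: vec![vec![1]],
    }).unwrap();
    // Pruning tag [0] drops tx 1, which provides it, but unlike remove_invalid
    // it leaves the dependant tx 2 in the ready queue.
    let status = pool.prune_tags(vec![vec![0]]);
    assert_eq!(status.pruned.len(), 1);
    assert_eq!(pool.ready().count(), 1);
}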
- pub fn is_exceeded(&self, count: usize, bytes: usize) -> bool { - self.count < count || self.total_bytes < bytes - } + /// Returns true if any of the provided values exceeds the limit. + pub fn is_exceeded(&self, count: usize, bytes: usize) -> bool { + self.count < count || self.total_bytes < bytes + } } #[cfg(test)] mod tests { - use super::*; - - type Hash = u64; - - fn pool() -> BasePool> { - BasePool::default() - } - - #[test] - fn should_import_transaction_to_ready() { - // given - let mut pool = pool(); - - // when - pool.import(Transaction { - data: vec![1u8], - bytes: 1, - hash: 1u64, - priority: 5u64, - valid_till: 64u64, - requires: vec![], - provides: vec![vec![1]], - }).unwrap(); - - // then - assert_eq!(pool.ready().count(), 1); - assert_eq!(pool.ready.len(), 1); - } - - #[test] - fn should_not_import_same_transaction_twice() { - // given - let mut pool = pool(); - - // when - pool.import(Transaction { - data: vec![1u8], - bytes: 1, - hash: 1, - priority: 5u64, - valid_till: 64u64, - requires: vec![], - provides: vec![vec![1]], - }).unwrap(); - pool.import(Transaction { - data: vec![1u8], - bytes: 1, - hash: 1, - priority: 5u64, - valid_till: 64u64, - requires: vec![], - provides: vec![vec![1]], - }).unwrap_err(); - - // then - assert_eq!(pool.ready().count(), 1); - assert_eq!(pool.ready.len(), 1); - } - - - #[test] - fn should_import_transaction_to_future_and_promote_it_later() { - // given - let mut pool = pool(); - - // when - pool.import(Transaction { - data: vec![1u8], - bytes: 1, - hash: 1, - priority: 5u64, - valid_till: 64u64, - requires: vec![vec![0]], - provides: vec![vec![1]], - }).unwrap(); - assert_eq!(pool.ready().count(), 0); - assert_eq!(pool.ready.len(), 0); - pool.import(Transaction { - data: vec![2u8], - bytes: 1, - hash: 2, - priority: 5u64, - valid_till: 64u64, - requires: vec![], - provides: vec![vec![0]], - }).unwrap(); - - // then - assert_eq!(pool.ready().count(), 2); - assert_eq!(pool.ready.len(), 2); - } - - #[test] - fn should_promote_a_subgraph() { - // given - let mut pool = pool(); - - // when - pool.import(Transaction { - data: vec![1u8], - bytes: 1, - hash: 1, - priority: 5u64, - valid_till: 64u64, - requires: vec![vec![0]], - provides: vec![vec![1]], - }).unwrap(); - pool.import(Transaction { - data: vec![3u8], - bytes: 1, - hash: 3, - priority: 5u64, - valid_till: 64u64, - requires: vec![vec![2]], - provides: vec![], - }).unwrap(); - pool.import(Transaction { - data: vec![2u8], - bytes: 1, - hash: 2, - priority: 5u64, - valid_till: 64u64, - requires: vec![vec![1]], - provides: vec![vec![3], vec![2]], - }).unwrap(); - pool.import(Transaction { - data: vec![4u8], - bytes: 1, - hash: 4, - priority: 1_000u64, - valid_till: 64u64, - requires: vec![vec![3], vec![4]], - provides: vec![], - }).unwrap(); - assert_eq!(pool.ready().count(), 0); - assert_eq!(pool.ready.len(), 0); - - let res = pool.import(Transaction { - data: vec![5u8], - bytes: 1, - hash: 5, - priority: 5u64, - valid_till: 64u64, - requires: vec![], - provides: vec![vec![0], vec![4]], - }).unwrap(); - - // then - let mut it = pool.ready().into_iter().map(|tx| tx.data[0]); - - assert_eq!(it.next(), Some(5)); - assert_eq!(it.next(), Some(1)); - assert_eq!(it.next(), Some(2)); - assert_eq!(it.next(), Some(4)); - assert_eq!(it.next(), Some(3)); - assert_eq!(it.next(), None); - assert_eq!(res, Imported::Ready { - hash: 5, - promoted: vec![1, 2, 3, 4], - failed: vec![], - removed: vec![], - }); - } - - #[test] - fn should_handle_a_cycle() { - // given - let mut pool = pool(); - 
pool.import(Transaction { - data: vec![1u8], - bytes: 1, - hash: 1, - priority: 5u64, - valid_till: 64u64, - requires: vec![vec![0]], - provides: vec![vec![1]], - }).unwrap(); - pool.import(Transaction { - data: vec![3u8], - bytes: 1, - hash: 3, - priority: 5u64, - valid_till: 64u64, - requires: vec![vec![1]], - provides: vec![vec![2]], - }).unwrap(); - assert_eq!(pool.ready().count(), 0); - assert_eq!(pool.ready.len(), 0); - - // when - pool.import(Transaction { - data: vec![2u8], - bytes: 1, - hash: 2, - priority: 5u64, - valid_till: 64u64, - requires: vec![vec![2]], - provides: vec![vec![0]], - }).unwrap(); - - // then - { - let mut it = pool.ready().into_iter().map(|tx| tx.data[0]); - assert_eq!(it.next(), None); - } - // all transactions occupy the Future queue - it's fine - assert_eq!(pool.future.len(), 3); - - // let's close the cycle with one additional transaction - let res = pool.import(Transaction { - data: vec![4u8], - bytes: 1, - hash: 4, - priority: 50u64, - valid_till: 64u64, - requires: vec![], - provides: vec![vec![0]], - }).unwrap(); - let mut it = pool.ready().into_iter().map(|tx| tx.data[0]); - assert_eq!(it.next(), Some(4)); - assert_eq!(it.next(), Some(1)); - assert_eq!(it.next(), Some(3)); - assert_eq!(it.next(), None); - assert_eq!(res, Imported::Ready { - hash: 4, - promoted: vec![1, 3], - failed: vec![2], - removed: vec![], - }); - assert_eq!(pool.future.len(), 0); - } - - #[test] - fn should_handle_a_cycle_with_low_priority() { - // given - let mut pool = pool(); - pool.import(Transaction { - data: vec![1u8], - bytes: 1, - hash: 1, - priority: 5u64, - valid_till: 64u64, - requires: vec![vec![0]], - provides: vec![vec![1]], - }).unwrap(); - pool.import(Transaction { - data: vec![3u8], - bytes: 1, - hash: 3, - priority: 5u64, - valid_till: 64u64, - requires: vec![vec![1]], - provides: vec![vec![2]], - }).unwrap(); - assert_eq!(pool.ready().count(), 0); - assert_eq!(pool.ready.len(), 0); - - // when - pool.import(Transaction { - data: vec![2u8], - bytes: 1, - hash: 2, - priority: 5u64, - valid_till: 64u64, - requires: vec![vec![2]], - provides: vec![vec![0]], - }).unwrap(); - - // then - { - let mut it = pool.ready().into_iter().map(|tx| tx.data[0]); - assert_eq!(it.next(), None); - } - // all transactions occupy the Future queue - it's fine - assert_eq!(pool.future.len(), 3); - - // let's close the cycle with one additional transaction - let err = pool.import(Transaction { - data: vec![4u8], - bytes: 1, - hash: 4, - priority: 1u64, // lower priority than Tx(2) - valid_till: 64u64, - requires: vec![], - provides: vec![vec![0]], - }).unwrap_err(); - let mut it = pool.ready().into_iter().map(|tx| tx.data[0]); - assert_eq!(it.next(), None); - assert_eq!(pool.ready.len(), 0); - assert_eq!(pool.future.len(), 0); - if let error::ErrorKind::CycleDetected = *err.kind() { - } else { - assert!(false, "Invalid error kind: {:?}", err.kind()); - } - } - - #[test] - fn should_remove_invalid_transactions() { - // given - let mut pool = pool(); - pool.import(Transaction { - data: vec![5u8], - bytes: 1, - hash: 5, - priority: 5u64, - valid_till: 64u64, - requires: vec![], - provides: vec![vec![0], vec![4]], - }).unwrap(); - pool.import(Transaction { - data: vec![1u8], - bytes: 1, - hash: 1, - priority: 5u64, - valid_till: 64u64, - requires: vec![vec![0]], - provides: vec![vec![1]], - }).unwrap(); - pool.import(Transaction { - data: vec![3u8], - bytes: 1, - hash: 3, - priority: 5u64, - valid_till: 64u64, - requires: vec![vec![2]], - provides: vec![], - }).unwrap(); - 
pool.import(Transaction { - data: vec![2u8], - bytes: 1, - hash: 2, - priority: 5u64, - valid_till: 64u64, - requires: vec![vec![1]], - provides: vec![vec![3], vec![2]], - }).unwrap(); - pool.import(Transaction { - data: vec![4u8], - bytes: 1, - hash: 4, - priority: 1_000u64, - valid_till: 64u64, - requires: vec![vec![3], vec![4]], - provides: vec![], - }).unwrap(); - // future - pool.import(Transaction { - data: vec![6u8], - bytes: 1, - hash: 6, - priority: 1_000u64, - valid_till: 64u64, - requires: vec![vec![11]], - provides: vec![], - }).unwrap(); - assert_eq!(pool.ready().count(), 5); - assert_eq!(pool.future.len(), 1); - - // when - pool.remove_invalid(&[6, 1]); - - // then - assert_eq!(pool.ready().count(), 1); - assert_eq!(pool.future.len(), 0); - } - - #[test] - fn should_prune_ready_transactions() { - // given - let mut pool = pool(); - // future (waiting for 0) - pool.import(Transaction { - data: vec![5u8], - bytes: 1, - hash: 5, - priority: 5u64, - valid_till: 64u64, - requires: vec![vec![0]], - provides: vec![vec![100]], - }).unwrap(); - // ready - pool.import(Transaction { - data: vec![1u8], - bytes: 1, - hash: 1, - priority: 5u64, - valid_till: 64u64, - requires: vec![], - provides: vec![vec![1]], - }).unwrap(); - pool.import(Transaction { - data: vec![2u8], - bytes: 1, - hash: 2, - priority: 5u64, - valid_till: 64u64, - requires: vec![vec![2]], - provides: vec![vec![3]], - }).unwrap(); - pool.import(Transaction { - data: vec![3u8], - bytes: 1, - hash: 3, - priority: 5u64, - valid_till: 64u64, - requires: vec![vec![1]], - provides: vec![vec![2]], - }).unwrap(); - pool.import(Transaction { - data: vec![4u8], - bytes: 1, - hash: 4, - priority: 1_000u64, - valid_till: 64u64, - requires: vec![vec![3], vec![2]], - provides: vec![vec![4]], - }).unwrap(); - - assert_eq!(pool.ready().count(), 4); - assert_eq!(pool.future.len(), 1); - - // when - let result = pool.prune_tags(vec![vec![0], vec![2]]); - - // then - assert_eq!(result.pruned.len(), 2); - assert_eq!(result.failed.len(), 0); - assert_eq!(result.promoted[0], Imported::Ready { - hash: 5, - promoted: vec![], - failed: vec![], - removed: vec![], - }); - assert_eq!(result.promoted.len(), 1); - assert_eq!(pool.future.len(), 0); - assert_eq!(pool.ready.len(), 3); - assert_eq!(pool.ready().count(), 3); - } - - #[test] - fn transaction_debug() { - assert_eq!( + use super::*; + + type Hash = u64; + + fn pool() -> BasePool> { + BasePool::default() + } + + #[test] + fn should_import_transaction_to_ready() { + // given + let mut pool = pool(); + + // when + pool.import(Transaction { + data: vec![1u8], + bytes: 1, + hash: 1u64, + priority: 5u64, + valid_till: 64u64, + requires: vec![], + provides: vec![vec![1]], + }) + .unwrap(); + + // then + assert_eq!(pool.ready().count(), 1); + assert_eq!(pool.ready.len(), 1); + } + + #[test] + fn should_not_import_same_transaction_twice() { + // given + let mut pool = pool(); + + // when + pool.import(Transaction { + data: vec![1u8], + bytes: 1, + hash: 1, + priority: 5u64, + valid_till: 64u64, + requires: vec![], + provides: vec![vec![1]], + }) + .unwrap(); + pool.import(Transaction { + data: vec![1u8], + bytes: 1, + hash: 1, + priority: 5u64, + valid_till: 64u64, + requires: vec![], + provides: vec![vec![1]], + }) + .unwrap_err(); + + // then + assert_eq!(pool.ready().count(), 1); + assert_eq!(pool.ready.len(), 1); + } + + #[test] + fn should_import_transaction_to_future_and_promote_it_later() { + // given + let mut pool = pool(); + + // when + pool.import(Transaction { + data: vec![1u8], + bytes: 
1, + hash: 1, + priority: 5u64, + valid_till: 64u64, + requires: vec![vec![0]], + provides: vec![vec![1]], + }) + .unwrap(); + assert_eq!(pool.ready().count(), 0); + assert_eq!(pool.ready.len(), 0); + pool.import(Transaction { + data: vec![2u8], + bytes: 1, + hash: 2, + priority: 5u64, + valid_till: 64u64, + requires: vec![], + provides: vec![vec![0]], + }) + .unwrap(); + + // then + assert_eq!(pool.ready().count(), 2); + assert_eq!(pool.ready.len(), 2); + } + + #[test] + fn should_promote_a_subgraph() { + // given + let mut pool = pool(); + + // when + pool.import(Transaction { + data: vec![1u8], + bytes: 1, + hash: 1, + priority: 5u64, + valid_till: 64u64, + requires: vec![vec![0]], + provides: vec![vec![1]], + }) + .unwrap(); + pool.import(Transaction { + data: vec![3u8], + bytes: 1, + hash: 3, + priority: 5u64, + valid_till: 64u64, + requires: vec![vec![2]], + provides: vec![], + }) + .unwrap(); + pool.import(Transaction { + data: vec![2u8], + bytes: 1, + hash: 2, + priority: 5u64, + valid_till: 64u64, + requires: vec![vec![1]], + provides: vec![vec![3], vec![2]], + }) + .unwrap(); + pool.import(Transaction { + data: vec![4u8], + bytes: 1, + hash: 4, + priority: 1_000u64, + valid_till: 64u64, + requires: vec![vec![3], vec![4]], + provides: vec![], + }) + .unwrap(); + assert_eq!(pool.ready().count(), 0); + assert_eq!(pool.ready.len(), 0); + + let res = pool + .import(Transaction { + data: vec![5u8], + bytes: 1, + hash: 5, + priority: 5u64, + valid_till: 64u64, + requires: vec![], + provides: vec![vec![0], vec![4]], + }) + .unwrap(); + + // then + let mut it = pool.ready().into_iter().map(|tx| tx.data[0]); + + assert_eq!(it.next(), Some(5)); + assert_eq!(it.next(), Some(1)); + assert_eq!(it.next(), Some(2)); + assert_eq!(it.next(), Some(4)); + assert_eq!(it.next(), Some(3)); + assert_eq!(it.next(), None); + assert_eq!( + res, + Imported::Ready { + hash: 5, + promoted: vec![1, 2, 3, 4], + failed: vec![], + removed: vec![], + } + ); + } + + #[test] + fn should_handle_a_cycle() { + // given + let mut pool = pool(); + pool.import(Transaction { + data: vec![1u8], + bytes: 1, + hash: 1, + priority: 5u64, + valid_till: 64u64, + requires: vec![vec![0]], + provides: vec![vec![1]], + }) + .unwrap(); + pool.import(Transaction { + data: vec![3u8], + bytes: 1, + hash: 3, + priority: 5u64, + valid_till: 64u64, + requires: vec![vec![1]], + provides: vec![vec![2]], + }) + .unwrap(); + assert_eq!(pool.ready().count(), 0); + assert_eq!(pool.ready.len(), 0); + + // when + pool.import(Transaction { + data: vec![2u8], + bytes: 1, + hash: 2, + priority: 5u64, + valid_till: 64u64, + requires: vec![vec![2]], + provides: vec![vec![0]], + }) + .unwrap(); + + // then + { + let mut it = pool.ready().into_iter().map(|tx| tx.data[0]); + assert_eq!(it.next(), None); + } + // all transactions occupy the Future queue - it's fine + assert_eq!(pool.future.len(), 3); + + // let's close the cycle with one additional transaction + let res = pool + .import(Transaction { + data: vec![4u8], + bytes: 1, + hash: 4, + priority: 50u64, + valid_till: 64u64, + requires: vec![], + provides: vec![vec![0]], + }) + .unwrap(); + let mut it = pool.ready().into_iter().map(|tx| tx.data[0]); + assert_eq!(it.next(), Some(4)); + assert_eq!(it.next(), Some(1)); + assert_eq!(it.next(), Some(3)); + assert_eq!(it.next(), None); + assert_eq!( + res, + Imported::Ready { + hash: 4, + promoted: vec![1, 3], + failed: vec![2], + removed: vec![], + } + ); + assert_eq!(pool.future.len(), 0); + } + + #[test] + fn should_handle_a_cycle_with_low_priority() { 
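    // Both cycle tests build the same three-transaction loop over tags
    // 0 -> 1 -> 2 -> 0, so all three imports initially park in the Future
    // queue. The import that closes the cycle provides tag [0], and the
    // outcome is decided purely by priority: above, priority 50 beats
    // tx(2)'s priority of 5, so tx(2) is dropped as failed while 1 and 3
    // are promoted; here, priority 1 loses that comparison and the import
    // is rejected with `CycleDetected`.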
+ // given + let mut pool = pool(); + pool.import(Transaction { + data: vec![1u8], + bytes: 1, + hash: 1, + priority: 5u64, + valid_till: 64u64, + requires: vec![vec![0]], + provides: vec![vec![1]], + }) + .unwrap(); + pool.import(Transaction { + data: vec![3u8], + bytes: 1, + hash: 3, + priority: 5u64, + valid_till: 64u64, + requires: vec![vec![1]], + provides: vec![vec![2]], + }) + .unwrap(); + assert_eq!(pool.ready().count(), 0); + assert_eq!(pool.ready.len(), 0); + + // when + pool.import(Transaction { + data: vec![2u8], + bytes: 1, + hash: 2, + priority: 5u64, + valid_till: 64u64, + requires: vec![vec![2]], + provides: vec![vec![0]], + }) + .unwrap(); + + // then + { + let mut it = pool.ready().into_iter().map(|tx| tx.data[0]); + assert_eq!(it.next(), None); + } + // all transactions occupy the Future queue - it's fine + assert_eq!(pool.future.len(), 3); + + // let's close the cycle with one additional transaction + let err = pool + .import(Transaction { + data: vec![4u8], + bytes: 1, + hash: 4, + priority: 1u64, // lower priority than Tx(2) + valid_till: 64u64, + requires: vec![], + provides: vec![vec![0]], + }) + .unwrap_err(); + let mut it = pool.ready().into_iter().map(|tx| tx.data[0]); + assert_eq!(it.next(), None); + assert_eq!(pool.ready.len(), 0); + assert_eq!(pool.future.len(), 0); + if let error::ErrorKind::CycleDetected = *err.kind() { + } else { + assert!(false, "Invalid error kind: {:?}", err.kind()); + } + } + + #[test] + fn should_remove_invalid_transactions() { + // given + let mut pool = pool(); + pool.import(Transaction { + data: vec![5u8], + bytes: 1, + hash: 5, + priority: 5u64, + valid_till: 64u64, + requires: vec![], + provides: vec![vec![0], vec![4]], + }) + .unwrap(); + pool.import(Transaction { + data: vec![1u8], + bytes: 1, + hash: 1, + priority: 5u64, + valid_till: 64u64, + requires: vec![vec![0]], + provides: vec![vec![1]], + }) + .unwrap(); + pool.import(Transaction { + data: vec![3u8], + bytes: 1, + hash: 3, + priority: 5u64, + valid_till: 64u64, + requires: vec![vec![2]], + provides: vec![], + }) + .unwrap(); + pool.import(Transaction { + data: vec![2u8], + bytes: 1, + hash: 2, + priority: 5u64, + valid_till: 64u64, + requires: vec![vec![1]], + provides: vec![vec![3], vec![2]], + }) + .unwrap(); + pool.import(Transaction { + data: vec![4u8], + bytes: 1, + hash: 4, + priority: 1_000u64, + valid_till: 64u64, + requires: vec![vec![3], vec![4]], + provides: vec![], + }) + .unwrap(); + // future + pool.import(Transaction { + data: vec![6u8], + bytes: 1, + hash: 6, + priority: 1_000u64, + valid_till: 64u64, + requires: vec![vec![11]], + provides: vec![], + }) + .unwrap(); + assert_eq!(pool.ready().count(), 5); + assert_eq!(pool.future.len(), 1); + + // when + pool.remove_invalid(&[6, 1]); + + // then + assert_eq!(pool.ready().count(), 1); + assert_eq!(pool.future.len(), 0); + } + + #[test] + fn should_prune_ready_transactions() { + // given + let mut pool = pool(); + // future (waiting for 0) + pool.import(Transaction { + data: vec![5u8], + bytes: 1, + hash: 5, + priority: 5u64, + valid_till: 64u64, + requires: vec![vec![0]], + provides: vec![vec![100]], + }) + .unwrap(); + // ready + pool.import(Transaction { + data: vec![1u8], + bytes: 1, + hash: 1, + priority: 5u64, + valid_till: 64u64, + requires: vec![], + provides: vec![vec![1]], + }) + .unwrap(); + pool.import(Transaction { + data: vec![2u8], + bytes: 1, + hash: 2, + priority: 5u64, + valid_till: 64u64, + requires: vec![vec![2]], + provides: vec![vec![3]], + }) + .unwrap(); + pool.import(Transaction { + 
data: vec![3u8], + bytes: 1, + hash: 3, + priority: 5u64, + valid_till: 64u64, + requires: vec![vec![1]], + provides: vec![vec![2]], + }) + .unwrap(); + pool.import(Transaction { + data: vec![4u8], + bytes: 1, + hash: 4, + priority: 1_000u64, + valid_till: 64u64, + requires: vec![vec![3], vec![2]], + provides: vec![vec![4]], + }) + .unwrap(); + + assert_eq!(pool.ready().count(), 4); + assert_eq!(pool.future.len(), 1); + + // when + let result = pool.prune_tags(vec![vec![0], vec![2]]); + + // then + assert_eq!(result.pruned.len(), 2); + assert_eq!(result.failed.len(), 0); + assert_eq!( + result.promoted[0], + Imported::Ready { + hash: 5, + promoted: vec![], + failed: vec![], + removed: vec![], + } + ); + assert_eq!(result.promoted.len(), 1); + assert_eq!(pool.future.len(), 0); + assert_eq!(pool.ready.len(), 3); + assert_eq!(pool.ready().count(), 3); + } + + #[test] + fn transaction_debug() { + assert_eq!( format!("{:?}", Transaction { data: vec![4u8], bytes: 1, @@ -905,5 +935,5 @@ mod tests { }), r#"Transaction { hash: 4, priority: 1000, valid_till: 64, bytes: 1, requires: [03,02], provides: [04], data: [4]}"#.to_owned() ); - } + } } diff --git a/core/transaction-pool/graph/src/error.rs b/core/transaction-pool/graph/src/error.rs index 435ca922cd..f3d4d5ccd9 100644 --- a/core/transaction-pool/graph/src/error.rs +++ b/core/transaction-pool/graph/src/error.rs @@ -16,61 +16,66 @@ //! Transaction pool errors. -use sr_primitives::transaction_validity::TransactionPriority as Priority; use error_chain::{ - error_chain, error_chain_processing, impl_error_chain_processed, impl_extract_backtrace, impl_error_chain_kind + error_chain, error_chain_processing, impl_error_chain_kind, impl_error_chain_processed, + impl_extract_backtrace, }; +use sr_primitives::transaction_validity::TransactionPriority as Priority; error_chain! { - errors { - /// Transaction is not verifiable yet, but might be in the future. - UnknownTransactionValidity(e: i8) { - description("Runtime cannot determine validity of the transaction yet."), - display("Unkown Transaction Validity. Error code: {}", e), - } - /// Transaction is invalid - InvalidTransaction(e: i8) { - description("Runtime check for the transaction failed."), - display("Invalid Transaction. Error Code: {}", e), - } - /// The transaction is temporarily baned - TemporarilyBanned { - description("Transaction is temporarily banned from importing to the pool."), - display("Temporarily Banned"), - } - /// The transaction is already in the pool. - AlreadyImported(hash: Box<::std::any::Any + Send>) { - description("Transaction is already in the pool"), - display("[{:?}] Already imported", hash), - } - /// The transaction cannot be imported cause it's a replacement and has too low priority. - TooLowPriority(old: Priority, new: Priority) { - description("The priority is too low to replace transactions already in the pool."), - display("Too low priority ({} > {})", old, new) - } - /// Deps cycle detected and we couldn't import transaction. - CycleDetected { - description("Transaction was not imported because of detected cycle."), - display("Cycle Detected"), - } - /// Transaction was dropped immediately after it got inserted. - ImmediatelyDropped { - description("Transaction couldn't enter the pool because of the limit."), - display("Immediately Dropped"), - } - } + errors { + /// Transaction is not verifiable yet, but might be in the future. 
+ UnknownTransactionValidity(e: i8) { + description("Runtime cannot determine validity of the transaction yet."), + display("Unknown Transaction Validity. Error code: {}", e), + } + /// Transaction is invalid + InvalidTransaction(e: i8) { + description("Runtime check for the transaction failed."), + display("Invalid Transaction. Error Code: {}", e), + } + /// The transaction is temporarily banned + TemporarilyBanned { + description("Transaction is temporarily banned from importing to the pool."), + display("Temporarily Banned"), + } + /// The transaction is already in the pool. + AlreadyImported(hash: Box<::std::any::Any + Send>) { + description("Transaction is already in the pool"), + display("[{:?}] Already imported", hash), + } + /// The transaction cannot be imported because it's a replacement and has too low priority. + TooLowPriority(old: Priority, new: Priority) { + description("The priority is too low to replace transactions already in the pool."), + display("Too low priority ({} > {})", old, new) + } + /// A dependency cycle was detected and the transaction couldn't be imported. + CycleDetected { + description("Transaction was not imported because of detected cycle."), + display("Cycle Detected"), + } + /// Transaction was dropped immediately after it got inserted. + ImmediatelyDropped { + description("Transaction couldn't enter the pool because of the limit."), + display("Immediately Dropped"), + } + } } /// Transaction pool error conversion. pub trait IntoPoolError: ::std::error::Error + Send + Sized { - /// Try to extract original `Error` - /// - /// This implementation is optional and used only to - /// provide more descriptive error messages for end users - /// of RPC API. - fn into_pool_error(self) -> ::std::result::Result<Error, Self> { Err(self) } + /// Try to extract original `Error` + /// + /// This implementation is optional and used only to + /// provide more descriptive error messages for end users + /// of RPC API. + fn into_pool_error(self) -> ::std::result::Result<Error, Self> { + Err(self) + } } impl IntoPoolError for Error { - fn into_pool_error(self) -> ::std::result::Result<Error, Self> { Ok(self) } + fn into_pool_error(self) -> ::std::result::Result<Error, Self> { + Ok(self) + } } diff --git a/core/transaction-pool/graph/src/future.rs b/core/transaction-pool/graph/src/future.rs index 6ca5019e47..6b79f6e632 100644 --- a/core/transaction-pool/graph/src/future.rs +++ b/core/transaction-pool/graph/src/future.rs @@ -15,94 +15,93 @@ // along with Substrate. If not, see <http://www.gnu.org/licenses/>. use std::{ - collections::{HashMap, HashSet}, - fmt, - hash, - sync::Arc, - time, + collections::{HashMap, HashSet}, + fmt, hash, + sync::Arc, + time, }; +use sr_primitives::transaction_validity::TransactionTag as Tag; use substrate_primitives::hexdisplay::HexDisplay; -use sr_primitives::transaction_validity::{ - TransactionTag as Tag, -}; use crate::base_pool::Transaction; /// Transaction with partially satisfied dependencies. pub struct WaitingTransaction<Hash, Ex> { - /// Transaction details. - pub transaction: Arc<Transaction<Hash, Ex>>, - /// Tags that are required and have not been satisfied yet by other transactions in the pool. - pub missing_tags: HashSet<Tag>, - /// Time of import to the Future Queue. - pub imported_at: time::Instant, + /// Transaction details. + pub transaction: Arc<Transaction<Hash, Ex>>, + /// Tags that are required and have not been satisfied yet by other transactions in the pool. + pub missing_tags: HashSet<Tag>, + /// Time of import to the Future Queue. 
+ pub imported_at: time::Instant, } impl fmt::Debug for WaitingTransaction { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!(fmt, "WaitingTransaction {{ ")?; - write!(fmt, "imported_at: {:?}, ", self.imported_at)?; - write!(fmt, "transaction: {:?}, ", self.transaction)?; - write!(fmt, "missing_tags: {{")?; - let mut it = self.missing_tags.iter().map(|tag| HexDisplay::from(tag)); - if let Some(tag) = it.next() { - write!(fmt, "{}", tag)?; - } - for tag in it { - write!(fmt, ", {}", tag)?; - } - write!(fmt, " }}}}") - } + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!(fmt, "WaitingTransaction {{ ")?; + write!(fmt, "imported_at: {:?}, ", self.imported_at)?; + write!(fmt, "transaction: {:?}, ", self.transaction)?; + write!(fmt, "missing_tags: {{")?; + let mut it = self.missing_tags.iter().map(|tag| HexDisplay::from(tag)); + if let Some(tag) = it.next() { + write!(fmt, "{}", tag)?; + } + for tag in it { + write!(fmt, ", {}", tag)?; + } + write!(fmt, " }}}}") + } } impl Clone for WaitingTransaction { - fn clone(&self) -> Self { - WaitingTransaction { - transaction: self.transaction.clone(), - missing_tags: self.missing_tags.clone(), - imported_at: self.imported_at.clone(), - } - } + fn clone(&self) -> Self { + WaitingTransaction { + transaction: self.transaction.clone(), + missing_tags: self.missing_tags.clone(), + imported_at: self.imported_at.clone(), + } + } } impl WaitingTransaction { - /// Creates a new `WaitingTransaction`. - /// - /// Computes the set of missing tags based on the requirements and tags that - /// are provided by all transactions in the ready queue. - pub fn new( - transaction: Transaction, - provided: &HashMap, - recently_pruned: &[HashSet], - ) -> Self { - let missing_tags = transaction.requires - .iter() - .filter(|tag| { - // is true if the tag is already satisfied either via transaction in the pool - // or one that was recently included. - let is_provided = provided.contains_key(&**tag) || recently_pruned.iter().any(|x| x.contains(&**tag)); - !is_provided - }) - .cloned() - .collect(); - - WaitingTransaction { - transaction: Arc::new(transaction), - missing_tags, - imported_at: time::Instant::now(), - } - } - - /// Marks the tag as satisfied. - pub fn satisfy_tag(&mut self, tag: &Tag) { - self.missing_tags.remove(tag); - } - - /// Returns true if transaction has all requirements satisfied. - pub fn is_ready(&self) -> bool { - self.missing_tags.is_empty() - } + /// Creates a new `WaitingTransaction`. + /// + /// Computes the set of missing tags based on the requirements and tags that + /// are provided by all transactions in the ready queue. + pub fn new( + transaction: Transaction, + provided: &HashMap, + recently_pruned: &[HashSet], + ) -> Self { + let missing_tags = transaction + .requires + .iter() + .filter(|tag| { + // is true if the tag is already satisfied either via transaction in the pool + // or one that was recently included. + let is_provided = provided.contains_key(&**tag) + || recently_pruned.iter().any(|x| x.contains(&**tag)); + !is_provided + }) + .cloned() + .collect(); + + WaitingTransaction { + transaction: Arc::new(transaction), + missing_tags, + imported_at: time::Instant::now(), + } + } + + /// Marks the tag as satisfied. + pub fn satisfy_tag(&mut self, tag: &Tag) { + self.missing_tags.remove(tag); + } + + /// Returns true if transaction has all requirements satisfied. 
+ pub fn is_ready(&self) -> bool { + self.missing_tags.is_empty() + } } /// A pool of transactions that are not yet ready to be included in the block. @@ -111,19 +110,19 @@ impl WaitingTransaction { /// could provide a tag that they require. #[derive(Debug)] pub struct FutureTransactions { - /// tags that are not yet provided by any transaction and we await for them - wanted_tags: HashMap>, - /// Transactions waiting for a particular other transaction - waiting: HashMap>, + /// tags that are not yet provided by any transaction and we await for them + wanted_tags: HashMap>, + /// Transactions waiting for a particular other transaction + waiting: HashMap>, } impl Default for FutureTransactions { - fn default() -> Self { - FutureTransactions { - wanted_tags: Default::default(), - waiting: Default::default(), - } - } + fn default() -> Self { + FutureTransactions { + wanted_tags: Default::default(), + waiting: Default::default(), + } + } } const WAITING_PROOF: &str = r"# @@ -134,106 +133,123 @@ qed #"; impl FutureTransactions { - /// Import transaction to Future queue. - /// - /// Only transactions that don't have all their tags satisfied should occupy - /// the Future queue. - /// As soon as required tags are provided by some other transactions that are ready - /// we should remove the transactions from here and move them to the Ready queue. - pub fn import(&mut self, tx: WaitingTransaction) { - assert!(!tx.is_ready(), "Transaction is ready."); - assert!(!self.waiting.contains_key(&tx.transaction.hash), "Transaction is already imported."); - - // Add all tags that are missing - for tag in &tx.missing_tags { - let entry = self.wanted_tags.entry(tag.clone()).or_insert_with(HashSet::new); - entry.insert(tx.transaction.hash.clone()); - } - - // Add the transaction to a by-hash waiting map - self.waiting.insert(tx.transaction.hash.clone(), tx); - } - - /// Returns true if given hash is part of the queue. - pub fn contains(&self, hash: &Hash) -> bool { - self.waiting.contains_key(hash) - } - - /// Returns a list of known transactions - pub fn by_hash(&self, hashes: &[Hash]) -> Vec>>> { - hashes.iter().map(|h| self.waiting.get(h).map(|x| x.transaction.clone())).collect() - } - - /// Satisfies provided tags in transactions that are waiting for them. - /// - /// Returns (and removes) transactions that became ready after their last tag got - /// satisfied and now we can remove them from Future and move to Ready queue. - pub fn satisfy_tags>(&mut self, tags: impl IntoIterator) -> Vec> { - let mut became_ready = vec![]; - - for tag in tags { - if let Some(hashes) = self.wanted_tags.remove(tag.as_ref()) { - for hash in hashes { - let is_ready = { - let tx = self.waiting.get_mut(&hash).expect(WAITING_PROOF); - tx.satisfy_tag(tag.as_ref()); - tx.is_ready() - }; - - if is_ready { - let tx = self.waiting.remove(&hash).expect(WAITING_PROOF); - became_ready.push(tx); - } - } - } - } - - became_ready - } - - /// Removes transactions for given list of hashes. - /// - /// Returns a list of actually removed transactions. 
- pub fn remove(&mut self, hashes: &[Hash]) -> Vec>> { - let mut removed = vec![]; - for hash in hashes { - if let Some(waiting_tx) = self.waiting.remove(hash) { - // remove from wanted_tags as well - for tag in waiting_tx.missing_tags { - let remove = if let Some(wanted) = self.wanted_tags.get_mut(&tag) { - wanted.remove(hash); - wanted.is_empty() - } else { false }; - if remove { - self.wanted_tags.remove(&tag); - } - } - // add to result - removed.push(waiting_tx.transaction) - } - } - removed - } - - /// Fold a list of future transactions to compute a single value. - pub fn fold, &WaitingTransaction) -> Option>(&mut self, f: F) -> Option { - self.waiting - .values() - .fold(None, f) - } - - /// Returns iterator over all future transactions - pub fn all(&self) -> impl Iterator> { - self.waiting.values().map(|waiting| &*waiting.transaction) - } - - /// Returns number of transactions in the Future queue. - pub fn len(&self) -> usize { - self.waiting.len() - } - - /// Returns sum of encoding lengths of all transactions in this queue. - pub fn bytes(&self) -> usize { - self.waiting.values().fold(0, |acc, tx| acc + tx.transaction.bytes) - } + /// Import transaction to Future queue. + /// + /// Only transactions that don't have all their tags satisfied should occupy + /// the Future queue. + /// As soon as required tags are provided by some other transactions that are ready + /// we should remove the transactions from here and move them to the Ready queue. + pub fn import(&mut self, tx: WaitingTransaction) { + assert!(!tx.is_ready(), "Transaction is ready."); + assert!( + !self.waiting.contains_key(&tx.transaction.hash), + "Transaction is already imported." + ); + + // Add all tags that are missing + for tag in &tx.missing_tags { + let entry = self + .wanted_tags + .entry(tag.clone()) + .or_insert_with(HashSet::new); + entry.insert(tx.transaction.hash.clone()); + } + + // Add the transaction to a by-hash waiting map + self.waiting.insert(tx.transaction.hash.clone(), tx); + } + + /// Returns true if given hash is part of the queue. + pub fn contains(&self, hash: &Hash) -> bool { + self.waiting.contains_key(hash) + } + + /// Returns a list of known transactions + pub fn by_hash(&self, hashes: &[Hash]) -> Vec>>> { + hashes + .iter() + .map(|h| self.waiting.get(h).map(|x| x.transaction.clone())) + .collect() + } + + /// Satisfies provided tags in transactions that are waiting for them. + /// + /// Returns (and removes) transactions that became ready after their last tag got + /// satisfied and now we can remove them from Future and move to Ready queue. + pub fn satisfy_tags>( + &mut self, + tags: impl IntoIterator, + ) -> Vec> { + let mut became_ready = vec![]; + + for tag in tags { + if let Some(hashes) = self.wanted_tags.remove(tag.as_ref()) { + for hash in hashes { + let is_ready = { + let tx = self.waiting.get_mut(&hash).expect(WAITING_PROOF); + tx.satisfy_tag(tag.as_ref()); + tx.is_ready() + }; + + if is_ready { + let tx = self.waiting.remove(&hash).expect(WAITING_PROOF); + became_ready.push(tx); + } + } + } + } + + became_ready + } + + /// Removes transactions for given list of hashes. + /// + /// Returns a list of actually removed transactions. 
+ pub fn remove(&mut self, hashes: &[Hash]) -> Vec>> { + let mut removed = vec![]; + for hash in hashes { + if let Some(waiting_tx) = self.waiting.remove(hash) { + // remove from wanted_tags as well + for tag in waiting_tx.missing_tags { + let remove = if let Some(wanted) = self.wanted_tags.get_mut(&tag) { + wanted.remove(hash); + wanted.is_empty() + } else { + false + }; + if remove { + self.wanted_tags.remove(&tag); + } + } + // add to result + removed.push(waiting_tx.transaction) + } + } + removed + } + + /// Fold a list of future transactions to compute a single value. + pub fn fold, &WaitingTransaction) -> Option>( + &mut self, + f: F, + ) -> Option { + self.waiting.values().fold(None, f) + } + + /// Returns iterator over all future transactions + pub fn all(&self) -> impl Iterator> { + self.waiting.values().map(|waiting| &*waiting.transaction) + } + + /// Returns number of transactions in the Future queue. + pub fn len(&self) -> usize { + self.waiting.len() + } + + /// Returns sum of encoding lengths of all transactions in this queue. + pub fn bytes(&self) -> usize { + self.waiting + .values() + .fold(0, |acc, tx| acc + tx.transaction.bytes) + } } diff --git a/core/transaction-pool/graph/src/lib.rs b/core/transaction-pool/graph/src/lib.rs index ea890a5cd0..32bb70e794 100644 --- a/core/transaction-pool/graph/src/lib.rs +++ b/core/transaction-pool/graph/src/lib.rs @@ -34,6 +34,9 @@ pub mod base_pool; pub mod error; pub mod watcher; +pub use self::base_pool::{Status, Transaction}; pub use self::error::IntoPoolError; -pub use self::base_pool::{Transaction, Status}; -pub use self::pool::{Pool, Options, ChainApi, EventStream, ExtrinsicFor, BlockHash, ExHash, NumberFor, TransactionFor}; +pub use self::pool::{ + BlockHash, ChainApi, EventStream, ExHash, ExtrinsicFor, NumberFor, Options, Pool, + TransactionFor, +}; diff --git a/core/transaction-pool/graph/src/listener.rs b/core/transaction-pool/graph/src/listener.rs index 335ff8a053..2bfcf2aa23 100644 --- a/core/transaction-pool/graph/src/listener.rs +++ b/core/transaction-pool/graph/src/listener.rs @@ -1,4 +1,3 @@ - // Copyright 2018-2019 Parity Technologies (UK) Ltd. // This file is part of Substrate. @@ -15,84 +14,87 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use std::{ - collections::HashMap, - hash, -}; -use serde::Serialize; use crate::watcher; -use sr_primitives::traits; use log::warn; +use serde::Serialize; +use sr_primitives::traits; +use std::{collections::HashMap, hash}; /// Extrinsic pool default listener. pub struct Listener { - watchers: HashMap> + watchers: HashMap>, } impl Default for Listener { - fn default() -> Self { - Listener { - watchers: Default::default(), - } - } + fn default() -> Self { + Listener { + watchers: Default::default(), + } + } } impl Listener { - fn fire(&mut self, hash: &H, fun: F) where F: FnOnce(&mut watcher::Sender) { - let clean = if let Some(h) = self.watchers.get_mut(hash) { - fun(h); - h.is_done() - } else { - false - }; - - if clean { - self.watchers.remove(hash); - } - } - - /// Creates a new watcher for given verified extrinsic. - /// - /// The watcher can be used to subscribe to lifecycle events of that extrinsic. - pub fn create_watcher(&mut self, hash: H) -> watcher::Watcher { - let sender = self.watchers.entry(hash.clone()).or_insert_with(watcher::Sender::default); - sender.new_watcher(hash) - } - - /// Notify the listeners about extrinsic broadcast. 
- pub fn broadcasted(&mut self, hash: &H, peers: Vec) { - self.fire(hash, |watcher| watcher.broadcast(peers)); - } - - /// New transaction was added to the ready pool or promoted from the future pool. - pub fn ready(&mut self, tx: &H, old: Option<&H>) { - self.fire(tx, |watcher| watcher.ready()); - if let Some(old) = old { - self.fire(old, |watcher| watcher.usurped(tx.clone())); - } - } - - /// New transaction was added to the future pool. - pub fn future(&mut self, tx: &H) { - self.fire(tx, |watcher| watcher.future()); - } - - /// Transaction was dropped from the pool because of the limit. - pub fn dropped(&mut self, tx: &H, by: Option<&H>) { - self.fire(tx, |watcher| match by { - Some(t) => watcher.usurped(t.clone()), - None => watcher.dropped(), - }) - } - - /// Transaction was removed as invalid. - pub fn invalid(&mut self, tx: &H) { - warn!(target: "transaction-pool", "Extrinsic invalid: {:?}", tx); - self.fire(tx, |watcher| watcher.invalid()); - } - - /// Transaction was pruned from the pool. - pub fn pruned(&mut self, header_hash: H2, tx: &H) { - self.fire(tx, |watcher| watcher.finalized(header_hash)) - } + fn fire(&mut self, hash: &H, fun: F) + where + F: FnOnce(&mut watcher::Sender), + { + let clean = if let Some(h) = self.watchers.get_mut(hash) { + fun(h); + h.is_done() + } else { + false + }; + + if clean { + self.watchers.remove(hash); + } + } + + /// Creates a new watcher for given verified extrinsic. + /// + /// The watcher can be used to subscribe to lifecycle events of that extrinsic. + pub fn create_watcher(&mut self, hash: H) -> watcher::Watcher { + let sender = self + .watchers + .entry(hash.clone()) + .or_insert_with(watcher::Sender::default); + sender.new_watcher(hash) + } + + /// Notify the listeners about extrinsic broadcast. + pub fn broadcasted(&mut self, hash: &H, peers: Vec) { + self.fire(hash, |watcher| watcher.broadcast(peers)); + } + + /// New transaction was added to the ready pool or promoted from the future pool. + pub fn ready(&mut self, tx: &H, old: Option<&H>) { + self.fire(tx, |watcher| watcher.ready()); + if let Some(old) = old { + self.fire(old, |watcher| watcher.usurped(tx.clone())); + } + } + + /// New transaction was added to the future pool. + pub fn future(&mut self, tx: &H) { + self.fire(tx, |watcher| watcher.future()); + } + + /// Transaction was dropped from the pool because of the limit. + pub fn dropped(&mut self, tx: &H, by: Option<&H>) { + self.fire(tx, |watcher| match by { + Some(t) => watcher.usurped(t.clone()), + None => watcher.dropped(), + }) + } + + /// Transaction was removed as invalid. + pub fn invalid(&mut self, tx: &H) { + warn!(target: "transaction-pool", "Extrinsic invalid: {:?}", tx); + self.fire(tx, |watcher| watcher.invalid()); + } + + /// Transaction was pruned from the pool. + pub fn pruned(&mut self, header_hash: H2, tx: &H) { + self.fire(tx, |watcher| watcher.finalized(header_hash)) + } } diff --git a/core/transaction-pool/graph/src/pool.rs b/core/transaction-pool/graph/src/pool.rs index 91ded26630..d85a02c5d2 100644 --- a/core/transaction-pool/graph/src/pool.rs +++ b/core/transaction-pool/graph/src/pool.rs @@ -15,10 +15,10 @@ // along with Substrate. If not, see . 
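To show how the `Listener` API reformatted above fits together, a minimal sketch follows. The `u64` hash types and the `Vec<String>` peer list are illustrative stand-ins for the generic parameters, not something the patch mandates:

    let mut listener: Listener<u64, u64> = Default::default();
    // Subscribe first: `create_watcher` returns a Watcher fed by `fire`.
    let _watcher = listener.create_watcher(1u64);
    listener.ready(&1u64, None); // transaction 1 entered the ready queue
    listener.broadcasted(&1u64, vec!["peer-1".into()]); // announced to a peer
    listener.pruned(100u64, &1u64); // finalized in the block with hash 100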
use std::{ - collections::{HashSet, HashMap}, - hash, - sync::Arc, - time, + collections::{HashMap, HashSet}, + hash, + sync::Arc, + time, }; use crate::base_pool as base; @@ -26,16 +26,16 @@ use crate::error; use crate::listener::Listener; use crate::rotator::PoolRotator; use crate::watcher::Watcher; -use serde::Serialize; use error_chain::bail; use log::debug; +use serde::Serialize; use futures::sync::mpsc; use parking_lot::{Mutex, RwLock}; use sr_primitives::{ - generic::BlockId, - traits::{self, As}, - transaction_validity::{TransactionValidity, TransactionTag as Tag}, + generic::BlockId, + traits::{self, As}, + transaction_validity::{TransactionTag as Tag, TransactionValidity}, }; pub use crate::base_pool::Limit; @@ -56,894 +56,1073 @@ pub type TransactionFor = Arc, ExtrinsicFor>>; /// Concrete extrinsic validation and query logic. pub trait ChainApi: Send + Sync { - /// Block type. - type Block: traits::Block; - /// Transaction Hash type - type Hash: hash::Hash + Eq + traits::Member + Serialize; - /// Error type. - type Error: From + error::IntoPoolError; - - /// Verify extrinsic at given block. - fn validate_transaction(&self, at: &BlockId, uxt: ExtrinsicFor) -> Result; - - /// Returns a block number given the block id. - fn block_id_to_number(&self, at: &BlockId) -> Result>, Self::Error>; - - /// Returns a block hash given the block id. - fn block_id_to_hash(&self, at: &BlockId) -> Result>, Self::Error>; - - /// Returns hash and encoding length of the extrinsic. - fn hash_and_length(&self, uxt: &ExtrinsicFor) -> (Self::Hash, usize); + /// Block type. + type Block: traits::Block; + /// Transaction Hash type + type Hash: hash::Hash + Eq + traits::Member + Serialize; + /// Error type. + type Error: From + error::IntoPoolError; + + /// Verify extrinsic at given block. + fn validate_transaction( + &self, + at: &BlockId, + uxt: ExtrinsicFor, + ) -> Result; + + /// Returns a block number given the block id. + fn block_id_to_number( + &self, + at: &BlockId, + ) -> Result>, Self::Error>; + + /// Returns a block hash given the block id. + fn block_id_to_hash( + &self, + at: &BlockId, + ) -> Result>, Self::Error>; + + /// Returns hash and encoding length of the extrinsic. + fn hash_and_length(&self, uxt: &ExtrinsicFor) -> (Self::Hash, usize); } /// Pool configuration options. #[derive(Debug, Clone)] pub struct Options { - /// Ready queue limits. - pub ready: Limit, - /// Future queue limits. - pub future: Limit, + /// Ready queue limits. + pub ready: Limit, + /// Future queue limits. + pub future: Limit, } impl Default for Options { - fn default() -> Self { - Options { - ready: Limit { - count: 512, - total_bytes: 10 * 1024 * 1024, - }, - future: Limit { - count: 128, - total_bytes: 1 * 1024 * 1024, - }, - } - } + fn default() -> Self { + Options { + ready: Limit { + count: 512, + total_bytes: 10 * 1024 * 1024, + }, + future: Limit { + count: 128, + total_bytes: 1 * 1024 * 1024, + }, + } + } } /// Extrinsics pool. pub struct Pool { - api: B, - options: Options, - listener: RwLock, BlockHash>>, - pool: RwLock, - ExtrinsicFor, - >>, - import_notification_sinks: Mutex>>, - rotator: PoolRotator>, + api: B, + options: Options, + listener: RwLock, BlockHash>>, + pool: RwLock, ExtrinsicFor>>, + import_notification_sinks: Mutex>>, + rotator: PoolRotator>, } impl Pool { - /// Imports a bunch of unverified extrinsics to the pool - pub fn submit_at(&self, at: &BlockId, xts: T) -> Result, B::Error>>, B::Error> where - T: IntoIterator> - { - let block_number = self.api.block_id_to_number(at)? 
- .ok_or_else(|| error::ErrorKind::Msg(format!("Invalid block id: {:?}", at)).into())?; - - let results = xts - .into_iter() - .map(|xt| -> Result<_, B::Error> { - let (hash, bytes) = self.api.hash_and_length(&xt); - if self.rotator.is_banned(&hash) { - bail!(error::Error::from(error::ErrorKind::TemporarilyBanned)) - } - - match self.api.validate_transaction(at, xt.clone())? { - TransactionValidity::Valid { priority, requires, provides, longevity } => { - Ok(base::Transaction { - data: xt, - bytes, - hash, - priority, - requires, - provides, - valid_till: block_number.as_().saturating_add(longevity), - }) - }, - TransactionValidity::Invalid(e) => { - bail!(error::Error::from(error::ErrorKind::InvalidTransaction(e))) - }, - TransactionValidity::Unknown(e) => { - self.listener.write().invalid(&hash); - bail!(error::Error::from(error::ErrorKind::UnknownTransactionValidity(e))) - }, - } - }) - .map(|tx| { - let imported = self.pool.write().import(tx?)?; - - if let base::Imported::Ready { .. } = imported { - self.import_notification_sinks.lock().retain(|sink| sink.unbounded_send(()).is_ok()); - } - - let mut listener = self.listener.write(); - fire_events(&mut *listener, &imported); - Ok(imported.hash().clone()) - }) - .collect::>(); - - let removed = self.enforce_limits(); - - Ok(results.into_iter().map(|res| match res { - Ok(ref hash) if removed.contains(hash) => Err(error::Error::from(error::ErrorKind::ImmediatelyDropped).into()), - other => other, - }).collect()) - } - - fn enforce_limits(&self) -> HashSet> { - let status = self.pool.read().status(); - let ready_limit = &self.options.ready; - let future_limit = &self.options.future; - - debug!(target: "txpool", "Pool Status: {:?}", status); - - if ready_limit.is_exceeded(status.ready, status.ready_bytes) - || future_limit.is_exceeded(status.future, status.future_bytes) { - // clean up the pool - let removed = { - let mut pool = self.pool.write(); - let removed = pool.enforce_limits(ready_limit, future_limit) - .into_iter().map(|x| x.hash.clone()).collect::>(); - // ban all removed transactions - self.rotator.ban(&std::time::Instant::now(), removed.iter().map(|x| x.clone())); - removed - }; - // run notifications - let mut listener = self.listener.write(); - for h in &removed { - listener.dropped(h, None); - } - - removed - } else { - Default::default() - } - } - - /// Imports one unverified extrinsic to the pool - pub fn submit_one(&self, at: &BlockId, xt: ExtrinsicFor) -> Result, B::Error> { - Ok(self.submit_at(at, ::std::iter::once(xt))?.pop().expect("One extrinsic passed; one result returned; qed")?) - } - - /// Import a single extrinsic and starts to watch their progress in the pool. - pub fn submit_and_watch(&self, at: &BlockId, xt: ExtrinsicFor) -> Result, BlockHash>, B::Error> { - let hash = self.api.hash_and_length(&xt).0; - let watcher = self.listener.write().create_watcher(hash); - self.submit_one(at, xt)?; - Ok(watcher) - } - - /// Prunes ready transactions. - /// - /// Used to clear the pool from transactions that were part of recently imported block. - /// To perform pruning we need the tags that each extrinsic provides and to avoid calling - /// into runtime too often we first lookup all extrinsics that are in the pool and get - /// their provided tags from there. Otherwise we query the runtime at the `parent` block. 
- pub fn prune(&self, at: &BlockId, parent: &BlockId, extrinsics: &[ExtrinsicFor]) -> Result<(), B::Error> { - let mut tags = Vec::with_capacity(extrinsics.len()); - // Get details of all extrinsics that are already in the pool - let hashes = extrinsics.iter().map(|extrinsic| self.api.hash_and_length(extrinsic).0).collect::>(); - let in_pool = self.pool.read().by_hash(&hashes); - { - // Zip the ones from the pool with the full list (we get pairs `(Extrinsic, Option)`) - let all = extrinsics.iter().zip(in_pool.iter()); - - for (extrinsic, existing_in_pool) in all { - match *existing_in_pool { - // reuse the tags for extrinsis that were found in the pool - Some(ref transaction) => { - tags.extend(transaction.provides.iter().cloned()); - }, - // if it's not found in the pool query the runtime at parent block - // to get validity info and tags that the extrinsic provides. - None => { - let validity = self.api.validate_transaction(parent, extrinsic.clone()); - match validity { - Ok(TransactionValidity::Valid { mut provides, .. }) => { - tags.append(&mut provides); - }, - // silently ignore invalid extrinsics, - // cause they might just be inherent - _ => {} - } - }, - } - } - } - - self.prune_tags(at, tags, in_pool.into_iter().filter_map(|x| x).map(|x| x.hash.clone()))?; - - Ok(()) - } - - /// Prunes ready transactions that provide given list of tags. - /// - /// Given tags are assumed to be always provided now, so all transactions - /// in the Future Queue that require that particular tag (and have other - /// requirements satisfied) are promoted to Ready Queue. - /// - /// Moreover for each provided tag we remove transactions in the pool that: - /// 1. Provide that tag directly - /// 2. Are a dependency of pruned transaction. - /// - /// By removing predecessor transactions as well we might actually end up - /// pruning too much, so all removed transactions are reverified against - /// the runtime (`validate_transaction`) to make sure they are invalid. - /// - /// However we avoid revalidating transactions that are contained within - /// the second parameter of `known_imported_hashes`. These transactions - /// (if pruned) are not revalidated and become temporarily banned to - /// prevent importing them in the (near) future. - pub fn prune_tags( - &self, - at: &BlockId, - tags: impl IntoIterator, - known_imported_hashes: impl IntoIterator> + Clone, - ) -> Result<(), B::Error> { - // Perform tag-based pruning in the base pool - let status = self.pool.write().prune_tags(tags); - // Notify event listeners of all transactions - // that were promoted to `Ready` or were dropped. - { - let mut listener = self.listener.write(); - for promoted in &status.promoted { - fire_events(&mut *listener, promoted); - } - for f in &status.failed { - listener.dropped(f, None); - } - } - // make sure that we don't revalidate extrinsics that were part of the recently - // imported block. This is especially important for UTXO-like chains cause the - // inputs are pruned so such transaction would go to future again. - self.rotator.ban(&std::time::Instant::now(), known_imported_hashes.clone().into_iter()); - - // try to re-submit pruned transactions since some of them might be still valid. - // note that `known_imported_hashes` will be rejected here due to temporary ban. 
- let hashes = status.pruned.iter().map(|tx| tx.hash.clone()).collect::>(); - let results = self.submit_at(at, status.pruned.into_iter().map(|tx| tx.data.clone()))?; - - // Collect the hashes of transactions that now became invalid (meaning that they are succesfuly pruned). - let hashes = results.into_iter().enumerate().filter_map(|(idx, r)| match r.map_err(error::IntoPoolError::into_pool_error) { - Err(Ok(err)) => match err.kind() { - error::ErrorKind::InvalidTransaction(_) => Some(hashes[idx].clone()), - _ => None, - }, - _ => None, - }); - // Fire `pruned` notifications for collected hashes and make sure to include - // `known_imported_hashes` since they were just imported as part of the block. - let hashes = hashes.chain(known_imported_hashes.into_iter()); - { - let header_hash = self.api.block_id_to_hash(at)? - .ok_or_else(|| error::ErrorKind::Msg(format!("Invalid block id: {:?}", at)).into())?; - let mut listener = self.listener.write(); - for h in hashes { - listener.pruned(header_hash, &h); - } - } - // perform regular cleanup of old transactions in the pool - // and update temporary bans. - self.clear_stale(at)?; - Ok(()) - } - - /// Removes stale transactions from the pool. - /// - /// Stale transactions are transaction beyond their longevity period. - /// Note this function does not remove transactions that are already included in the chain. - /// See `prune_tags` ifyou want this. - pub fn clear_stale(&self, at: &BlockId) -> Result<(), B::Error> { - let block_number = self.api.block_id_to_number(at)? - .ok_or_else(|| error::ErrorKind::Msg(format!("Invalid block id: {:?}", at)).into())? - .as_(); - let now = time::Instant::now(); - let to_remove = { - self.ready() - .filter(|tx| self.rotator.ban_if_stale(&now, block_number, &tx)) - .map(|tx| tx.hash.clone()) - .collect::>() - }; - let futures_to_remove: Vec> = { - let p = self.pool.read(); - let mut hashes = Vec::new(); - for tx in p.futures() { - if self.rotator.ban_if_stale(&now, block_number, &tx) { - hashes.push(tx.hash.clone()); - } - } - hashes - }; - // removing old transactions - self.remove_invalid(&to_remove); - self.remove_invalid(&futures_to_remove); - // clear banned transactions timeouts - self.rotator.clear_timeouts(&now); - - Ok(()) - } - - /// Create a new transaction pool. - pub fn new(options: Options, api: B) -> Self { - Pool { - api, - options, - listener: Default::default(), - pool: Default::default(), - import_notification_sinks: Default::default(), - rotator: Default::default(), - } - } - - /// Return an event stream of transactions imported to the pool. - pub fn import_notification_stream(&self) -> EventStream { - let (sink, stream) = mpsc::unbounded(); - self.import_notification_sinks.lock().push(sink); - stream - } - - /// Invoked when extrinsics are broadcasted. - pub fn on_broadcasted(&self, propagated: HashMap, Vec>) { - let mut listener = self.listener.write(); - for (hash, peers) in propagated.into_iter() { - listener.broadcasted(&hash, peers); - } - } - - /// Remove from the pool. 
- pub fn remove_invalid(&self, hashes: &[ExHash]) -> Vec> { - // temporarily ban invalid transactions - debug!(target: "txpool", "Banning invalid transactions: {:?}", hashes); - self.rotator.ban(&time::Instant::now(), hashes.iter().cloned()); - - let invalid = self.pool.write().remove_invalid(hashes); - - let mut listener = self.listener.write(); - for tx in &invalid { - listener.invalid(&tx.hash); - } - - invalid - } - - /// Get an iterator for ready transactions ordered by priority - pub fn ready(&self) -> impl Iterator> { - self.pool.read().ready() - } - - /// Returns pool status. - pub fn status(&self) -> base::Status { - self.pool.read().status() - } - - /// Returns transaction hash - #[cfg(test)] - fn hash_of(&self, xt: &ExtrinsicFor) -> ExHash { - self.api.hash_and_length(xt).0 - } + /// Imports a bunch of unverified extrinsics to the pool + pub fn submit_at( + &self, + at: &BlockId, + xts: T, + ) -> Result, B::Error>>, B::Error> + where + T: IntoIterator>, + { + let block_number = self + .api + .block_id_to_number(at)? + .ok_or_else(|| error::ErrorKind::Msg(format!("Invalid block id: {:?}", at)).into())?; + + let results = xts + .into_iter() + .map(|xt| -> Result<_, B::Error> { + let (hash, bytes) = self.api.hash_and_length(&xt); + if self.rotator.is_banned(&hash) { + bail!(error::Error::from(error::ErrorKind::TemporarilyBanned)) + } + + match self.api.validate_transaction(at, xt.clone())? { + TransactionValidity::Valid { + priority, + requires, + provides, + longevity, + } => Ok(base::Transaction { + data: xt, + bytes, + hash, + priority, + requires, + provides, + valid_till: block_number.as_().saturating_add(longevity), + }), + TransactionValidity::Invalid(e) => { + bail!(error::Error::from(error::ErrorKind::InvalidTransaction(e))) + } + TransactionValidity::Unknown(e) => { + self.listener.write().invalid(&hash); + bail!(error::Error::from( + error::ErrorKind::UnknownTransactionValidity(e) + )) + } + } + }) + .map(|tx| { + let imported = self.pool.write().import(tx?)?; + + if let base::Imported::Ready { .. 
} = imported { + self.import_notification_sinks + .lock() + .retain(|sink| sink.unbounded_send(()).is_ok()); + } + + let mut listener = self.listener.write(); + fire_events(&mut *listener, &imported); + Ok(imported.hash().clone()) + }) + .collect::<Vec<_>>(); + + let removed = self.enforce_limits(); + + Ok(results + .into_iter() + .map(|res| match res { + Ok(ref hash) if removed.contains(hash) => { + Err(error::Error::from(error::ErrorKind::ImmediatelyDropped).into()) + } + other => other, + }) + .collect()) + } + + fn enforce_limits(&self) -> HashSet<ExHash<B>> { + let status = self.pool.read().status(); + let ready_limit = &self.options.ready; + let future_limit = &self.options.future; + + debug!(target: "txpool", "Pool Status: {:?}", status); + + if ready_limit.is_exceeded(status.ready, status.ready_bytes) + || future_limit.is_exceeded(status.future, status.future_bytes) + { + // clean up the pool + let removed = { + let mut pool = self.pool.write(); + let removed = pool + .enforce_limits(ready_limit, future_limit) + .into_iter() + .map(|x| x.hash.clone()) + .collect::<HashSet<_>>(); + // ban all removed transactions + self.rotator.ban( + &std::time::Instant::now(), + removed.iter().map(|x| x.clone()), + ); + removed + }; + // run notifications + let mut listener = self.listener.write(); + for h in &removed { + listener.dropped(h, None); + } + + removed + } else { + Default::default() + } + } + + /// Imports one unverified extrinsic to the pool + pub fn submit_one( + &self, + at: &BlockId<B::Block>, + xt: ExtrinsicFor<B>, + ) -> Result<ExHash<B>, B::Error> { + Ok(self + .submit_at(at, ::std::iter::once(xt))? + .pop() + .expect("One extrinsic passed; one result returned; qed")?) + } + + /// Imports a single extrinsic and starts watching its progress in the pool. + pub fn submit_and_watch( + &self, + at: &BlockId<B::Block>, + xt: ExtrinsicFor<B>, + ) -> Result<Watcher<ExHash<B>, BlockHash<B>>, B::Error> { + let hash = self.api.hash_and_length(&xt).0; + let watcher = self.listener.write().create_watcher(hash); + self.submit_one(at, xt)?; + Ok(watcher) + } + + /// Prunes ready transactions. + /// + /// Used to clear the pool of transactions that were part of a recently imported block. + /// To perform pruning we need the tags that each extrinsic provides, and to avoid calling + /// into the runtime too often we first look up all extrinsics that are in the pool and get + /// their provided tags from there. Otherwise we query the runtime at the `parent` block. + pub fn prune( + &self, + at: &BlockId<B::Block>, + parent: &BlockId<B::Block>, + extrinsics: &[ExtrinsicFor<B>], + ) -> Result<(), B::Error> { + let mut tags = Vec::with_capacity(extrinsics.len()); + // Get details of all extrinsics that are already in the pool + let hashes = extrinsics + .iter() + .map(|extrinsic| self.api.hash_and_length(extrinsic).0) + .collect::<Vec<_>>(); + let in_pool = self.pool.read().by_hash(&hashes); + { + // Zip the ones from the pool with the full list (we get pairs `(Extrinsic, Option)`) + let all = extrinsics.iter().zip(in_pool.iter()); + + for (extrinsic, existing_in_pool) in all { + match *existing_in_pool { + // reuse the tags for extrinsics that were found in the pool + Some(ref transaction) => { + tags.extend(transaction.provides.iter().cloned()); + } + // if it's not found in the pool, query the runtime at the parent block + // to get validity info and tags that the extrinsic provides. + None => { + let validity = self.api.validate_transaction(parent, extrinsic.clone()); + match validity { + Ok(TransactionValidity::Valid { mut provides, .. 
}) => { + tags.append(&mut provides); + } + // silently ignore invalid extrinsics, + // because they might just be inherents + _ => {} + } + } + } + } + } + + self.prune_tags( + at, + tags, + in_pool + .into_iter() + .filter_map(|x| x) + .map(|x| x.hash.clone()), + )?; + + Ok(()) + } + + /// Prunes ready transactions that provide the given list of tags. + /// + /// Given tags are assumed to be always provided now, so all transactions + /// in the Future Queue that require that particular tag (and have other + /// requirements satisfied) are promoted to the Ready Queue. + /// + /// Moreover for each provided tag we remove transactions in the pool that: + /// 1. Provide that tag directly + /// 2. Are a dependency of a pruned transaction. + /// + /// By removing predecessor transactions as well we might actually end up + /// pruning too much, so all removed transactions are reverified against + /// the runtime (`validate_transaction`) to make sure they are invalid. + /// + /// However we avoid revalidating transactions that are contained within + /// the second parameter of `known_imported_hashes`. These transactions + /// (if pruned) are not revalidated and become temporarily banned to + /// prevent importing them in the (near) future. + pub fn prune_tags( + &self, + at: &BlockId<B::Block>, + tags: impl IntoIterator<Item = Tag>, + known_imported_hashes: impl IntoIterator<Item = ExHash<B>> + Clone, + ) -> Result<(), B::Error> { + // Perform tag-based pruning in the base pool + let status = self.pool.write().prune_tags(tags); + // Notify event listeners of all transactions + // that were promoted to `Ready` or were dropped. + { + let mut listener = self.listener.write(); + for promoted in &status.promoted { + fire_events(&mut *listener, promoted); + } + for f in &status.failed { + listener.dropped(f, None); + } + } + // make sure that we don't revalidate extrinsics that were part of the recently + // imported block. This is especially important for UTXO-like chains because the + // inputs are pruned, so such a transaction would go to the future queue again. + self.rotator.ban( + &std::time::Instant::now(), + known_imported_hashes.clone().into_iter(), + ); + + // try to re-submit pruned transactions since some of them might still be valid. + // note that `known_imported_hashes` will be rejected here due to the temporary ban. + let hashes = status + .pruned + .iter() + .map(|tx| tx.hash.clone()) + .collect::<Vec<_>>(); + let results = self.submit_at(at, status.pruned.into_iter().map(|tx| tx.data.clone()))?; + + // Collect the hashes of transactions that now became invalid (meaning that they were successfully pruned). + let hashes = results.into_iter().enumerate().filter_map(|(idx, r)| { + match r.map_err(error::IntoPoolError::into_pool_error) { + Err(Ok(err)) => match err.kind() { + error::ErrorKind::InvalidTransaction(_) => Some(hashes[idx].clone()), + _ => None, + }, + _ => None, + } + }); + // Fire `pruned` notifications for collected hashes and make sure to include + // `known_imported_hashes` since they were just imported as part of the block. + let hashes = hashes.chain(known_imported_hashes.into_iter()); + { + let header_hash = self.api.block_id_to_hash(at)?.ok_or_else(|| { + error::ErrorKind::Msg(format!("Invalid block id: {:?}", at)).into() + })?; + let mut listener = self.listener.write(); + for h in hashes { + listener.pruned(header_hash, &h); + } + } + // perform regular cleanup of old transactions in the pool + // and update temporary bans. + self.clear_stale(at)?; + Ok(()) + } + + /// Removes stale transactions from the pool. 
+ /// + /// Stale transactions are transactions beyond their longevity period. + /// Note this function does not remove transactions that are already included in the chain. + /// See `prune_tags` if you want this. + pub fn clear_stale(&self, at: &BlockId<B::Block>) -> Result<(), B::Error> { + let block_number = self + .api + .block_id_to_number(at)? + .ok_or_else(|| error::ErrorKind::Msg(format!("Invalid block id: {:?}", at)).into())? + .as_(); + let now = time::Instant::now(); + let to_remove = { + self.ready() + .filter(|tx| self.rotator.ban_if_stale(&now, block_number, &tx)) + .map(|tx| tx.hash.clone()) + .collect::<Vec<_>>() + }; + let futures_to_remove: Vec<ExHash<B>> = { + let p = self.pool.read(); + let mut hashes = Vec::new(); + for tx in p.futures() { + if self.rotator.ban_if_stale(&now, block_number, &tx) { + hashes.push(tx.hash.clone()); + } + } + hashes + }; + // removing old transactions + self.remove_invalid(&to_remove); + self.remove_invalid(&futures_to_remove); + // clear banned transactions timeouts + self.rotator.clear_timeouts(&now); + + Ok(()) + } + + /// Create a new transaction pool. + pub fn new(options: Options, api: B) -> Self { + Pool { + api, + options, + listener: Default::default(), + pool: Default::default(), + import_notification_sinks: Default::default(), + rotator: Default::default(), + } + } + + /// Return an event stream of transactions imported to the pool. + pub fn import_notification_stream(&self) -> EventStream { + let (sink, stream) = mpsc::unbounded(); + self.import_notification_sinks.lock().push(sink); + stream + } + + /// Invoked when extrinsics are broadcasted. + pub fn on_broadcasted(&self, propagated: HashMap<ExHash<B>, Vec<String>>) { + let mut listener = self.listener.write(); + for (hash, peers) in propagated.into_iter() { + listener.broadcasted(&hash, peers); + } + } + + /// Remove from the pool. + pub fn remove_invalid(&self, hashes: &[ExHash<B>]) -> Vec<TransactionFor<B>> { + // temporarily ban invalid transactions + debug!(target: "txpool", "Banning invalid transactions: {:?}", hashes); + self.rotator + .ban(&time::Instant::now(), hashes.iter().cloned()); + + let invalid = self.pool.write().remove_invalid(hashes); + + let mut listener = self.listener.write(); + for tx in &invalid { + listener.invalid(&tx.hash); + } + + invalid + } + + /// Get an iterator for ready transactions ordered by priority + pub fn ready(&self) -> impl Iterator<Item = TransactionFor<B>> { + self.pool.read().ready() + } + + /// Returns pool status. 
+ pub fn status(&self) -> base::Status { + self.pool.read().status() + } + + /// Returns transaction hash + #[cfg(test)] + fn hash_of(&self, xt: &ExtrinsicFor) -> ExHash { + self.api.hash_and_length(xt).0 + } } -fn fire_events( - listener: &mut Listener, - imported: &base::Imported, -) where - H: hash::Hash + Eq + traits::Member + Serialize, - H2: Clone, +fn fire_events(listener: &mut Listener, imported: &base::Imported) +where + H: hash::Hash + Eq + traits::Member + Serialize, + H2: Clone, { - match *imported { - base::Imported::Ready { ref promoted, ref failed, ref removed, ref hash } => { - listener.ready(hash, None); - for f in failed { - listener.invalid(f); - } - for r in removed { - listener.dropped(&r.hash, Some(hash)); - } - for p in promoted { - listener.ready(p, None); - } - }, - base::Imported::Future { ref hash } => { - listener.future(hash) - }, - } + match *imported { + base::Imported::Ready { + ref promoted, + ref failed, + ref removed, + ref hash, + } => { + listener.ready(hash, None); + for f in failed { + listener.invalid(f); + } + for r in removed { + listener.dropped(&r.hash, Some(hash)); + } + for p in promoted { + listener.ready(p, None); + } + } + base::Imported::Future { ref hash } => listener.future(hash), + } } #[cfg(test)] mod tests { - use super::*; - use futures::Stream; - use parity_codec::Encode; - use test_runtime::{Block, Extrinsic, Transfer, H256, AccountId}; - use assert_matches::assert_matches; - use crate::watcher; - - #[derive(Debug, Default)] - struct TestApi { - delay: Mutex>>, - } - - impl ChainApi for TestApi { - type Block = Block; - type Hash = u64; - type Error = error::Error; - - /// Verify extrinsic at given block. - fn validate_transaction(&self, at: &BlockId, uxt: ExtrinsicFor) -> Result { - - let block_number = self.block_id_to_number(at)?.unwrap(); - let nonce = uxt.transfer().nonce; - - // This is used to control the test flow. - if nonce > 0 { - let opt = self.delay.lock().take(); - if let Some(delay) = opt { - if delay.recv().is_err() { - println!("Error waiting for delay!"); - } - } - } - - if nonce < block_number { - Ok(TransactionValidity::Invalid(0)) - } else { - Ok(TransactionValidity::Valid { - priority: 4, - requires: if nonce > block_number { vec![vec![nonce as u8 - 1]] } else { vec![] }, - provides: vec![vec![nonce as u8]], - longevity: 3, - }) - } - } - - /// Returns a block number given the block id. - fn block_id_to_number(&self, at: &BlockId) -> Result>, Self::Error> { - Ok(match at { - BlockId::Number(num) => Some(*num), - BlockId::Hash(_) => None, - }) - } - - /// Returns a block hash given the block id. - fn block_id_to_hash(&self, at: &BlockId) -> Result>, Self::Error> { - Ok(match at { - BlockId::Number(num) => Some(H256::from_low_u64_be(*num)).into(), - BlockId::Hash(_) => None, - }) - } - - /// Hash the extrinsic. 
- fn hash_and_length(&self, uxt: &ExtrinsicFor) -> (Self::Hash, usize) { - let len = uxt.encode().len(); - ( - (H256::from(uxt.transfer().from.clone()).to_low_u64_be() << 5) + uxt.transfer().nonce, - len - ) - } - } - - fn uxt(transfer: Transfer) -> Extrinsic { - Extrinsic::Transfer(transfer, Default::default()) - } - - fn pool() -> Pool { - Pool::new(Default::default(), TestApi::default()) - } - - - #[test] - fn should_validate_and_import_transaction() { - // given - let pool = pool(); - - // when - let hash = pool.submit_one(&BlockId::Number(0), uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 0, - })).unwrap(); - - // then - assert_eq!(pool.ready().map(|v| v.hash).collect::>(), vec![hash]); - } - - #[test] - fn should_reject_if_temporarily_banned() { - // given - let pool = pool(); - let uxt = uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 0, - }); - - // when - pool.rotator.ban(&time::Instant::now(), vec![pool.hash_of(&uxt)]); - let res = pool.submit_one(&BlockId::Number(0), uxt); - assert_eq!(pool.status().ready, 0); - assert_eq!(pool.status().future, 0); - - // then - assert_matches!(res.unwrap_err().kind(), error::ErrorKind::TemporarilyBanned); - } - - #[test] - fn should_notify_about_pool_events() { - let stream = { - // given - let pool = pool(); - let stream = pool.import_notification_stream(); - - // when - let _hash = pool.submit_one(&BlockId::Number(0), uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 0, - })).unwrap(); - let _hash = pool.submit_one(&BlockId::Number(0), uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 1, - })).unwrap(); - // future doesn't count - let _hash = pool.submit_one(&BlockId::Number(0), uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 3, - })).unwrap(); - - assert_eq!(pool.status().ready, 2); - assert_eq!(pool.status().future, 1); - stream - }; - - // then - let mut it = stream.wait(); - assert_eq!(it.next(), Some(Ok(()))); - assert_eq!(it.next(), Some(Ok(()))); - assert_eq!(it.next(), None); - } - - #[test] - fn should_clear_stale_transactions() { - // given - let pool = pool(); - let hash1 = pool.submit_one(&BlockId::Number(0), uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 0, - })).unwrap(); - let hash2 = pool.submit_one(&BlockId::Number(0), uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 1, - })).unwrap(); - let hash3 = pool.submit_one(&BlockId::Number(0), uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 3, - })).unwrap(); - - // when - pool.clear_stale(&BlockId::Number(5)).unwrap(); - - // then - assert_eq!(pool.ready().count(), 0); - assert_eq!(pool.status().future, 0); - assert_eq!(pool.status().ready, 0); - // make sure they are temporarily banned as well - assert!(pool.rotator.is_banned(&hash1)); - assert!(pool.rotator.is_banned(&hash2)); - 
assert!(pool.rotator.is_banned(&hash3)); - } - - #[test] - fn should_ban_mined_transactions() { - // given - let pool = pool(); - let hash1 = pool.submit_one(&BlockId::Number(0), uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 0, - })).unwrap(); - - // when - pool.prune_tags(&BlockId::Number(1), vec![vec![0]], vec![hash1.clone()]).unwrap(); - - // then - assert!(pool.rotator.is_banned(&hash1)); - } - - #[test] - fn should_limit_futures() { - // given - let limit = Limit { - count: 100, - total_bytes: 200, - }; - let pool = Pool::new(Options { - ready: limit.clone(), - future: limit.clone(), - }, TestApi::default()); - - let hash1 = pool.submit_one(&BlockId::Number(0), uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 1, - })).unwrap(); - assert_eq!(pool.status().future, 1); - - // when - let hash2 = pool.submit_one(&BlockId::Number(0), uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(2)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 10, - })).unwrap(); - - // then - assert_eq!(pool.status().future, 1); - assert!(pool.rotator.is_banned(&hash1)); - assert!(!pool.rotator.is_banned(&hash2)); - } - - #[test] - fn should_error_if_reject_immediately() { - // given - let limit = Limit { - count: 100, - total_bytes: 10, - }; - let pool = Pool::new(Options { - ready: limit.clone(), - future: limit.clone(), - }, TestApi::default()); - - // when - pool.submit_one(&BlockId::Number(0), uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 1, - })).unwrap_err(); - - // then - assert_eq!(pool.status().ready, 0); - assert_eq!(pool.status().future, 0); - } - - - mod listener { - use super::*; - - #[test] - fn should_trigger_ready_and_finalized() { - // given - let pool = pool(); - let watcher = pool.submit_and_watch(&BlockId::Number(0), uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 0, - })).unwrap(); - assert_eq!(pool.status().ready, 1); - assert_eq!(pool.status().future, 0); - - // when - pool.prune_tags(&BlockId::Number(2), vec![vec![0u8]], vec![]).unwrap(); - assert_eq!(pool.status().ready, 0); - assert_eq!(pool.status().future, 0); - - // then - let mut stream = watcher.into_stream().wait(); - assert_eq!(stream.next(), Some(Ok(watcher::Status::Ready))); - assert_eq!(stream.next(), Some(Ok(watcher::Status::Finalized(H256::from_low_u64_be(2).into())))); - assert_eq!(stream.next(), None); - } - - #[test] - fn should_trigger_ready_and_finalized_when_pruning_via_hash() { - // given - let pool = pool(); - let watcher = pool.submit_and_watch(&BlockId::Number(0), uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 0, - })).unwrap(); - assert_eq!(pool.status().ready, 1); - assert_eq!(pool.status().future, 0); - - // when - pool.prune_tags(&BlockId::Number(2), vec![vec![0u8]], vec![2u64]).unwrap(); - assert_eq!(pool.status().ready, 0); - assert_eq!(pool.status().future, 0); - - // then - let mut stream = watcher.into_stream().wait(); - assert_eq!(stream.next(), Some(Ok(watcher::Status::Ready))); - assert_eq!(stream.next(), 
Some(Ok(watcher::Status::Finalized(H256::from_low_u64_be(2).into())))); - assert_eq!(stream.next(), None); - } - - #[test] - fn should_trigger_future_and_ready_after_promoted() { - // given - let pool = pool(); - let watcher = pool.submit_and_watch(&BlockId::Number(0), uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 1, - })).unwrap(); - assert_eq!(pool.status().ready, 0); - assert_eq!(pool.status().future, 1); - - // when - pool.submit_one(&BlockId::Number(0), uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 0, - })).unwrap(); - assert_eq!(pool.status().ready, 2); - - // then - let mut stream = watcher.into_stream().wait(); - assert_eq!(stream.next(), Some(Ok(watcher::Status::Future))); - assert_eq!(stream.next(), Some(Ok(watcher::Status::Ready))); - } - - #[test] - fn should_trigger_invalid_and_ban() { - // given - let pool = pool(); - let uxt = uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 0, - }); - let watcher = pool.submit_and_watch(&BlockId::Number(0), uxt).unwrap(); - assert_eq!(pool.status().ready, 1); - - // when - pool.remove_invalid(&[*watcher.hash()]); - - - // then - let mut stream = watcher.into_stream().wait(); - assert_eq!(stream.next(), Some(Ok(watcher::Status::Ready))); - assert_eq!(stream.next(), Some(Ok(watcher::Status::Invalid))); - assert_eq!(stream.next(), None); - } - - #[test] - fn should_trigger_broadcasted() { - // given - let pool = pool(); - let uxt = uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 0, - }); - let watcher = pool.submit_and_watch(&BlockId::Number(0), uxt).unwrap(); - assert_eq!(pool.status().ready, 1); - - // when - let mut map = HashMap::new(); - let peers = vec!["a".into(), "b".into(), "c".into()]; - map.insert(*watcher.hash(), peers.clone()); - pool.on_broadcasted(map); - - - // then - let mut stream = watcher.into_stream().wait(); - assert_eq!(stream.next(), Some(Ok(watcher::Status::Ready))); - assert_eq!(stream.next(), Some(Ok(watcher::Status::Broadcast(peers)))); - } - - #[test] - fn should_trigger_dropped() { - // given - let limit = Limit { - count: 1, - total_bytes: 1000, - }; - let pool = Pool::new(Options { - ready: limit.clone(), - future: limit.clone(), - }, TestApi::default()); - - let xt = uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 0, - }); - let watcher = pool.submit_and_watch(&BlockId::Number(0), xt).unwrap(); - assert_eq!(pool.status().ready, 1); - - // when - let xt = uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(2)), - to: AccountId::from_h256(H256::from_low_u64_be(1)), - amount: 4, - nonce: 1, - }); - pool.submit_one(&BlockId::Number(1), xt).unwrap(); - assert_eq!(pool.status().ready, 1); - - // then - let mut stream = watcher.into_stream().wait(); - assert_eq!(stream.next(), Some(Ok(watcher::Status::Ready))); - assert_eq!(stream.next(), Some(Ok(watcher::Status::Dropped))); - } - - #[test] - fn should_handle_pruning_in_the_middle_of_import() { - let _ = env_logger::try_init(); - // given - let (ready, is_ready) = std::sync::mpsc::sync_channel(0); - let (tx, rx) = std::sync::mpsc::sync_channel(1); - let mut api = 
TestApi::default(); - api.delay = Mutex::new(rx.into()); - let pool = Arc::new(Pool::new(Default::default(), api)); - - // when - let xt = uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 1, - }); - - // This transaction should go to future, since we use `nonce: 1` - let pool2 = pool.clone(); - std::thread::spawn(move || { - pool2.submit_one(&BlockId::Number(0), xt).unwrap(); - ready.send(()).unwrap(); - }); - - // But now before the previous one is imported we import - // the one that it depends on. - let xt = uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 4, - nonce: 0, - }); - // The tag the above transaction provides (TestApi is using just nonce as u8) - let provides = vec![0_u8]; - pool.submit_one(&BlockId::Number(0), xt).unwrap(); - assert_eq!(pool.status().ready, 1); - - // Now block import happens before the second transaction is able to finish verification. - pool.prune_tags(&BlockId::Number(1), vec![provides], vec![]).unwrap(); - assert_eq!(pool.status().ready, 0); - - - // so when we release the verification of the previous one it will have - // something in `requires`, but should go to ready directly, since the previous transaction was imported - // correctly. - tx.send(()).unwrap(); - - // then - is_ready.recv().unwrap(); // wait for finish - assert_eq!(pool.status().ready, 1); - assert_eq!(pool.status().future, 0); - } - } + use super::*; + use crate::watcher; + use assert_matches::assert_matches; + use futures::Stream; + use parity_codec::Encode; + use test_runtime::{AccountId, Block, Extrinsic, Transfer, H256}; + + #[derive(Debug, Default)] + struct TestApi { + delay: Mutex>>, + } + + impl ChainApi for TestApi { + type Block = Block; + type Hash = u64; + type Error = error::Error; + + /// Verify extrinsic at given block. + fn validate_transaction( + &self, + at: &BlockId, + uxt: ExtrinsicFor, + ) -> Result { + let block_number = self.block_id_to_number(at)?.unwrap(); + let nonce = uxt.transfer().nonce; + + // This is used to control the test flow. + if nonce > 0 { + let opt = self.delay.lock().take(); + if let Some(delay) = opt { + if delay.recv().is_err() { + println!("Error waiting for delay!"); + } + } + } + + if nonce < block_number { + Ok(TransactionValidity::Invalid(0)) + } else { + Ok(TransactionValidity::Valid { + priority: 4, + requires: if nonce > block_number { + vec![vec![nonce as u8 - 1]] + } else { + vec![] + }, + provides: vec![vec![nonce as u8]], + longevity: 3, + }) + } + } + + /// Returns a block number given the block id. + fn block_id_to_number( + &self, + at: &BlockId, + ) -> Result>, Self::Error> { + Ok(match at { + BlockId::Number(num) => Some(*num), + BlockId::Hash(_) => None, + }) + } + + /// Returns a block hash given the block id. + fn block_id_to_hash( + &self, + at: &BlockId, + ) -> Result>, Self::Error> { + Ok(match at { + BlockId::Number(num) => Some(H256::from_low_u64_be(*num)).into(), + BlockId::Hash(_) => None, + }) + } + + /// Hash the extrinsic. 
+ fn hash_and_length(&self, uxt: &ExtrinsicFor) -> (Self::Hash, usize) { + let len = uxt.encode().len(); + ( + (H256::from(uxt.transfer().from.clone()).to_low_u64_be() << 5) + + uxt.transfer().nonce, + len, + ) + } + } + + fn uxt(transfer: Transfer) -> Extrinsic { + Extrinsic::Transfer(transfer, Default::default()) + } + + fn pool() -> Pool { + Pool::new(Default::default(), TestApi::default()) + } + + #[test] + fn should_validate_and_import_transaction() { + // given + let pool = pool(); + + // when + let hash = pool + .submit_one( + &BlockId::Number(0), + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 0, + }), + ) + .unwrap(); + + // then + assert_eq!(pool.ready().map(|v| v.hash).collect::>(), vec![hash]); + } + + #[test] + fn should_reject_if_temporarily_banned() { + // given + let pool = pool(); + let uxt = uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 0, + }); + + // when + pool.rotator + .ban(&time::Instant::now(), vec![pool.hash_of(&uxt)]); + let res = pool.submit_one(&BlockId::Number(0), uxt); + assert_eq!(pool.status().ready, 0); + assert_eq!(pool.status().future, 0); + + // then + assert_matches!(res.unwrap_err().kind(), error::ErrorKind::TemporarilyBanned); + } + + #[test] + fn should_notify_about_pool_events() { + let stream = { + // given + let pool = pool(); + let stream = pool.import_notification_stream(); + + // when + let _hash = pool + .submit_one( + &BlockId::Number(0), + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 0, + }), + ) + .unwrap(); + let _hash = pool + .submit_one( + &BlockId::Number(0), + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 1, + }), + ) + .unwrap(); + // future doesn't count + let _hash = pool + .submit_one( + &BlockId::Number(0), + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 3, + }), + ) + .unwrap(); + + assert_eq!(pool.status().ready, 2); + assert_eq!(pool.status().future, 1); + stream + }; + + // then + let mut it = stream.wait(); + assert_eq!(it.next(), Some(Ok(()))); + assert_eq!(it.next(), Some(Ok(()))); + assert_eq!(it.next(), None); + } + + #[test] + fn should_clear_stale_transactions() { + // given + let pool = pool(); + let hash1 = pool + .submit_one( + &BlockId::Number(0), + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 0, + }), + ) + .unwrap(); + let hash2 = pool + .submit_one( + &BlockId::Number(0), + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 1, + }), + ) + .unwrap(); + let hash3 = pool + .submit_one( + &BlockId::Number(0), + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 3, + }), + ) + .unwrap(); + + // when + pool.clear_stale(&BlockId::Number(5)).unwrap(); + + // then + assert_eq!(pool.ready().count(), 0); + assert_eq!(pool.status().future, 0); + assert_eq!(pool.status().ready, 0); + // make sure they are temporarily banned as well + 
assert!(pool.rotator.is_banned(&hash1)); + assert!(pool.rotator.is_banned(&hash2)); + assert!(pool.rotator.is_banned(&hash3)); + } + + #[test] + fn should_ban_mined_transactions() { + // given + let pool = pool(); + let hash1 = pool + .submit_one( + &BlockId::Number(0), + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 0, + }), + ) + .unwrap(); + + // when + pool.prune_tags(&BlockId::Number(1), vec![vec![0]], vec![hash1.clone()]) + .unwrap(); + + // then + assert!(pool.rotator.is_banned(&hash1)); + } + + #[test] + fn should_limit_futures() { + // given + let limit = Limit { + count: 100, + total_bytes: 200, + }; + let pool = Pool::new( + Options { + ready: limit.clone(), + future: limit.clone(), + }, + TestApi::default(), + ); + + let hash1 = pool + .submit_one( + &BlockId::Number(0), + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 1, + }), + ) + .unwrap(); + assert_eq!(pool.status().future, 1); + + // when + let hash2 = pool + .submit_one( + &BlockId::Number(0), + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(2)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 10, + }), + ) + .unwrap(); + + // then + assert_eq!(pool.status().future, 1); + assert!(pool.rotator.is_banned(&hash1)); + assert!(!pool.rotator.is_banned(&hash2)); + } + + #[test] + fn should_error_if_reject_immediately() { + // given + let limit = Limit { + count: 100, + total_bytes: 10, + }; + let pool = Pool::new( + Options { + ready: limit.clone(), + future: limit.clone(), + }, + TestApi::default(), + ); + + // when + pool.submit_one( + &BlockId::Number(0), + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 1, + }), + ) + .unwrap_err(); + + // then + assert_eq!(pool.status().ready, 0); + assert_eq!(pool.status().future, 0); + } + + mod listener { + use super::*; + + #[test] + fn should_trigger_ready_and_finalized() { + // given + let pool = pool(); + let watcher = pool + .submit_and_watch( + &BlockId::Number(0), + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 0, + }), + ) + .unwrap(); + assert_eq!(pool.status().ready, 1); + assert_eq!(pool.status().future, 0); + + // when + pool.prune_tags(&BlockId::Number(2), vec![vec![0u8]], vec![]) + .unwrap(); + assert_eq!(pool.status().ready, 0); + assert_eq!(pool.status().future, 0); + + // then + let mut stream = watcher.into_stream().wait(); + assert_eq!(stream.next(), Some(Ok(watcher::Status::Ready))); + assert_eq!( + stream.next(), + Some(Ok(watcher::Status::Finalized( + H256::from_low_u64_be(2).into() + ))) + ); + assert_eq!(stream.next(), None); + } + + #[test] + fn should_trigger_ready_and_finalized_when_pruning_via_hash() { + // given + let pool = pool(); + let watcher = pool + .submit_and_watch( + &BlockId::Number(0), + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 0, + }), + ) + .unwrap(); + assert_eq!(pool.status().ready, 1); + assert_eq!(pool.status().future, 0); + + // when + pool.prune_tags(&BlockId::Number(2), vec![vec![0u8]], vec![2u64]) + .unwrap(); + assert_eq!(pool.status().ready, 0); + assert_eq!(pool.status().future, 0); + + // 
then + let mut stream = watcher.into_stream().wait(); + assert_eq!(stream.next(), Some(Ok(watcher::Status::Ready))); + assert_eq!( + stream.next(), + Some(Ok(watcher::Status::Finalized( + H256::from_low_u64_be(2).into() + ))) + ); + assert_eq!(stream.next(), None); + } + + #[test] + fn should_trigger_future_and_ready_after_promoted() { + // given + let pool = pool(); + let watcher = pool + .submit_and_watch( + &BlockId::Number(0), + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 1, + }), + ) + .unwrap(); + assert_eq!(pool.status().ready, 0); + assert_eq!(pool.status().future, 1); + + // when + pool.submit_one( + &BlockId::Number(0), + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 0, + }), + ) + .unwrap(); + assert_eq!(pool.status().ready, 2); + + // then + let mut stream = watcher.into_stream().wait(); + assert_eq!(stream.next(), Some(Ok(watcher::Status::Future))); + assert_eq!(stream.next(), Some(Ok(watcher::Status::Ready))); + } + + #[test] + fn should_trigger_invalid_and_ban() { + // given + let pool = pool(); + let uxt = uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 0, + }); + let watcher = pool.submit_and_watch(&BlockId::Number(0), uxt).unwrap(); + assert_eq!(pool.status().ready, 1); + + // when + pool.remove_invalid(&[*watcher.hash()]); + + // then + let mut stream = watcher.into_stream().wait(); + assert_eq!(stream.next(), Some(Ok(watcher::Status::Ready))); + assert_eq!(stream.next(), Some(Ok(watcher::Status::Invalid))); + assert_eq!(stream.next(), None); + } + + #[test] + fn should_trigger_broadcasted() { + // given + let pool = pool(); + let uxt = uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 0, + }); + let watcher = pool.submit_and_watch(&BlockId::Number(0), uxt).unwrap(); + assert_eq!(pool.status().ready, 1); + + // when + let mut map = HashMap::new(); + let peers = vec!["a".into(), "b".into(), "c".into()]; + map.insert(*watcher.hash(), peers.clone()); + pool.on_broadcasted(map); + + // then + let mut stream = watcher.into_stream().wait(); + assert_eq!(stream.next(), Some(Ok(watcher::Status::Ready))); + assert_eq!(stream.next(), Some(Ok(watcher::Status::Broadcast(peers)))); + } + + #[test] + fn should_trigger_dropped() { + // given + let limit = Limit { + count: 1, + total_bytes: 1000, + }; + let pool = Pool::new( + Options { + ready: limit.clone(), + future: limit.clone(), + }, + TestApi::default(), + ); + + let xt = uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 0, + }); + let watcher = pool.submit_and_watch(&BlockId::Number(0), xt).unwrap(); + assert_eq!(pool.status().ready, 1); + + // when + let xt = uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(2)), + to: AccountId::from_h256(H256::from_low_u64_be(1)), + amount: 4, + nonce: 1, + }); + pool.submit_one(&BlockId::Number(1), xt).unwrap(); + assert_eq!(pool.status().ready, 1); + + // then + let mut stream = watcher.into_stream().wait(); + assert_eq!(stream.next(), Some(Ok(watcher::Status::Ready))); + assert_eq!(stream.next(), Some(Ok(watcher::Status::Dropped))); + } + + #[test] + fn 
should_handle_pruning_in_the_middle_of_import() {
+            let _ = env_logger::try_init();
+            // given
+            let (ready, is_ready) = std::sync::mpsc::sync_channel(0);
+            let (tx, rx) = std::sync::mpsc::sync_channel(1);
+            let mut api = TestApi::default();
+            api.delay = Mutex::new(rx.into());
+            let pool = Arc::new(Pool::new(Default::default(), api));
+
+            // when
+            let xt = uxt(Transfer {
+                from: AccountId::from_h256(H256::from_low_u64_be(1)),
+                to: AccountId::from_h256(H256::from_low_u64_be(2)),
+                amount: 5,
+                nonce: 1,
+            });
+
+            // This transaction should go to future, since we use `nonce: 1`
+            let pool2 = pool.clone();
+            std::thread::spawn(move || {
+                pool2.submit_one(&BlockId::Number(0), xt).unwrap();
+                ready.send(()).unwrap();
+            });
+
+            // But now, before the previous one is imported, we import
+            // the one that it depends on.
+            let xt = uxt(Transfer {
+                from: AccountId::from_h256(H256::from_low_u64_be(1)),
+                to: AccountId::from_h256(H256::from_low_u64_be(2)),
+                amount: 4,
+                nonce: 0,
+            });
+            // The tag the above transaction provides (TestApi uses just the nonce as u8)
+            let provides = vec![0_u8];
+            pool.submit_one(&BlockId::Number(0), xt).unwrap();
+            assert_eq!(pool.status().ready, 1);
+
+            // Now block import happens before the second transaction is able to
+            // finish verification.
+            pool.prune_tags(&BlockId::Number(1), vec![provides], vec![])
+                .unwrap();
+            assert_eq!(pool.status().ready, 0);
+
+            // So when we release the verification of the previous one it will have
+            // something in `requires`, but it should go to ready directly, since the
+            // previous transaction was imported correctly.
+            tx.send(()).unwrap();
+
+            // then
+            is_ready.recv().unwrap(); // wait for finish
+            assert_eq!(pool.status().ready, 1);
+            assert_eq!(pool.status().future, 0);
+        }
+    }
 }

diff --git a/core/transaction-pool/graph/src/ready.rs b/core/transaction-pool/graph/src/ready.rs
index befb1b60cc..a87551df27 100644
--- a/core/transaction-pool/graph/src/ready.rs
+++ b/core/transaction-pool/graph/src/ready.rs
@@ -15,87 +15,92 @@
 // along with Substrate. If not, see <http://www.gnu.org/licenses/>.
 use std::{
-	collections::{HashMap, HashSet, BTreeSet},
-	cmp,
-	hash,
-	sync::Arc,
+    cmp,
+    collections::{BTreeSet, HashMap, HashSet},
+    hash,
+    sync::Arc,
 };
-use serde::Serialize;
-use log::debug;
 use error_chain::bail;
+use log::debug;
 use parking_lot::RwLock;
+use serde::Serialize;
 use sr_primitives::traits::Member;
-use sr_primitives::transaction_validity::{
-	TransactionTag as Tag,
-};
+use sr_primitives::transaction_validity::TransactionTag as Tag;
+use crate::base_pool::Transaction;
 use crate::error;
 use crate::future::WaitingTransaction;
-use crate::base_pool::Transaction;

 /// An in-pool transaction reference.
 ///
 /// Should be cheap to clone.
 #[derive(Debug)]
 pub struct TransactionRef<Hash, Ex> {
-	/// The actual transaction data.
-	pub transaction: Arc<Transaction<Hash, Ex>>,
-	/// Unique id when transaction was inserted into the pool.
-	pub insertion_id: u64,
+    /// The actual transaction data.
+    pub transaction: Arc<Transaction<Hash, Ex>>,
+    /// Unique id when transaction was inserted into the pool.
+    pub insertion_id: u64,
 }

 impl<Hash, Ex> Clone for TransactionRef<Hash, Ex> {
-	fn clone(&self) -> Self {
-		TransactionRef {
-			transaction: self.transaction.clone(),
-			insertion_id: self.insertion_id,
-		}
-	}
+    fn clone(&self) -> Self {
+        TransactionRef {
+            transaction: self.transaction.clone(),
+            insertion_id: self.insertion_id,
+        }
+    }
 }

 impl<Hash, Ex> Ord for TransactionRef<Hash, Ex> {
-	fn cmp(&self, other: &Self) -> cmp::Ordering {
-		self.transaction.priority.cmp(&other.transaction.priority)
-			.then(other.transaction.valid_till.cmp(&self.transaction.valid_till))
-			.then(other.insertion_id.cmp(&self.insertion_id))
-	}
+    fn cmp(&self, other: &Self) -> cmp::Ordering {
+        self.transaction
+            .priority
+            .cmp(&other.transaction.priority)
+            .then(
+                other
+                    .transaction
+                    .valid_till
+                    .cmp(&self.transaction.valid_till),
+            )
+            .then(other.insertion_id.cmp(&self.insertion_id))
+    }
 }
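
As a small worked illustration of this ordering (a sketch reusing the `with_priority(priority, valid_till)` helper from the `should_order_refs` test near the end of this file): higher priority compares as greater; with equal priorities, the reference with the smaller `valid_till` compares as greater; and on a full tie the smaller `insertion_id` wins, so taking `next_back()` of the ordered `best` set always yields the most urgent transaction first.

    // Equal priority (3), but a shorter time-to-live (2 < 3) ranks higher.
    assert!(
        TransactionRef { transaction: Arc::new(with_priority(3, 2)), insertion_id: 1 }
            > TransactionRef { transaction: Arc::new(with_priority(3, 3)), insertion_id: 2 }
    );
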
 impl<Hash, Ex> PartialOrd for TransactionRef<Hash, Ex> {
-	fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
-		Some(self.cmp(other))
-	}
+    fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
+        Some(self.cmp(other))
+    }
 }

 impl<Hash, Ex> PartialEq for TransactionRef<Hash, Ex> {
-	fn eq(&self, other: &Self) -> bool {
-		self.cmp(other) == cmp::Ordering::Equal
-	}
+    fn eq(&self, other: &Self) -> bool {
+        self.cmp(other) == cmp::Ordering::Equal
+    }
 }

 impl<Hash, Ex> Eq for TransactionRef<Hash, Ex> {}

 #[derive(Debug)]
 pub struct ReadyTx<Hash, Ex> {
-	/// A reference to a transaction
-	pub transaction: TransactionRef<Hash, Ex>,
-	/// A list of transactions that get unlocked by this one
-	pub unlocks: Vec<Hash>,
-	/// How many required tags are provided inherently
-	///
-	/// Some transactions might be already pruned from the queue,
-	/// so when we compute ready set we may consider this transactions ready earlier.
-	pub requires_offset: usize,
+    /// A reference to a transaction.
+    pub transaction: TransactionRef<Hash, Ex>,
+    /// A list of transactions that get unlocked by this one.
+    pub unlocks: Vec<Hash>,
+    /// How many required tags are provided inherently.
+    ///
+    /// Some transactions might already be pruned from the queue,
+    /// so when we compute the ready set we may consider these transactions ready earlier.
+    pub requires_offset: usize,
 }

 impl<Hash: Clone, Ex> Clone for ReadyTx<Hash, Ex> {
-	fn clone(&self) -> Self {
-		ReadyTx {
-			transaction: self.transaction.clone(),
-			unlocks: self.unlocks.clone(),
-			requires_offset: self.requires_offset,
-		}
-	}
+    fn clone(&self) -> Self {
+        ReadyTx {
+            transaction: self.transaction.clone(),
+            unlocks: self.unlocks.clone(),
+            requires_offset: self.requires_offset,
+        }
+    }
 }

 const HASH_READY: &str = r#"
@@ -107,518 +112,542 @@ qed
 #[derive(Debug)]
 pub struct ReadyTransactions<Hash: hash::Hash + Eq, Ex> {
-	/// Insertion id
-	insertion_id: u64,
-	/// tags that are provided by Ready transactions
-	provided_tags: HashMap<Tag, Hash>,
-	/// Transactions that are ready (i.e. don't have any requirements external to the pool)
-	ready: Arc<RwLock<HashMap<Hash, ReadyTx<Hash, Ex>>>>,
-	/// Best transactions that are ready to be included to the block without any other previous transaction.
-	best: BTreeSet<TransactionRef<Hash, Ex>>,
+    /// Insertion id.
+    insertion_id: u64,
+    /// Tags that are provided by Ready transactions.
+    provided_tags: HashMap<Tag, Hash>,
+    /// Transactions that are ready (i.e. don't have any requirements external to the pool).
+    ready: Arc<RwLock<HashMap<Hash, ReadyTx<Hash, Ex>>>>,
+    /// Best transactions that are ready to be included in a block, without any other previous transaction.
+ best: BTreeSet>, } impl Default for ReadyTransactions { - fn default() -> Self { - ReadyTransactions { - insertion_id: Default::default(), - provided_tags: Default::default(), - ready: Default::default(), - best: Default::default(), - } - } + fn default() -> Self { + ReadyTransactions { + insertion_id: Default::default(), + provided_tags: Default::default(), + ready: Default::default(), + best: Default::default(), + } + } } impl ReadyTransactions { - /// Borrows a map of tags that are provided by transactions in this queue. - pub fn provided_tags(&self) -> &HashMap { - &self.provided_tags - } - - /// Returns an iterator of ready transactions. - /// - /// Transactions are returned in order: - /// 1. First by the dependencies: - /// - never return transaction that requires a tag, which was not provided by one of the previously returned transactions - /// 2. Then by priority: - /// - If there are two transactions with all requirements satisfied the one with higher priority goes first. - /// 3. Then by the ttl that's left - /// - transactions that are valid for a shorter time go first - /// 4. Lastly we sort by the time in the queue - /// - transactions that are longer in the queue go first - pub fn get(&self) -> impl Iterator>> { - BestIterator { - all: self.ready.clone(), - best: self.best.clone(), - awaiting: Default::default(), - } - } - - /// Imports transactions to the pool of ready transactions. - /// - /// The transaction needs to have all tags satisfied (be ready) by transactions - /// that are in this queue. - /// Returns transactions that were replaced by the one imported. - pub fn import( - &mut self, - tx: WaitingTransaction, - ) -> error::Result>>> { - assert!(tx.is_ready(), "Only ready transactions can be imported."); - assert!(!self.ready.read().contains_key(&tx.transaction.hash), "Transaction is already imported."); - - self.insertion_id += 1; - let insertion_id = self.insertion_id; - let hash = tx.transaction.hash.clone(); - let transaction = tx.transaction; - - let replaced = self.replace_previous(&transaction)?; - - let mut goes_to_best = true; - let mut ready = self.ready.write(); - // Add links to transactions that unlock the current one - for tag in &transaction.requires { - // Check if the transaction that satisfies the tag is still in the queue. - if let Some(other) = self.provided_tags.get(tag) { - let tx = ready.get_mut(other).expect(HASH_READY); - tx.unlocks.push(hash.clone()); - // this transaction depends on some other, so it doesn't go to best directly. - goes_to_best = false; - } - } - - // update provided_tags - for tag in &transaction.provides { - self.provided_tags.insert(tag.clone(), hash.clone()); - } - - let transaction = TransactionRef { - insertion_id, - transaction - }; - - // insert to best if it doesn't require any other transaction to be included before it - if goes_to_best { - self.best.insert(transaction.clone()); - } - - // insert to Ready - ready.insert(hash, ReadyTx { - transaction, - unlocks: vec![], - requires_offset: 0, - }); - - Ok(replaced) - } - - /// Fold a list of ready transactions to compute a single value. - pub fn fold, &ReadyTx) -> Option>(&mut self, f: F) -> Option { - self.ready - .read() - .values() - .fold(None, f) - } - - /// Returns true if given hash is part of the queue. 
- pub fn contains(&self, hash: &Hash) -> bool { - self.ready.read().contains_key(hash) - } - - /// Retrieve transaction by hash - pub fn by_hash(&self, hashes: &[Hash]) -> Vec>>> { - let ready = self.ready.read(); - hashes.iter().map(|hash| { - ready.get(hash).map(|x| x.transaction.transaction.clone()) - }).collect() - } - - /// Removes invalid transactions from the ready pool. - /// - /// NOTE removing a transaction will also cause a removal of all transactions that depend on that one - /// (i.e. the entire subgraph that this transaction is a start of will be removed). - /// All removed transactions are returned. - pub fn remove_invalid(&mut self, hashes: &[Hash]) -> Vec>> { - let mut removed = vec![]; - let mut to_remove = hashes.iter().cloned().collect::>(); - - let mut ready = self.ready.write(); - loop { - let hash = match to_remove.pop() { - Some(hash) => hash, - None => return removed, - }; - - if let Some(mut tx) = ready.remove(&hash) { - // remove entries from provided_tags - for tag in &tx.transaction.transaction.provides { - self.provided_tags.remove(tag); - } - // remove from unlocks - for tag in &tx.transaction.transaction.requires { - if let Some(hash) = self.provided_tags.get(tag) { - if let Some(tx) = ready.get_mut(hash) { - remove_item(&mut tx.unlocks, &hash); - } - } - } - - // remove from best - self.best.remove(&tx.transaction); - - // remove all transactions that the current one unlocks - to_remove.append(&mut tx.unlocks); - - // add to removed - debug!(target: "txpool", "[{:?}] Removed as invalid: ", hash); - removed.push(tx.transaction.transaction); - } - } - } - - /// Removes transactions that provide given tag. - /// - /// All transactions that lead to a transaction, which provides this tag - /// are going to be removed from the queue, but no other transactions are touched - - /// i.e. all other subgraphs starting from given tag are still considered valid & ready. - pub fn prune_tags(&mut self, tag: Tag) -> Vec>> { - let mut removed = vec![]; - let mut to_remove = vec![tag]; - - loop { - let tag = match to_remove.pop() { - Some(tag) => tag, - None => return removed, - }; - - let res = self.provided_tags.remove(&tag) - .and_then(|hash| self.ready.write().remove(&hash)); - - if let Some(tx) = res { - let unlocks = tx.unlocks; - let tx = tx.transaction.transaction; - - // prune previous transactions as well - { - let hash = &tx.hash; - let mut ready = self.ready.write(); - let mut find_previous = |tag| -> Option> { - let prev_hash = self.provided_tags.get(tag)?; - let tx2 = ready.get_mut(&prev_hash)?; - remove_item(&mut tx2.unlocks, hash); - // We eagerly prune previous transactions as well. - // But it might not always be good. - // Possible edge case: - // - tx provides two tags - // - the second tag enables some subgraph we don't know of yet - // - we will prune the transaction - // - when we learn about the subgraph it will go to future - // - we will have to wait for re-propagation of that transaction - // Alternatively the caller may attempt to re-import these transactions. 
- if tx2.unlocks.is_empty() { - Some(tx2.transaction.transaction.provides.clone()) - } else { - None - } - }; - - // find previous transactions - for tag in &tx.requires { - if let Some(mut tags_to_remove) = find_previous(tag) { - to_remove.append(&mut tags_to_remove); - } - } - } - - // add the transactions that just got unlocked to `best` - for hash in unlocks { - if let Some(tx) = self.ready.write().get_mut(&hash) { - tx.requires_offset += 1; - // this transaction is ready - if tx.requires_offset == tx.transaction.transaction.requires.len() { - self.best.insert(tx.transaction.clone()); - } - } - } - - debug!(target: "txpool", "[{:?}] Pruned.", tx.hash); - removed.push(tx); - } - } - } - - /// Checks if the transaction is providing the same tags as other transactions. - /// - /// In case that's true it determines if the priority of transactions that - /// we are about to replace is lower than the priority of the replacement transaction. - /// We remove/replace old transactions in case they have lower priority. - /// - /// In case replacement is succesful returns a list of removed transactions. - fn replace_previous(&mut self, tx: &Transaction) -> error::Result>>> { - let mut to_remove = { - // check if we are replacing a transaction - let replace_hashes = tx.provides - .iter() - .filter_map(|tag| self.provided_tags.get(tag)) - .collect::>(); - - // early exit if we are not replacing anything. - if replace_hashes.is_empty() { - return Ok(vec![]); - } - - // now check if collective priority is lower than the replacement transaction. - let old_priority = { - let ready = self.ready.read(); - replace_hashes - .iter() - .filter_map(|hash| ready.get(hash)) - .fold(0u64, |total, tx| total.saturating_add(tx.transaction.transaction.priority)) - }; - - // bail - the transaction has too low priority to replace the old ones - if old_priority >= tx.priority { - bail!(error::ErrorKind::TooLowPriority(old_priority, tx.priority)) - } - - replace_hashes.into_iter().cloned().collect::>() - }; - - let new_provides = tx.provides.iter().cloned().collect::>(); - let mut removed = vec![]; - loop { - let hash = match to_remove.pop() { - Some(hash) => hash, - None => return Ok(removed), - }; - - let tx = self.ready.write().remove(&hash).expect(HASH_READY); - // check if this transaction provides stuff that is not provided by the new one. - let (mut unlocks, tx) = (tx.unlocks, tx.transaction.transaction); - { - let invalidated = tx.provides - .iter() - .filter(|tag| !new_provides.contains(&**tag)); - - for tag in invalidated { - // remove the tag since it's no longer provided by any transaction - self.provided_tags.remove(tag); - // add more transactions to remove - to_remove.append(&mut unlocks); - } - } - - removed.push(tx); - } - } - - /// Returns number of transactions in this queue. - pub fn len(&self) -> usize { - self.ready.read().len() - } - - /// Returns sum of encoding lengths of all transactions in this queue. - pub fn bytes(&self) -> usize { - self.ready.read().values().fold(0, |acc, tx| acc + tx.transaction.transaction.bytes) - } + /// Borrows a map of tags that are provided by transactions in this queue. + pub fn provided_tags(&self) -> &HashMap { + &self.provided_tags + } + + /// Returns an iterator of ready transactions. + /// + /// Transactions are returned in order: + /// 1. First by the dependencies: + /// - never return transaction that requires a tag, which was not provided by one of the previously returned transactions + /// 2. 
Then by priority: + /// - If there are two transactions with all requirements satisfied the one with higher priority goes first. + /// 3. Then by the ttl that's left + /// - transactions that are valid for a shorter time go first + /// 4. Lastly we sort by the time in the queue + /// - transactions that are longer in the queue go first + pub fn get(&self) -> impl Iterator>> { + BestIterator { + all: self.ready.clone(), + best: self.best.clone(), + awaiting: Default::default(), + } + } + + /// Imports transactions to the pool of ready transactions. + /// + /// The transaction needs to have all tags satisfied (be ready) by transactions + /// that are in this queue. + /// Returns transactions that were replaced by the one imported. + pub fn import( + &mut self, + tx: WaitingTransaction, + ) -> error::Result>>> { + assert!(tx.is_ready(), "Only ready transactions can be imported."); + assert!( + !self.ready.read().contains_key(&tx.transaction.hash), + "Transaction is already imported." + ); + + self.insertion_id += 1; + let insertion_id = self.insertion_id; + let hash = tx.transaction.hash.clone(); + let transaction = tx.transaction; + + let replaced = self.replace_previous(&transaction)?; + + let mut goes_to_best = true; + let mut ready = self.ready.write(); + // Add links to transactions that unlock the current one + for tag in &transaction.requires { + // Check if the transaction that satisfies the tag is still in the queue. + if let Some(other) = self.provided_tags.get(tag) { + let tx = ready.get_mut(other).expect(HASH_READY); + tx.unlocks.push(hash.clone()); + // this transaction depends on some other, so it doesn't go to best directly. + goes_to_best = false; + } + } + + // update provided_tags + for tag in &transaction.provides { + self.provided_tags.insert(tag.clone(), hash.clone()); + } + + let transaction = TransactionRef { + insertion_id, + transaction, + }; + + // insert to best if it doesn't require any other transaction to be included before it + if goes_to_best { + self.best.insert(transaction.clone()); + } + + // insert to Ready + ready.insert( + hash, + ReadyTx { + transaction, + unlocks: vec![], + requires_offset: 0, + }, + ); + + Ok(replaced) + } + + /// Fold a list of ready transactions to compute a single value. + pub fn fold, &ReadyTx) -> Option>( + &mut self, + f: F, + ) -> Option { + self.ready.read().values().fold(None, f) + } + + /// Returns true if given hash is part of the queue. + pub fn contains(&self, hash: &Hash) -> bool { + self.ready.read().contains_key(hash) + } + + /// Retrieve transaction by hash + pub fn by_hash(&self, hashes: &[Hash]) -> Vec>>> { + let ready = self.ready.read(); + hashes + .iter() + .map(|hash| ready.get(hash).map(|x| x.transaction.transaction.clone())) + .collect() + } + + /// Removes invalid transactions from the ready pool. + /// + /// NOTE removing a transaction will also cause a removal of all transactions that depend on that one + /// (i.e. the entire subgraph that this transaction is a start of will be removed). + /// All removed transactions are returned. 
+ pub fn remove_invalid(&mut self, hashes: &[Hash]) -> Vec>> { + let mut removed = vec![]; + let mut to_remove = hashes.iter().cloned().collect::>(); + + let mut ready = self.ready.write(); + loop { + let hash = match to_remove.pop() { + Some(hash) => hash, + None => return removed, + }; + + if let Some(mut tx) = ready.remove(&hash) { + // remove entries from provided_tags + for tag in &tx.transaction.transaction.provides { + self.provided_tags.remove(tag); + } + // remove from unlocks + for tag in &tx.transaction.transaction.requires { + if let Some(hash) = self.provided_tags.get(tag) { + if let Some(tx) = ready.get_mut(hash) { + remove_item(&mut tx.unlocks, &hash); + } + } + } + + // remove from best + self.best.remove(&tx.transaction); + + // remove all transactions that the current one unlocks + to_remove.append(&mut tx.unlocks); + + // add to removed + debug!(target: "txpool", "[{:?}] Removed as invalid: ", hash); + removed.push(tx.transaction.transaction); + } + } + } + + /// Removes transactions that provide given tag. + /// + /// All transactions that lead to a transaction, which provides this tag + /// are going to be removed from the queue, but no other transactions are touched - + /// i.e. all other subgraphs starting from given tag are still considered valid & ready. + pub fn prune_tags(&mut self, tag: Tag) -> Vec>> { + let mut removed = vec![]; + let mut to_remove = vec![tag]; + + loop { + let tag = match to_remove.pop() { + Some(tag) => tag, + None => return removed, + }; + + let res = self + .provided_tags + .remove(&tag) + .and_then(|hash| self.ready.write().remove(&hash)); + + if let Some(tx) = res { + let unlocks = tx.unlocks; + let tx = tx.transaction.transaction; + + // prune previous transactions as well + { + let hash = &tx.hash; + let mut ready = self.ready.write(); + let mut find_previous = |tag| -> Option> { + let prev_hash = self.provided_tags.get(tag)?; + let tx2 = ready.get_mut(&prev_hash)?; + remove_item(&mut tx2.unlocks, hash); + // We eagerly prune previous transactions as well. + // But it might not always be good. + // Possible edge case: + // - tx provides two tags + // - the second tag enables some subgraph we don't know of yet + // - we will prune the transaction + // - when we learn about the subgraph it will go to future + // - we will have to wait for re-propagation of that transaction + // Alternatively the caller may attempt to re-import these transactions. + if tx2.unlocks.is_empty() { + Some(tx2.transaction.transaction.provides.clone()) + } else { + None + } + }; + + // find previous transactions + for tag in &tx.requires { + if let Some(mut tags_to_remove) = find_previous(tag) { + to_remove.append(&mut tags_to_remove); + } + } + } + + // add the transactions that just got unlocked to `best` + for hash in unlocks { + if let Some(tx) = self.ready.write().get_mut(&hash) { + tx.requires_offset += 1; + // this transaction is ready + if tx.requires_offset == tx.transaction.transaction.requires.len() { + self.best.insert(tx.transaction.clone()); + } + } + } + + debug!(target: "txpool", "[{:?}] Pruned.", tx.hash); + removed.push(tx); + } + } + } + + /// Checks if the transaction is providing the same tags as other transactions. + /// + /// In case that's true it determines if the priority of transactions that + /// we are about to replace is lower than the priority of the replacement transaction. + /// We remove/replace old transactions in case they have lower priority. + /// + /// In case replacement is succesful returns a list of removed transactions. 
+ fn replace_previous( + &mut self, + tx: &Transaction, + ) -> error::Result>>> { + let mut to_remove = { + // check if we are replacing a transaction + let replace_hashes = tx + .provides + .iter() + .filter_map(|tag| self.provided_tags.get(tag)) + .collect::>(); + + // early exit if we are not replacing anything. + if replace_hashes.is_empty() { + return Ok(vec![]); + } + + // now check if collective priority is lower than the replacement transaction. + let old_priority = { + let ready = self.ready.read(); + replace_hashes + .iter() + .filter_map(|hash| ready.get(hash)) + .fold(0u64, |total, tx| { + total.saturating_add(tx.transaction.transaction.priority) + }) + }; + + // bail - the transaction has too low priority to replace the old ones + if old_priority >= tx.priority { + bail!(error::ErrorKind::TooLowPriority(old_priority, tx.priority)) + } + + replace_hashes.into_iter().cloned().collect::>() + }; + + let new_provides = tx.provides.iter().cloned().collect::>(); + let mut removed = vec![]; + loop { + let hash = match to_remove.pop() { + Some(hash) => hash, + None => return Ok(removed), + }; + + let tx = self.ready.write().remove(&hash).expect(HASH_READY); + // check if this transaction provides stuff that is not provided by the new one. + let (mut unlocks, tx) = (tx.unlocks, tx.transaction.transaction); + { + let invalidated = tx + .provides + .iter() + .filter(|tag| !new_provides.contains(&**tag)); + + for tag in invalidated { + // remove the tag since it's no longer provided by any transaction + self.provided_tags.remove(tag); + // add more transactions to remove + to_remove.append(&mut unlocks); + } + } + + removed.push(tx); + } + } + + /// Returns number of transactions in this queue. + pub fn len(&self) -> usize { + self.ready.read().len() + } + + /// Returns sum of encoding lengths of all transactions in this queue. + pub fn bytes(&self) -> usize { + self.ready + .read() + .values() + .fold(0, |acc, tx| acc + tx.transaction.transaction.bytes) + } } pub struct BestIterator { - all: Arc>>>, - awaiting: HashMap)>, - best: BTreeSet>, + all: Arc>>>, + awaiting: HashMap)>, + best: BTreeSet>, } impl BestIterator { - /// Depending on number of satisfied requirements insert given ref - /// either to awaiting set or to best set. - fn best_or_awaiting(&mut self, satisfied: usize, tx_ref: TransactionRef) { - if satisfied == tx_ref.transaction.requires.len() { - // If we have satisfied all deps insert to best - self.best.insert(tx_ref); - - } else { - // otherwise we're still awaiting for some deps - self.awaiting.insert(tx_ref.transaction.hash.clone(), (satisfied, tx_ref)); - } - } + /// Depending on number of satisfied requirements insert given ref + /// either to awaiting set or to best set. + fn best_or_awaiting(&mut self, satisfied: usize, tx_ref: TransactionRef) { + if satisfied == tx_ref.transaction.requires.len() { + // If we have satisfied all deps insert to best + self.best.insert(tx_ref); + } else { + // otherwise we're still awaiting for some deps + self.awaiting + .insert(tx_ref.transaction.hash.clone(), (satisfied, tx_ref)); + } + } } impl Iterator for BestIterator { - type Item = Arc>; - - fn next(&mut self) -> Option { - loop { - let best = self.best.iter().next_back()?.clone(); - let best = self.best.take(&best)?; - - let next = self.all.read().get(&best.transaction.hash).cloned(); - let ready = match next { - Some(ready) => ready, - // The transaction is not in all, maybe it was removed in the meantime? 
- None => continue, - }; - - // Insert transactions that just got unlocked. - for hash in &ready.unlocks { - // first check local awaiting transactions - let res = if let Some((mut satisfied, tx_ref)) = self.awaiting.remove(hash) { - satisfied += 1; - Some((satisfied, tx_ref)) - // then get from the pool - } else if let Some(next) = self.all.read().get(hash) { - Some((next.requires_offset + 1, next.transaction.clone())) - } else { - None - }; - - if let Some((satisfied, tx_ref)) = res { - self.best_or_awaiting(satisfied, tx_ref) - } - } - - return Some(best.transaction.clone()) - } - } + type Item = Arc>; + + fn next(&mut self) -> Option { + loop { + let best = self.best.iter().next_back()?.clone(); + let best = self.best.take(&best)?; + + let next = self.all.read().get(&best.transaction.hash).cloned(); + let ready = match next { + Some(ready) => ready, + // The transaction is not in all, maybe it was removed in the meantime? + None => continue, + }; + + // Insert transactions that just got unlocked. + for hash in &ready.unlocks { + // first check local awaiting transactions + let res = if let Some((mut satisfied, tx_ref)) = self.awaiting.remove(hash) { + satisfied += 1; + Some((satisfied, tx_ref)) + // then get from the pool + } else if let Some(next) = self.all.read().get(hash) { + Some((next.requires_offset + 1, next.transaction.clone())) + } else { + None + }; + + if let Some((satisfied, tx_ref)) = res { + self.best_or_awaiting(satisfied, tx_ref) + } + } + + return Some(best.transaction.clone()); + } + } } // See: https://github.com/rust-lang/rust/issues/40062 fn remove_item(vec: &mut Vec, item: &T) { - if let Some(idx) = vec.iter().position(|i| i == item) { - vec.swap_remove(idx); - } + if let Some(idx) = vec.iter().position(|i| i == item) { + vec.swap_remove(idx); + } } #[cfg(test)] mod tests { - use super::*; - - fn tx(id: u8) -> Transaction> { - Transaction { - data: vec![id], - bytes: 1, - hash: id as u64, - priority: 1, - valid_till: 2, - requires: vec![vec![1], vec![2]], - provides: vec![vec![3], vec![4]], - } - } - - #[test] - fn should_replace_transaction_that_provides_the_same_tag() { - // given - let mut ready = ReadyTransactions::default(); - let mut tx1 = tx(1); - tx1.requires.clear(); - let mut tx2 = tx(2); - tx2.requires.clear(); - tx2.provides = vec![vec![3]]; - let mut tx3 = tx(3); - tx3.requires.clear(); - tx3.provides = vec![vec![4]]; - - // when - let x = WaitingTransaction::new(tx2, &ready.provided_tags(), &[]); - ready.import(x).unwrap(); - let x = WaitingTransaction::new(tx3, &ready.provided_tags(), &[]); - ready.import(x).unwrap(); - assert_eq!(ready.get().count(), 2); - - // too low priority - let x = WaitingTransaction::new(tx1.clone(), &ready.provided_tags(), &[]); - ready.import(x).unwrap_err(); - - tx1.priority = 10; - let x = WaitingTransaction::new(tx1.clone(), &ready.provided_tags(), &[]); - ready.import(x).unwrap(); - - // then - assert_eq!(ready.get().count(), 1); - } - - - #[test] - fn should_return_best_transactions_in_correct_order() { - // given - let mut ready = ReadyTransactions::default(); - let mut tx1 = tx(1); - tx1.requires.clear(); - let mut tx2 = tx(2); - tx2.requires = tx1.provides.clone(); - tx2.provides = vec![vec![106]]; - let mut tx3 = tx(3); - tx3.requires = vec![tx1.provides[0].clone(), vec![106]]; - tx3.provides = vec![]; - let mut tx4 = tx(4); - tx4.requires = vec![tx1.provides[0].clone()]; - tx4.provides = vec![]; - let tx5 = Transaction { - data: vec![5], - bytes: 1, - hash: 5, - priority: 1, - valid_till: u64::max_value(), // 
use the max_value() here for testing. - requires: vec![tx1.provides[0].clone()], - provides: vec![], - }; - - // when - let x = WaitingTransaction::new(tx1, &ready.provided_tags(), &[]); - ready.import(x).unwrap(); - let x = WaitingTransaction::new(tx2, &ready.provided_tags(), &[]); - ready.import(x).unwrap(); - let x = WaitingTransaction::new(tx3, &ready.provided_tags(), &[]); - ready.import(x).unwrap(); - let x = WaitingTransaction::new(tx4, &ready.provided_tags(), &[]); - ready.import(x).unwrap(); - let x = WaitingTransaction::new(tx5, &ready.provided_tags(), &[]); - ready.import(x).unwrap(); - - // then - assert_eq!(ready.best.len(), 1); - - let mut it = ready.get().map(|tx| tx.data[0]); - - assert_eq!(it.next(), Some(1)); - assert_eq!(it.next(), Some(2)); - assert_eq!(it.next(), Some(3)); - assert_eq!(it.next(), Some(4)); - assert_eq!(it.next(), Some(5)); - assert_eq!(it.next(), None); - } - - #[test] - fn should_order_refs() { - let mut id = 1; - let mut with_priority = |priority, longevity| { - id += 1; - let mut tx = tx(id); - tx.priority = priority; - tx.valid_till = longevity; - tx - }; - // higher priority = better - assert!(TransactionRef { - transaction: Arc::new(with_priority(3, 3)), - insertion_id: 1, - } > TransactionRef { - transaction: Arc::new(with_priority(2, 3)), - insertion_id: 2, - }); - // lower validity = better - assert!(TransactionRef { - transaction: Arc::new(with_priority(3, 2)), - insertion_id: 1, - } > TransactionRef { - transaction: Arc::new(with_priority(3, 3)), - insertion_id: 2, - }); - // lower insertion_id = better - assert!(TransactionRef { - transaction: Arc::new(with_priority(3, 3)), - insertion_id: 1, - } > TransactionRef { - transaction: Arc::new(with_priority(3, 3)), - insertion_id: 2, - }); - } + use super::*; + + fn tx(id: u8) -> Transaction> { + Transaction { + data: vec![id], + bytes: 1, + hash: id as u64, + priority: 1, + valid_till: 2, + requires: vec![vec![1], vec![2]], + provides: vec![vec![3], vec![4]], + } + } + + #[test] + fn should_replace_transaction_that_provides_the_same_tag() { + // given + let mut ready = ReadyTransactions::default(); + let mut tx1 = tx(1); + tx1.requires.clear(); + let mut tx2 = tx(2); + tx2.requires.clear(); + tx2.provides = vec![vec![3]]; + let mut tx3 = tx(3); + tx3.requires.clear(); + tx3.provides = vec![vec![4]]; + + // when + let x = WaitingTransaction::new(tx2, &ready.provided_tags(), &[]); + ready.import(x).unwrap(); + let x = WaitingTransaction::new(tx3, &ready.provided_tags(), &[]); + ready.import(x).unwrap(); + assert_eq!(ready.get().count(), 2); + + // too low priority + let x = WaitingTransaction::new(tx1.clone(), &ready.provided_tags(), &[]); + ready.import(x).unwrap_err(); + + tx1.priority = 10; + let x = WaitingTransaction::new(tx1.clone(), &ready.provided_tags(), &[]); + ready.import(x).unwrap(); + + // then + assert_eq!(ready.get().count(), 1); + } + + #[test] + fn should_return_best_transactions_in_correct_order() { + // given + let mut ready = ReadyTransactions::default(); + let mut tx1 = tx(1); + tx1.requires.clear(); + let mut tx2 = tx(2); + tx2.requires = tx1.provides.clone(); + tx2.provides = vec![vec![106]]; + let mut tx3 = tx(3); + tx3.requires = vec![tx1.provides[0].clone(), vec![106]]; + tx3.provides = vec![]; + let mut tx4 = tx(4); + tx4.requires = vec![tx1.provides[0].clone()]; + tx4.provides = vec![]; + let tx5 = Transaction { + data: vec![5], + bytes: 1, + hash: 5, + priority: 1, + valid_till: u64::max_value(), // use the max_value() here for testing. 
+ requires: vec![tx1.provides[0].clone()], + provides: vec![], + }; + + // when + let x = WaitingTransaction::new(tx1, &ready.provided_tags(), &[]); + ready.import(x).unwrap(); + let x = WaitingTransaction::new(tx2, &ready.provided_tags(), &[]); + ready.import(x).unwrap(); + let x = WaitingTransaction::new(tx3, &ready.provided_tags(), &[]); + ready.import(x).unwrap(); + let x = WaitingTransaction::new(tx4, &ready.provided_tags(), &[]); + ready.import(x).unwrap(); + let x = WaitingTransaction::new(tx5, &ready.provided_tags(), &[]); + ready.import(x).unwrap(); + + // then + assert_eq!(ready.best.len(), 1); + + let mut it = ready.get().map(|tx| tx.data[0]); + + assert_eq!(it.next(), Some(1)); + assert_eq!(it.next(), Some(2)); + assert_eq!(it.next(), Some(3)); + assert_eq!(it.next(), Some(4)); + assert_eq!(it.next(), Some(5)); + assert_eq!(it.next(), None); + } + + #[test] + fn should_order_refs() { + let mut id = 1; + let mut with_priority = |priority, longevity| { + id += 1; + let mut tx = tx(id); + tx.priority = priority; + tx.valid_till = longevity; + tx + }; + // higher priority = better + assert!( + TransactionRef { + transaction: Arc::new(with_priority(3, 3)), + insertion_id: 1, + } > TransactionRef { + transaction: Arc::new(with_priority(2, 3)), + insertion_id: 2, + } + ); + // lower validity = better + assert!( + TransactionRef { + transaction: Arc::new(with_priority(3, 2)), + insertion_id: 1, + } > TransactionRef { + transaction: Arc::new(with_priority(3, 3)), + insertion_id: 2, + } + ); + // lower insertion_id = better + assert!( + TransactionRef { + transaction: Arc::new(with_priority(3, 3)), + insertion_id: 1, + } > TransactionRef { + transaction: Arc::new(with_priority(3, 3)), + insertion_id: 2, + } + ); + } } diff --git a/core/transaction-pool/graph/src/rotator.rs b/core/transaction-pool/graph/src/rotator.rs index 2ca51ef74e..b91084bf19 100644 --- a/core/transaction-pool/graph/src/rotator.rs +++ b/core/transaction-pool/graph/src/rotator.rs @@ -19,13 +19,12 @@ //! Keeps only recent extrinsic and discard the ones kept for a significant amount of time. //! Discarded extrinsics are banned so that they don't get re-imported again. +use parking_lot::RwLock; use std::{ - collections::HashMap, - hash, - iter, - time::{Duration, Instant}, + collections::HashMap, + hash, iter, + time::{Duration, Instant}, }; -use parking_lot::RwLock; use crate::base_pool::Transaction; @@ -37,173 +36,176 @@ const EXPECTED_SIZE: usize = 2048; /// Extrinsics that occupy the pool for too long are culled and temporarily banned from entering /// the pool again. pub struct PoolRotator { - /// How long the extrinsic is banned for. - ban_time: Duration, - /// Currently banned extrinsics. - banned_until: RwLock>, + /// How long the extrinsic is banned for. + ban_time: Duration, + /// Currently banned extrinsics. + banned_until: RwLock>, } impl Default for PoolRotator { - fn default() -> Self { - PoolRotator { - ban_time: Duration::from_secs(60 * 30), - banned_until: Default::default(), - } - } + fn default() -> Self { + PoolRotator { + ban_time: Duration::from_secs(60 * 30), + banned_until: Default::default(), + } + } } impl PoolRotator { - /// Returns `true` if extrinsic hash is currently banned. - pub fn is_banned(&self, hash: &Hash) -> bool { - self.banned_until.read().contains_key(hash) - } - - /// Bans given set of hashes. 
- pub fn ban(&self, now: &Instant, hashes: impl IntoIterator) { - let mut banned = self.banned_until.write(); - - for hash in hashes { - banned.insert(hash, *now + self.ban_time); - } - - if banned.len() > 2 * EXPECTED_SIZE { - while banned.len() > EXPECTED_SIZE { - if let Some(key) = banned.keys().next().cloned() { - banned.remove(&key); - } - } - } - } - - - /// Bans extrinsic if it's stale. - /// - /// Returns `true` if extrinsic is stale and got banned. - pub fn ban_if_stale(&self, now: &Instant, current_block: u64, xt: &Transaction) -> bool { - if xt.valid_till > current_block { - return false; - } - - self.ban(now, iter::once(xt.hash.clone())); - true - } - - /// Removes timed bans. - pub fn clear_timeouts(&self, now: &Instant) { - let mut banned = self.banned_until.write(); - - banned.retain(|_, &mut v| v >= *now); - } + /// Returns `true` if extrinsic hash is currently banned. + pub fn is_banned(&self, hash: &Hash) -> bool { + self.banned_until.read().contains_key(hash) + } + + /// Bans given set of hashes. + pub fn ban(&self, now: &Instant, hashes: impl IntoIterator) { + let mut banned = self.banned_until.write(); + + for hash in hashes { + banned.insert(hash, *now + self.ban_time); + } + + if banned.len() > 2 * EXPECTED_SIZE { + while banned.len() > EXPECTED_SIZE { + if let Some(key) = banned.keys().next().cloned() { + banned.remove(&key); + } + } + } + } + + /// Bans extrinsic if it's stale. + /// + /// Returns `true` if extrinsic is stale and got banned. + pub fn ban_if_stale( + &self, + now: &Instant, + current_block: u64, + xt: &Transaction, + ) -> bool { + if xt.valid_till > current_block { + return false; + } + + self.ban(now, iter::once(xt.hash.clone())); + true + } + + /// Removes timed bans. + pub fn clear_timeouts(&self, now: &Instant) { + let mut banned = self.banned_until.write(); + + banned.retain(|_, &mut v| v >= *now); + } } #[cfg(test)] mod tests { - use super::*; - - type Hash = u64; - type Ex = (); - - fn rotator() -> PoolRotator { - PoolRotator { - ban_time: Duration::from_millis(10), - ..Default::default() - } - } - - fn tx() -> (Hash, Transaction) { - let hash = 5u64; - let tx = Transaction { - data: (), - bytes: 1, - hash: hash.clone(), - priority: 5, - valid_till: 1, - requires: vec![], - provides: vec![], - }; - - (hash, tx) - } - - #[test] - fn should_not_ban_if_not_stale() { - // given - let (hash, tx) = tx(); - let rotator = rotator(); - assert!(!rotator.is_banned(&hash)); - let now = Instant::now(); - let past_block = 0; - - // when - assert!(!rotator.ban_if_stale(&now, past_block, &tx)); - - // then - assert!(!rotator.is_banned(&hash)); - } - - #[test] - fn should_ban_stale_extrinsic() { - // given - let (hash, tx) = tx(); - let rotator = rotator(); - assert!(!rotator.is_banned(&hash)); - - // when - assert!(rotator.ban_if_stale(&Instant::now(), 1, &tx)); - - // then - assert!(rotator.is_banned(&hash)); - } - - - #[test] - fn should_clear_banned() { - // given - let (hash, tx) = tx(); - let rotator = rotator(); - assert!(rotator.ban_if_stale(&Instant::now(), 1, &tx)); - assert!(rotator.is_banned(&hash)); - - // when - let future = Instant::now() + rotator.ban_time + rotator.ban_time; - rotator.clear_timeouts(&future); - - // then - assert!(!rotator.is_banned(&hash)); - } - - #[test] - fn should_garbage_collect() { - // given - fn tx_with(i: u64, valid_till: u64) -> Transaction { - let hash = i; - Transaction { - data: (), - bytes: 2, - hash, - priority: 5, - valid_till, - requires: vec![], - provides: vec![], - } - } - - let rotator = rotator(); - - 
let now = Instant::now(); - let past_block = 0; - - // when - for i in 0..2*EXPECTED_SIZE { - let tx = tx_with(i as u64, past_block); - assert!(rotator.ban_if_stale(&now, past_block, &tx)); - } - assert_eq!(rotator.banned_until.read().len(), 2*EXPECTED_SIZE); - - // then - let tx = tx_with(2*EXPECTED_SIZE as u64, past_block); - // trigger a garbage collection - assert!(rotator.ban_if_stale(&now, past_block, &tx)); - assert_eq!(rotator.banned_until.read().len(), EXPECTED_SIZE); - } + use super::*; + + type Hash = u64; + type Ex = (); + + fn rotator() -> PoolRotator { + PoolRotator { + ban_time: Duration::from_millis(10), + ..Default::default() + } + } + + fn tx() -> (Hash, Transaction) { + let hash = 5u64; + let tx = Transaction { + data: (), + bytes: 1, + hash: hash.clone(), + priority: 5, + valid_till: 1, + requires: vec![], + provides: vec![], + }; + + (hash, tx) + } + + #[test] + fn should_not_ban_if_not_stale() { + // given + let (hash, tx) = tx(); + let rotator = rotator(); + assert!(!rotator.is_banned(&hash)); + let now = Instant::now(); + let past_block = 0; + + // when + assert!(!rotator.ban_if_stale(&now, past_block, &tx)); + + // then + assert!(!rotator.is_banned(&hash)); + } + + #[test] + fn should_ban_stale_extrinsic() { + // given + let (hash, tx) = tx(); + let rotator = rotator(); + assert!(!rotator.is_banned(&hash)); + + // when + assert!(rotator.ban_if_stale(&Instant::now(), 1, &tx)); + + // then + assert!(rotator.is_banned(&hash)); + } + + #[test] + fn should_clear_banned() { + // given + let (hash, tx) = tx(); + let rotator = rotator(); + assert!(rotator.ban_if_stale(&Instant::now(), 1, &tx)); + assert!(rotator.is_banned(&hash)); + + // when + let future = Instant::now() + rotator.ban_time + rotator.ban_time; + rotator.clear_timeouts(&future); + + // then + assert!(!rotator.is_banned(&hash)); + } + + #[test] + fn should_garbage_collect() { + // given + fn tx_with(i: u64, valid_till: u64) -> Transaction { + let hash = i; + Transaction { + data: (), + bytes: 2, + hash, + priority: 5, + valid_till, + requires: vec![], + provides: vec![], + } + } + + let rotator = rotator(); + + let now = Instant::now(); + let past_block = 0; + + // when + for i in 0..2 * EXPECTED_SIZE { + let tx = tx_with(i as u64, past_block); + assert!(rotator.ban_if_stale(&now, past_block, &tx)); + } + assert_eq!(rotator.banned_until.read().len(), 2 * EXPECTED_SIZE); + + // then + let tx = tx_with(2 * EXPECTED_SIZE as u64, past_block); + // trigger a garbage collection + assert!(rotator.ban_if_stale(&now, past_block, &tx)); + assert_eq!(rotator.banned_until.read().len(), EXPECTED_SIZE); + } } diff --git a/core/transaction-pool/graph/src/watcher.rs b/core/transaction-pool/graph/src/watcher.rs index 5516d8c43c..77a8d77e9a 100644 --- a/core/transaction-pool/graph/src/watcher.rs +++ b/core/transaction-pool/graph/src/watcher.rs @@ -16,30 +16,27 @@ //! Extrinsics status updates. -use futures::{ - Stream, - sync::mpsc, -}; -use serde_derive::{Serialize, Deserialize}; +use futures::{sync::mpsc, Stream}; +use serde_derive::{Deserialize, Serialize}; /// Possible extrinsic status events #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub enum Status { - /// Extrinsic is part of the future queue. - Future, - /// Extrinsic is part of the ready queue. - Ready, - /// Extrinsic has been finalized in block with given hash. - Finalized(H2), - /// Some state change (perhaps another extrinsic was included) rendered this extrinsic invalid. 
- Usurped(H), - /// The extrinsic has been broadcast to the given peers. - Broadcast(Vec), - /// Extrinsic has been dropped from the pool because of the limit. - Dropped, - /// Extrinsic was detected as invalid. - Invalid, + /// Extrinsic is part of the future queue. + Future, + /// Extrinsic is part of the ready queue. + Ready, + /// Extrinsic has been finalized in block with given hash. + Finalized(H2), + /// Some state change (perhaps another extrinsic was included) rendered this extrinsic invalid. + Usurped(H), + /// The extrinsic has been broadcast to the given peers. + Broadcast(Vec), + /// Extrinsic has been dropped from the pool because of the limit. + Dropped, + /// Extrinsic was detected as invalid. + Invalid, } /// Extrinsic watcher. @@ -47,97 +44,94 @@ pub enum Status { /// Represents a stream of status updates for particular extrinsic. #[derive(Debug)] pub struct Watcher { - receiver: mpsc::UnboundedReceiver>, - hash: H, + receiver: mpsc::UnboundedReceiver>, + hash: H, } impl Watcher { - /// Returns the transaction hash. - pub fn hash(&self) -> &H { - &self.hash - } - - /// Pipe the notifications to given sink. - /// - /// Make sure to drive the future to completion. - pub fn into_stream(self) -> impl Stream, Error=()> { - // we can safely ignore the error here, `UnboundedReceiver` never fails. - self.receiver.map_err(|_| ()) - } + /// Returns the transaction hash. + pub fn hash(&self) -> &H { + &self.hash + } + + /// Pipe the notifications to given sink. + /// + /// Make sure to drive the future to completion. + pub fn into_stream(self) -> impl Stream, Error = ()> { + // we can safely ignore the error here, `UnboundedReceiver` never fails. + self.receiver.map_err(|_| ()) + } } /// Sender part of the watcher. Exposed only for testing purposes. #[derive(Debug)] pub struct Sender { - receivers: Vec>>, - finalized: bool, + receivers: Vec>>, + finalized: bool, } impl Default for Sender { - fn default() -> Self { - Sender { - receivers: Default::default(), - finalized: false, - } - } + fn default() -> Self { + Sender { + receivers: Default::default(), + finalized: false, + } + } } impl Sender { - /// Add a new watcher to this sender object. - pub fn new_watcher(&mut self, hash: H) -> Watcher { - let (tx, receiver) = mpsc::unbounded(); - self.receivers.push(tx); - Watcher { - receiver, - hash, - } - } - - /// Transaction became ready. - pub fn ready(&mut self) { - self.send(Status::Ready) - } - - /// Transaction was moved to future. - pub fn future(&mut self) { - self.send(Status::Future) - } - - /// Some state change (perhaps another extrinsic was included) rendered this extrinsic invalid. - pub fn usurped(&mut self, hash: H) { - self.send(Status::Usurped(hash)) - } - - /// Extrinsic has been finalized in block with given hash. - pub fn finalized(&mut self, hash: H2) { - self.send(Status::Finalized(hash)); - self.finalized = true; - } - - /// Extrinsic has been marked as invalid by the block builder. - pub fn invalid(&mut self) { - self.send(Status::Invalid); - // we mark as finalized as there are no more notifications - self.finalized = true; - } - - /// Transaction has been dropped from the pool because of the limit. - pub fn dropped(&mut self) { - self.send(Status::Dropped); - } - - /// The extrinsic has been broadcast to the given peers. - pub fn broadcast(&mut self, peers: Vec) { - self.send(Status::Broadcast(peers)) - } - - - /// Returns true if the are no more listeners for this extrinsic or it was finalized. 
-	pub fn is_done(&self) -> bool {
-		self.finalized || self.receivers.is_empty()
-	}
-
-	fn send(&mut self, status: Status<H, H2>) {
-		self.receivers.retain(|sender| sender.unbounded_send(status.clone()).is_ok())
-	}
+    /// Add a new watcher to this sender object.
+    pub fn new_watcher(&mut self, hash: H) -> Watcher<H, H2> {
+        let (tx, receiver) = mpsc::unbounded();
+        self.receivers.push(tx);
+        Watcher { receiver, hash }
+    }
+
+    /// Transaction became ready.
+    pub fn ready(&mut self) {
+        self.send(Status::Ready)
+    }
+
+    /// Transaction was moved to the future queue.
+    pub fn future(&mut self) {
+        self.send(Status::Future)
+    }
+
+    /// Some state change (perhaps another extrinsic was included) rendered this extrinsic invalid.
+    pub fn usurped(&mut self, hash: H) {
+        self.send(Status::Usurped(hash))
+    }
+
+    /// Extrinsic has been finalized in a block with the given hash.
+    pub fn finalized(&mut self, hash: H2) {
+        self.send(Status::Finalized(hash));
+        self.finalized = true;
+    }
+
+    /// Extrinsic has been marked as invalid by the block builder.
+    pub fn invalid(&mut self) {
+        self.send(Status::Invalid);
+        // we mark as finalized as there are no more notifications
+        self.finalized = true;
+    }
+
+    /// Transaction has been dropped from the pool because of the limit.
+    pub fn dropped(&mut self) {
+        self.send(Status::Dropped);
+    }
+
+    /// The extrinsic has been broadcast to the given peers.
+    pub fn broadcast(&mut self, peers: Vec<String>) {
+        self.send(Status::Broadcast(peers))
+    }
+
+    /// Returns true if there are no more listeners for this extrinsic or it was finalized.
+    pub fn is_done(&self) -> bool {
+        self.finalized || self.receivers.is_empty()
+    }
+
+    fn send(&mut self, status: Status<H, H2>) {
+        self.receivers
+            .retain(|sender| sender.unbounded_send(status.clone()).is_ok())
+    }
 }
diff --git a/core/transaction-pool/src/api.rs b/core/transaction-pool/src/api.rs
index 84475376fe..a37deb0450 100644
--- a/core/transaction-pool/src/api.rs
+++ b/core/transaction-pool/src/api.rs
@@ -16,68 +16,68 @@
 
 //! Chain api required for the transaction pool.
 
-use std::{
-	sync::Arc,
-	marker::PhantomData,
-};
-use client::{runtime_api::TaggedTransactionQueue, blockchain::HeaderBackend};
+use client::{blockchain::HeaderBackend, runtime_api::TaggedTransactionQueue};
 use parity_codec::Encode;
+use sr_primitives::{generic::BlockId, traits, transaction_validity::TransactionValidity};
+use std::{marker::PhantomData, sync::Arc};
+use substrate_primitives::{Blake2Hasher, Hasher, H256};
 use txpool;
-use substrate_primitives::{
-	H256,
-	Blake2Hasher,
-	Hasher,
-};
-use sr_primitives::{
-	generic::BlockId,
-	traits,
-	transaction_validity::TransactionValidity,
-};
 
 use crate::error;
 
 /// The transaction pool logic
 pub struct ChainApi<T, Block> {
-	client: Arc<T>,
-	_marker: PhantomData<Block>,
+    client: Arc<T>,
+    _marker: PhantomData<Block>,
 }
 
-impl<T, Block> ChainApi<T, Block> where
-	Block: traits::Block,
-	T: traits::ProvideRuntimeApi + HeaderBackend<Block> {
-	/// Create new transaction pool logic.
-	pub fn new(client: Arc<T>) -> Self {
-		ChainApi {
-			client,
-			_marker: Default::default()
-		}
-	}
+impl<T, Block> ChainApi<T, Block>
+where
+    Block: traits::Block,
+    T: traits::ProvideRuntimeApi + HeaderBackend<Block>,
+{
+    /// Create new transaction pool logic.
+ pub fn new(client: Arc) -> Self { + ChainApi { + client, + _marker: Default::default(), + } + } } -impl txpool::ChainApi for ChainApi where - Block: traits::Block, - T: traits::ProvideRuntimeApi + HeaderBackend, - T::Api: TaggedTransactionQueue +impl txpool::ChainApi for ChainApi +where + Block: traits::Block, + T: traits::ProvideRuntimeApi + HeaderBackend, + T::Api: TaggedTransactionQueue, { - type Block = Block; - type Hash = H256; - type Error = error::Error; + type Block = Block; + type Hash = H256; + type Error = error::Error; - fn validate_transaction(&self, at: &BlockId, uxt: txpool::ExtrinsicFor) -> error::Result { - Ok(self.client.runtime_api().validate_transaction(at, uxt)?) - } + fn validate_transaction( + &self, + at: &BlockId, + uxt: txpool::ExtrinsicFor, + ) -> error::Result { + Ok(self.client.runtime_api().validate_transaction(at, uxt)?) + } - fn block_id_to_number(&self, at: &BlockId) -> error::Result>> { - Ok(self.client.block_number_from_id(at)?) - } + fn block_id_to_number( + &self, + at: &BlockId, + ) -> error::Result>> { + Ok(self.client.block_number_from_id(at)?) + } - fn block_id_to_hash(&self, at: &BlockId) -> error::Result>> { - Ok(self.client.block_hash_from_id(at)?) - } + fn block_id_to_hash( + &self, + at: &BlockId, + ) -> error::Result>> { + Ok(self.client.block_hash_from_id(at)?) + } - fn hash_and_length(&self, ex: &txpool::ExtrinsicFor) -> (Self::Hash, usize) { - ex.using_encoded(|x| { - (Blake2Hasher::hash(x), x.len()) - }) - } + fn hash_and_length(&self, ex: &txpool::ExtrinsicFor) -> (Self::Hash, usize) { + ex.using_encoded(|x| (Blake2Hasher::hash(x), x.len())) + } } diff --git a/core/transaction-pool/src/error.rs b/core/transaction-pool/src/error.rs index e1223c537d..730bd29dc9 100644 --- a/core/transaction-pool/src/error.rs +++ b/core/transaction-pool/src/error.rs @@ -17,23 +17,24 @@ //! Transaction pool error. use client; -use txpool; use error_chain::{ - error_chain, error_chain_processing, impl_error_chain_processed, impl_extract_backtrace, impl_error_chain_kind + error_chain, error_chain_processing, impl_error_chain_kind, impl_error_chain_processed, + impl_extract_backtrace, }; +use txpool; error_chain! { - links { - Client(client::error::Error, client::error::ErrorKind) #[doc = "Client error"]; - Pool(txpool::error::Error, txpool::error::ErrorKind) #[doc = "Pool error"]; - } + links { + Client(client::error::Error, client::error::ErrorKind) #[doc = "Client error"]; + Pool(txpool::error::Error, txpool::error::ErrorKind) #[doc = "Pool error"]; + } } impl txpool::IntoPoolError for Error { - fn into_pool_error(self) -> ::std::result::Result { - match self { - Error(ErrorKind::Pool(e), c) => Ok(txpool::error::Error(e, c)), - e => Err(e), - } - } + fn into_pool_error(self) -> ::std::result::Result { + match self { + Error(ErrorKind::Pool(e), c) => Ok(txpool::error::Error(e, c)), + e => Err(e), + } + } } diff --git a/core/transaction-pool/src/tests.rs b/core/transaction-pool/src/tests.rs index cab44f49cc..c0438475d4 100644 --- a/core/transaction-pool/src/tests.rs +++ b/core/transaction-pool/src/tests.rs @@ -14,161 +14,183 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
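A quick illustration of the `IntoPoolError` conversion reformatted above, since its purpose is easy to miss in the churn: it lets a caller peel the pool-specific error back out of the wrapper `error_chain!` type. This is a minimal sketch, not repository code; the helper name `classify` is hypothetical, and it assumes only the `into_pool_error` signature shown in the hunk above.

use txpool::IntoPoolError;

// Hypothetical helper: handle pool errors differently from client errors.
fn classify(err: error::Error) -> String {
    match err.into_pool_error() {
        // The wrapper held a txpool::error::Error; treat it as a pool error.
        Ok(pool_err) => format!("pool error: {:?}", pool_err),
        // Anything else (e.g. a client error) comes back unchanged.
        Err(other) => format!("non-pool error: {:?}", other),
    }
}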
- use super::*; use parity_codec::Encode; -use txpool::{self, Pool}; -use test_client::{runtime::{AccountId, Block, Hash, Index, Extrinsic, Transfer}, AccountKeyring::{self, *}}; use sr_primitives::{ - generic::{self, BlockId}, - traits::{Hash as HashT, BlakeTwo256}, - transaction_validity::TransactionValidity, + generic::{self, BlockId}, + traits::{BlakeTwo256, Hash as HashT}, + transaction_validity::TransactionValidity, +}; +use test_client::{ + runtime::{AccountId, Block, Extrinsic, Hash, Index, Transfer}, + AccountKeyring::{self, *}, }; +use txpool::{self, Pool}; struct TestApi; impl TestApi { - fn default() -> Self { - TestApi - } + fn default() -> Self { + TestApi + } } impl txpool::ChainApi for TestApi { - type Block = Block; - type Hash = Hash; - type Error = error::Error; - - fn validate_transaction(&self, at: &BlockId, uxt: txpool::ExtrinsicFor) -> error::Result { - let expected = index(at); - let requires = if expected == uxt.transfer().nonce { - vec![] - } else { - vec![vec![uxt.transfer().nonce as u8 - 1]] - }; - let provides = vec![vec![uxt.transfer().nonce as u8]]; - - Ok(TransactionValidity::Valid { - priority: 1, - requires, - provides, - longevity: 64, - }) - } - - fn block_id_to_number(&self, at: &BlockId) -> error::Result>> { - Ok(Some(number_of(at))) - } - - fn block_id_to_hash(&self, at: &BlockId) -> error::Result>> { - Ok(match at { - generic::BlockId::Hash(x) => Some(x.clone()), - _ => Some(Default::default()), - }) - } - - fn hash_and_length(&self, ex: &txpool::ExtrinsicFor) -> (Self::Hash, usize) { - let encoded = ex.encode(); - (BlakeTwo256::hash(&encoded), encoded.len()) - } - + type Block = Block; + type Hash = Hash; + type Error = error::Error; + + fn validate_transaction( + &self, + at: &BlockId, + uxt: txpool::ExtrinsicFor, + ) -> error::Result { + let expected = index(at); + let requires = if expected == uxt.transfer().nonce { + vec![] + } else { + vec![vec![uxt.transfer().nonce as u8 - 1]] + }; + let provides = vec![vec![uxt.transfer().nonce as u8]]; + + Ok(TransactionValidity::Valid { + priority: 1, + requires, + provides, + longevity: 64, + }) + } + + fn block_id_to_number( + &self, + at: &BlockId, + ) -> error::Result>> { + Ok(Some(number_of(at))) + } + + fn block_id_to_hash( + &self, + at: &BlockId, + ) -> error::Result>> { + Ok(match at { + generic::BlockId::Hash(x) => Some(x.clone()), + _ => Some(Default::default()), + }) + } + + fn hash_and_length(&self, ex: &txpool::ExtrinsicFor) -> (Self::Hash, usize) { + let encoded = ex.encode(); + (BlakeTwo256::hash(&encoded), encoded.len()) + } } fn index(at: &BlockId) -> u64 { - 209 + number_of(at) + 209 + number_of(at) } fn number_of(at: &BlockId) -> u64 { - match at { - generic::BlockId::Number(n) => *n as u64, - _ => 0, - } + match at { + generic::BlockId::Number(n) => *n as u64, + _ => 0, + } } fn uxt(who: AccountKeyring, nonce: Index) -> Extrinsic { - let transfer = Transfer { - from: who.into(), - to: AccountId::default(), - nonce, - amount: 1, - }; - let signature = transfer.using_encoded(|e| who.sign(e)); - Extrinsic::Transfer(transfer, signature.into()) + let transfer = Transfer { + from: who.into(), + to: AccountId::default(), + nonce, + amount: 1, + }; + let signature = transfer.using_encoded(|e| who.sign(e)); + Extrinsic::Transfer(transfer, signature.into()) } fn pool() -> Pool { - Pool::new(Default::default(), TestApi::default()) + Pool::new(Default::default(), TestApi::default()) } #[test] fn submission_should_work() { - let pool = pool(); - assert_eq!(209, index(&BlockId::number(0))); - 
pool.submit_one(&BlockId::number(0), uxt(Alice, 209)).unwrap(); + let pool = pool(); + assert_eq!(209, index(&BlockId::number(0))); + pool.submit_one(&BlockId::number(0), uxt(Alice, 209)) + .unwrap(); - let pending: Vec<_> = pool.ready().map(|a| a.data.transfer().nonce).collect(); - assert_eq!(pending, vec![209]); + let pending: Vec<_> = pool.ready().map(|a| a.data.transfer().nonce).collect(); + assert_eq!(pending, vec![209]); } #[test] fn multiple_submission_should_work() { - let pool = pool(); - pool.submit_one(&BlockId::number(0), uxt(Alice, 209)).unwrap(); - pool.submit_one(&BlockId::number(0), uxt(Alice, 210)).unwrap(); - - let pending: Vec<_> = pool.ready().map(|a| a.data.transfer().nonce).collect(); - assert_eq!(pending, vec![209, 210]); + let pool = pool(); + pool.submit_one(&BlockId::number(0), uxt(Alice, 209)) + .unwrap(); + pool.submit_one(&BlockId::number(0), uxt(Alice, 210)) + .unwrap(); + + let pending: Vec<_> = pool.ready().map(|a| a.data.transfer().nonce).collect(); + assert_eq!(pending, vec![209, 210]); } #[test] fn early_nonce_should_be_culled() { - let pool = pool(); - pool.submit_one(&BlockId::number(0), uxt(Alice, 208)).unwrap(); + let pool = pool(); + pool.submit_one(&BlockId::number(0), uxt(Alice, 208)) + .unwrap(); - let pending: Vec<_> = pool.ready().map(|a| a.data.transfer().nonce).collect(); - assert_eq!(pending, Vec::::new()); + let pending: Vec<_> = pool.ready().map(|a| a.data.transfer().nonce).collect(); + assert_eq!(pending, Vec::::new()); } #[test] fn late_nonce_should_be_queued() { - let pool = pool(); + let pool = pool(); - pool.submit_one(&BlockId::number(0), uxt(Alice, 210)).unwrap(); - let pending: Vec<_> = pool.ready().map(|a| a.data.transfer().nonce).collect(); - assert_eq!(pending, Vec::::new()); + pool.submit_one(&BlockId::number(0), uxt(Alice, 210)) + .unwrap(); + let pending: Vec<_> = pool.ready().map(|a| a.data.transfer().nonce).collect(); + assert_eq!(pending, Vec::::new()); - pool.submit_one(&BlockId::number(0), uxt(Alice, 209)).unwrap(); - let pending: Vec<_> = pool.ready().map(|a| a.data.transfer().nonce).collect(); - assert_eq!(pending, vec![209, 210]); + pool.submit_one(&BlockId::number(0), uxt(Alice, 209)) + .unwrap(); + let pending: Vec<_> = pool.ready().map(|a| a.data.transfer().nonce).collect(); + assert_eq!(pending, vec![209, 210]); } #[test] fn prune_tags_should_work() { - let pool = pool(); - pool.submit_one(&BlockId::number(0), uxt(Alice, 209)).unwrap(); - pool.submit_one(&BlockId::number(0), uxt(Alice, 210)).unwrap(); + let pool = pool(); + pool.submit_one(&BlockId::number(0), uxt(Alice, 209)) + .unwrap(); + pool.submit_one(&BlockId::number(0), uxt(Alice, 210)) + .unwrap(); - let pending: Vec<_> = pool.ready().map(|a| a.data.transfer().nonce).collect(); - assert_eq!(pending, vec![209, 210]); + let pending: Vec<_> = pool.ready().map(|a| a.data.transfer().nonce).collect(); + assert_eq!(pending, vec![209, 210]); - pool.prune_tags(&BlockId::number(1), vec![vec![209]], vec![]).unwrap(); + pool.prune_tags(&BlockId::number(1), vec![vec![209]], vec![]) + .unwrap(); - let pending: Vec<_> = pool.ready().map(|a| a.data.transfer().nonce).collect(); - assert_eq!(pending, vec![210]); + let pending: Vec<_> = pool.ready().map(|a| a.data.transfer().nonce).collect(); + assert_eq!(pending, vec![210]); } #[test] fn should_ban_invalid_transactions() { - let pool = pool(); - let uxt = uxt(Alice, 209); - let hash = pool.submit_one(&BlockId::number(0), uxt.clone()).unwrap(); - pool.remove_invalid(&[hash]); - pool.submit_one(&BlockId::number(0), 
uxt.clone()).unwrap_err(); - - // when - let pending: Vec<_> = pool.ready().map(|a| a.data.transfer().nonce).collect(); - assert_eq!(pending, Vec::::new()); - - // then - pool.submit_one(&BlockId::number(0), uxt.clone()).unwrap_err(); + let pool = pool(); + let uxt = uxt(Alice, 209); + let hash = pool.submit_one(&BlockId::number(0), uxt.clone()).unwrap(); + pool.remove_invalid(&[hash]); + pool.submit_one(&BlockId::number(0), uxt.clone()) + .unwrap_err(); + + // when + let pending: Vec<_> = pool.ready().map(|a| a.data.transfer().nonce).collect(); + assert_eq!(pending, Vec::::new()); + + // then + pool.submit_one(&BlockId::number(0), uxt.clone()) + .unwrap_err(); } diff --git a/core/trie/benches/bench.rs b/core/trie/benches/bench.rs index 179dc6aaf8..4f95b79aea 100644 --- a/core/trie/benches/bench.rs +++ b/core/trie/benches/bench.rs @@ -14,19 +14,19 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use criterion::{Criterion, criterion_group, criterion_main}; +use criterion::{criterion_group, criterion_main, Criterion}; criterion_group!(benches, benchmark); criterion_main!(benches); fn benchmark(c: &mut Criterion) { - trie_bench::standard_benchmark::< - substrate_primitives::Blake2Hasher, - substrate_trie::NodeCodec, - substrate_trie::TrieStream, - >(c, "substrate-blake2"); - trie_bench::standard_benchmark::< - keccak_hasher::KeccakHasher, - substrate_trie::NodeCodec, - substrate_trie::TrieStream, - >(c, "substrate-keccak"); + trie_bench::standard_benchmark::< + substrate_primitives::Blake2Hasher, + substrate_trie::NodeCodec, + substrate_trie::TrieStream, + >(c, "substrate-blake2"); + trie_bench::standard_benchmark::< + keccak_hasher::KeccakHasher, + substrate_trie::NodeCodec, + substrate_trie::TrieStream, + >(c, "substrate-keccak"); } diff --git a/core/trie/src/error.rs b/core/trie/src/error.rs index c717f45639..83a8e55684 100644 --- a/core/trie/src/error.rs +++ b/core/trie/src/error.rs @@ -6,24 +6,24 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::fmt; use std::error::Error as StdError; +use std::fmt; #[derive(Debug, PartialEq, Eq, Clone)] /// Error concerning the Parity-Codec based decoder. pub enum Error { - /// Bad format. - BadFormat, + /// Bad format. + BadFormat, } impl StdError for Error { - fn description(&self) -> &str { - "codec error" - } + fn description(&self) -> &str { + "codec error" + } } impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Debug::fmt(&self, f) - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Debug::fmt(&self, f) + } } diff --git a/core/trie/src/lib.rs b/core/trie/src/lib.rs index 999f1e67c9..656a3b6273 100644 --- a/core/trie/src/lib.rs +++ b/core/trie/src/lib.rs @@ -19,21 +19,21 @@ // FIXME: no_std - https://github.com/paritytech/substrate/issues/1574 mod error; -mod node_header; mod node_codec; +mod node_header; mod trie_stream; -use hash_db::Hasher; /// Our `NodeCodec`-specific error. pub use error::Error; -/// The Substrate format implementation of `TrieStream`. -pub use trie_stream::TrieStream; +use hash_db::Hasher; +/// Various re-exports from the `memory-db` crate. +pub use memory_db::{prefixed_key, KeyFunction}; /// The Substrate format implementation of `NodeCodec`. pub use node_codec::NodeCodec; /// Various re-exports from the `trie-db` crate. -pub use trie_db::{Trie, TrieMut, DBValue, Recorder, Query}; -/// Various re-exports from the `memory-db` crate. 
-pub use memory_db::{KeyFunction, prefixed_key}; +pub use trie_db::{DBValue, Query, Recorder, Trie, TrieMut}; +/// The Substrate format implementation of `TrieStream`. +pub use trie_stream::TrieStream; /// As in `trie_db`, but less generic, error type for the crate. pub type TrieError = trie_db::TrieError; @@ -59,209 +59,235 @@ pub type TrieDBMut<'a, H> = trie_db::TrieDBMut<'a, H, NodeCodec>; pub type Lookup<'a, H, Q> = trie_db::Lookup<'a, H, NodeCodec, Q>; /// Determine a trie root given its ordered contents, closed form. -pub fn trie_root(input: I) -> H::Out where - I: IntoIterator, - A: AsRef<[u8]> + Ord, - B: AsRef<[u8]>, +pub fn trie_root(input: I) -> H::Out +where + I: IntoIterator, + A: AsRef<[u8]> + Ord, + B: AsRef<[u8]>, { - trie_root::trie_root::(input) + trie_root::trie_root::(input) } /// Determine a trie root given a hash DB and delta values. pub fn delta_trie_root( - db: &mut DB, - mut root: H::Out, - delta: I -) -> Result>> where - I: IntoIterator)>, - A: AsRef<[u8]> + Ord, - B: AsRef<[u8]>, - DB: hash_db::HashDB, + db: &mut DB, + mut root: H::Out, + delta: I, +) -> Result>> +where + I: IntoIterator)>, + A: AsRef<[u8]> + Ord, + B: AsRef<[u8]>, + DB: hash_db::HashDB, { - { - let mut trie = TrieDBMut::::from_existing(&mut *db, &mut root)?; - - for (key, change) in delta { - match change { - Some(val) => trie.insert(key.as_ref(), val.as_ref())?, - None => trie.remove(key.as_ref())?, - }; - } - } - - Ok(root) + { + let mut trie = TrieDBMut::::from_existing(&mut *db, &mut root)?; + + for (key, change) in delta { + match change { + Some(val) => trie.insert(key.as_ref(), val.as_ref())?, + None => trie.remove(key.as_ref())?, + }; + } + } + + Ok(root) } /// Read a value from the trie. pub fn read_trie_value>( - db: &DB, - root: &H::Out, - key: &[u8] + db: &DB, + root: &H::Out, + key: &[u8], ) -> Result>, Box>> { - Ok(TrieDB::::new(&*db, root)?.get(key).map(|x| x.map(|val| val.to_vec()))?) + Ok(TrieDB::::new(&*db, root)? + .get(key) + .map(|x| x.map(|val| val.to_vec()))?) } /// Read a value from the trie with given Query. -pub fn read_trie_value_with, DB: hash_db::HashDBRef>( - db: &DB, - root: &H::Out, - key: &[u8], - query: Q +pub fn read_trie_value_with< + H: Hasher, + Q: Query, + DB: hash_db::HashDBRef, +>( + db: &DB, + root: &H::Out, + key: &[u8], + query: Q, ) -> Result>, Box>> { - Ok(TrieDB::::new(&*db, root)?.get_with(key, query).map(|x| x.map(|val| val.to_vec()))?) + Ok(TrieDB::::new(&*db, root)? + .get_with(key, query) + .map(|x| x.map(|val| val.to_vec()))?) } /// Determine a trie root node's data given its ordered contents, closed form. -pub fn unhashed_trie(input: I) -> Vec where - I: IntoIterator, - A: AsRef<[u8]> + Ord, - B: AsRef<[u8]>, +pub fn unhashed_trie(input: I) -> Vec +where + I: IntoIterator, + A: AsRef<[u8]> + Ord, + B: AsRef<[u8]>, { - trie_root::unhashed_trie::(input) + trie_root::unhashed_trie::(input) } /// A trie root formed from the items, with keys attached according to their /// compact-encoded index (using `parity-codec` crate). pub fn ordered_trie_root(input: I) -> H::Out where - I: IntoIterator, - A: AsRef<[u8]>, + I: IntoIterator, + A: AsRef<[u8]>, { - trie_root::(input - .into_iter() - .enumerate() - .map(|(i, v)| (codec::Encode::encode(&codec::Compact(i as u32)), v)) - ) + trie_root::( + input + .into_iter() + .enumerate() + .map(|(i, v)| (codec::Encode::encode(&codec::Compact(i as u32)), v)), + ) } /// Determine whether a child trie key is valid. `child_trie_root` and `child_delta_trie_root` can panic if invalid value is provided to them. 
pub fn is_child_trie_key_valid(_storage_key: &[u8]) -> bool { - true + true } /// Determine the default child trie root. pub fn default_child_trie_root(_storage_key: &[u8]) -> Vec { - let mut db = MemoryDB::default(); - let mut root = H::Out::default(); - let mut empty = TrieDBMut::::new(&mut db, &mut root); - empty.commit(); - empty.root().as_ref().to_vec() + let mut db = MemoryDB::default(); + let mut root = H::Out::default(); + let mut empty = TrieDBMut::::new(&mut db, &mut root); + empty.commit(); + empty.root().as_ref().to_vec() } /// Determine a child trie root given its ordered contents, closed form. H is the default hasher, but a generic /// implementation may ignore this type parameter and use other hashers. -pub fn child_trie_root(_storage_key: &[u8], input: I) -> Vec where - I: IntoIterator, - A: AsRef<[u8]> + Ord, - B: AsRef<[u8]>, +pub fn child_trie_root(_storage_key: &[u8], input: I) -> Vec +where + I: IntoIterator, + A: AsRef<[u8]> + Ord, + B: AsRef<[u8]>, { - trie_root::(input).as_ref().iter().cloned().collect() + trie_root::(input) + .as_ref() + .iter() + .cloned() + .collect() } /// Determine a child trie root given a hash DB and delta values. H is the default hasher, but a generic implementation may ignore this type parameter and use other hashers. pub fn child_delta_trie_root( - _storage_key: &[u8], - db: &mut DB, - root_vec: Vec, - delta: I -) -> Result, Box>> where - I: IntoIterator)>, - A: AsRef<[u8]> + Ord, - B: AsRef<[u8]>, - DB: hash_db::HashDB + hash_db::PlainDB, + _storage_key: &[u8], + db: &mut DB, + root_vec: Vec, + delta: I, +) -> Result, Box>> +where + I: IntoIterator)>, + A: AsRef<[u8]> + Ord, + B: AsRef<[u8]>, + DB: hash_db::HashDB + hash_db::PlainDB, { - let mut root = H::Out::default(); - root.as_mut().copy_from_slice(&root_vec); // root is fetched from DB, not writable by runtime, so it's always valid. + let mut root = H::Out::default(); + root.as_mut().copy_from_slice(&root_vec); // root is fetched from DB, not writable by runtime, so it's always valid. - { - let mut trie = TrieDBMut::::from_existing(&mut *db, &mut root)?; + { + let mut trie = TrieDBMut::::from_existing(&mut *db, &mut root)?; - for (key, change) in delta { - match change { - Some(val) => trie.insert(key.as_ref(), val.as_ref())?, - None => trie.remove(key.as_ref())?, - }; - } - } + for (key, change) in delta { + match change { + Some(val) => trie.insert(key.as_ref(), val.as_ref())?, + None => trie.remove(key.as_ref())?, + }; + } + } - Ok(root.as_ref().to_vec()) + Ok(root.as_ref().to_vec()) } /// Call `f` for all keys in a child trie. pub fn for_keys_in_child_trie( - _storage_key: &[u8], - db: &DB, - root_slice: &[u8], - mut f: F -) -> Result<(), Box>> where - DB: hash_db::HashDBRef + hash_db::PlainDBRef, + _storage_key: &[u8], + db: &DB, + root_slice: &[u8], + mut f: F, +) -> Result<(), Box>> +where + DB: hash_db::HashDBRef + hash_db::PlainDBRef, { - let mut root = H::Out::default(); - root.as_mut().copy_from_slice(root_slice); // root is fetched from DB, not writable by runtime, so it's always valid. + let mut root = H::Out::default(); + root.as_mut().copy_from_slice(root_slice); // root is fetched from DB, not writable by runtime, so it's always valid. - let trie = TrieDB::::new(&*db, &root)?; - let iter = trie.iter()?; + let trie = TrieDB::::new(&*db, &root)?; + let iter = trie.iter()?; - for x in iter { - let (key, _) = x?; - f(&key); - } + for x in iter { + let (key, _) = x?; + f(&key); + } - Ok(()) + Ok(()) } /// Record all keys for a given root. 
pub fn record_all_keys( - db: &DB, - root: &H::Out, - recorder: &mut Recorder -) -> Result<(), Box>> where - DB: hash_db::HashDBRef + db: &DB, + root: &H::Out, + recorder: &mut Recorder, +) -> Result<(), Box>> +where + DB: hash_db::HashDBRef, { - let trie = TrieDB::::new(&*db, root)?; - let iter = trie.iter()?; + let trie = TrieDB::::new(&*db, root)?; + let iter = trie.iter()?; - for x in iter { - let (key, _) = x?; + for x in iter { + let (key, _) = x?; - // there's currently no API like iter_with() - // => use iter to enumerate all keys AND lookup each - // key using get_with - trie.get_with(&key, &mut *recorder)?; - } + // there's currently no API like iter_with() + // => use iter to enumerate all keys AND lookup each + // key using get_with + trie.get_with(&key, &mut *recorder)?; + } - Ok(()) + Ok(()) } /// Read a value from the child trie. pub fn read_child_trie_value( - _storage_key: &[u8], - db: &DB, - root_slice: &[u8], - key: &[u8] -) -> Result>, Box>> where - DB: hash_db::HashDBRef + hash_db::PlainDBRef, + _storage_key: &[u8], + db: &DB, + root_slice: &[u8], + key: &[u8], +) -> Result>, Box>> +where + DB: hash_db::HashDBRef + hash_db::PlainDBRef, { - let mut root = H::Out::default(); - root.as_mut().copy_from_slice(root_slice); // root is fetched from DB, not writable by runtime, so it's always valid. + let mut root = H::Out::default(); + root.as_mut().copy_from_slice(root_slice); // root is fetched from DB, not writable by runtime, so it's always valid. - Ok(TrieDB::::new(&*db, &root)?.get(key).map(|x| x.map(|val| val.to_vec()))?) + Ok(TrieDB::::new(&*db, &root)? + .get(key) + .map(|x| x.map(|val| val.to_vec()))?) } /// Read a value from the child trie with given query. -pub fn read_child_trie_value_with, DB>( - _storage_key: &[u8], - db: &DB, - root_slice: &[u8], - key: &[u8], - query: Q -) -> Result>, Box>> where - DB: hash_db::HashDBRef + hash_db::PlainDBRef, +pub fn read_child_trie_value_with, DB>( + _storage_key: &[u8], + db: &DB, + root_slice: &[u8], + key: &[u8], + query: Q, +) -> Result>, Box>> +where + DB: hash_db::HashDBRef + hash_db::PlainDBRef, { - let mut root = H::Out::default(); - root.as_mut().copy_from_slice(root_slice); // root is fetched from DB, not writable by runtime, so it's always valid. + let mut root = H::Out::default(); + root.as_mut().copy_from_slice(root_slice); // root is fetched from DB, not writable by runtime, so it's always valid. - Ok(TrieDB::::new(&*db, &root)?.get_with(key, query).map(|x| x.map(|val| val.to_vec()))?) + Ok(TrieDB::::new(&*db, &root)? + .get_with(key, query) + .map(|x| x.map(|val| val.to_vec()))?) 
} // Utilities (not exported): @@ -274,317 +300,359 @@ const EXTENSION_NODE_BIG: u8 = 253; const BRANCH_NODE_NO_VALUE: u8 = 254; const BRANCH_NODE_WITH_VALUE: u8 = 255; const LEAF_NODE_THRESHOLD: u8 = LEAF_NODE_BIG - LEAF_NODE_OFFSET; -const EXTENSION_NODE_THRESHOLD: u8 = EXTENSION_NODE_BIG - EXTENSION_NODE_OFFSET; //125 +const EXTENSION_NODE_THRESHOLD: u8 = EXTENSION_NODE_BIG - EXTENSION_NODE_OFFSET; //125 const LEAF_NODE_SMALL_MAX: u8 = LEAF_NODE_BIG - 1; const EXTENSION_NODE_SMALL_MAX: u8 = EXTENSION_NODE_BIG - 1; -fn take<'a>(input: &mut &'a[u8], count: usize) -> Option<&'a[u8]> { - if input.len() < count { - return None - } - let r = &(*input)[..count]; - *input = &(*input)[count..]; - Some(r) +fn take<'a>(input: &mut &'a [u8], count: usize) -> Option<&'a [u8]> { + if input.len() < count { + return None; + } + let r = &(*input)[..count]; + *input = &(*input)[count..]; + Some(r) } fn partial_to_key(partial: &[u8], offset: u8, big: u8) -> Vec { - let nibble_count = (partial.len() - 1) * 2 + if partial[0] & 16 == 16 { 1 } else { 0 }; - let (first_byte_small, big_threshold) = (offset, (big - offset) as usize); - let mut output = vec![first_byte_small + nibble_count.min(big_threshold) as u8]; - if nibble_count >= big_threshold { output.push((nibble_count - big_threshold) as u8) } - if nibble_count % 2 == 1 { - output.push(partial[0] & 0x0f); - } - output.extend_from_slice(&partial[1..]); - output + let nibble_count = (partial.len() - 1) * 2 + if partial[0] & 16 == 16 { 1 } else { 0 }; + let (first_byte_small, big_threshold) = (offset, (big - offset) as usize); + let mut output = vec![first_byte_small + nibble_count.min(big_threshold) as u8]; + if nibble_count >= big_threshold { + output.push((nibble_count - big_threshold) as u8) + } + if nibble_count % 2 == 1 { + output.push(partial[0] & 0x0f); + } + output.extend_from_slice(&partial[1..]); + output } fn branch_node(has_value: bool, has_children: impl Iterator) -> [u8; 3] { - let first = if has_value { - BRANCH_NODE_WITH_VALUE - } else { - BRANCH_NODE_NO_VALUE - }; - let mut bitmap: u16 = 0; - let mut cursor: u16 = 1; - for v in has_children { - if v { bitmap |= cursor } - cursor <<= 1; - } - [first, (bitmap % 256 ) as u8, (bitmap / 256 ) as u8] + let first = if has_value { + BRANCH_NODE_WITH_VALUE + } else { + BRANCH_NODE_NO_VALUE + }; + let mut bitmap: u16 = 0; + let mut cursor: u16 = 1; + for v in has_children { + if v { + bitmap |= cursor + } + cursor <<= 1; + } + [first, (bitmap % 256) as u8, (bitmap / 256) as u8] } #[cfg(test)] mod tests { - use super::*; - use codec::{Encode, Compact}; - use substrate_primitives::Blake2Hasher; - use hash_db::{HashDB, Hasher}; - use trie_db::{DBValue, TrieMut, Trie}; - use trie_standardmap::{Alphabet, ValueMode, StandardMap}; - use hex_literal::{hex, hex_impl}; - - fn check_equivalent(input: &Vec<(&[u8], &[u8])>) { - { - let closed_form = trie_root::(input.clone()); - let d = unhashed_trie::(input.clone()); - println!("Data: {:#x?}, {:#x?}", d, Blake2Hasher::hash(&d[..])); - let persistent = { - let mut memdb = MemoryDB::default(); - let mut root = Default::default(); - let mut t = TrieDBMut::::new(&mut memdb, &mut root); - for (x, y) in input.iter().rev() { - t.insert(x, y).unwrap(); - } - t.root().clone() - }; - assert_eq!(closed_form, persistent); - } - } - - fn check_iteration(input: &Vec<(&[u8], &[u8])>) { - let mut memdb = MemoryDB::default(); - let mut root = Default::default(); - { - let mut t = TrieDBMut::::new(&mut memdb, &mut root); - for (x, y) in input.clone() { - t.insert(x, 
y).unwrap(); - } - } - { - let t = TrieDB::::new(&mut memdb, &root).unwrap(); - assert_eq!( - input.iter().map(|(i, j)| (i.to_vec(), j.to_vec())).collect::>(), - t.iter().unwrap().map(|x| x.map(|y| (y.0, y.1.to_vec())).unwrap()).collect::>() - ); - } - } - - #[test] - fn empty_is_equivalent() { - let input: Vec<(&[u8], &[u8])> = vec![]; - check_equivalent(&input); - check_iteration(&input); - } - - #[test] - fn leaf_is_equivalent() { - let input: Vec<(&[u8], &[u8])> = vec![(&[0xaa][..], &[0xbb][..])]; - check_equivalent(&input); - check_iteration(&input); - } - - #[test] - fn branch_is_equivalent() { - let input: Vec<(&[u8], &[u8])> = vec![(&[0xaa][..], &[0x10][..]), (&[0xba][..], &[0x11][..])]; - check_equivalent(&input); - check_iteration(&input); - } - - #[test] - fn extension_and_branch_is_equivalent() { - let input: Vec<(&[u8], &[u8])> = vec![(&[0xaa][..], &[0x10][..]), (&[0xab][..], &[0x11][..])]; - check_equivalent(&input); - check_iteration(&input); - } - - #[test] - fn standard_is_equivalent() { - let st = StandardMap { - alphabet: Alphabet::All, - min_key: 32, - journal_key: 0, - value_mode: ValueMode::Random, - count: 1000, - }; - let mut d = st.make(); - d.sort_unstable_by(|&(ref a, _), &(ref b, _)| a.cmp(b)); - let dr = d.iter().map(|v| (&v.0[..], &v.1[..])).collect(); - check_equivalent(&dr); - check_iteration(&dr); - } - - #[test] - fn extension_and_branch_with_value_is_equivalent() { - let input: Vec<(&[u8], &[u8])> = vec![ - (&[0xaa][..], &[0xa0][..]), - (&[0xaa, 0xaa][..], &[0xaa][..]), - (&[0xaa, 0xbb][..], &[0xab][..]) - ]; - check_equivalent(&input); - check_iteration(&input); - } - - #[test] - fn bigger_extension_and_branch_with_value_is_equivalent() { - let input: Vec<(&[u8], &[u8])> = vec![ - (&[0xaa][..], &[0xa0][..]), - (&[0xaa, 0xaa][..], &[0xaa][..]), - (&[0xaa, 0xbb][..], &[0xab][..]), - (&[0xbb][..], &[0xb0][..]), - (&[0xbb, 0xbb][..], &[0xbb][..]), - (&[0xbb, 0xcc][..], &[0xbc][..]), - ]; - check_equivalent(&input); - check_iteration(&input); - } - - #[test] - fn single_long_leaf_is_equivalent() { - let input: Vec<(&[u8], &[u8])> = vec![(&[0xaa][..], &b"ABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABC"[..]), (&[0xba][..], &[0x11][..])]; - check_equivalent(&input); - check_iteration(&input); - } - - #[test] - fn two_long_leaves_is_equivalent() { - let input: Vec<(&[u8], &[u8])> = vec![ - (&[0xaa][..], &b"ABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABC"[..]), - (&[0xba][..], &b"ABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABC"[..]) - ]; - check_equivalent(&input); - check_iteration(&input); - } - - fn populate_trie<'db>( - db: &'db mut HashDB, - root: &'db mut ::Out, - v: &[(Vec, Vec)] - ) -> TrieDBMut<'db, Blake2Hasher> { - let mut t = TrieDBMut::::new(db, root); - for i in 0..v.len() { - let key: &[u8]= &v[i].0; - let val: &[u8] = &v[i].1; - t.insert(key, val).unwrap(); - } - t - } - - fn unpopulate_trie<'db>(t: &mut TrieDBMut<'db, Blake2Hasher>, v: &[(Vec, Vec)]) { - for i in v { - let key: &[u8]= &i.0; - t.remove(key).unwrap(); - } - } - - #[test] - fn random_should_work() { - let mut seed = ::Out::zero(); - for test_i in 0..10000 { - if test_i % 50 == 0 { - println!("{:?} of 10000 stress tests done", test_i); - } - let x = StandardMap { - alphabet: Alphabet::Custom(b"@QWERTYUIOPASDFGHJKLZXCVBNM[/]^_".to_vec()), - min_key: 5, - journal_key: 0, - value_mode: ValueMode::Index, - count: 100, - }.make_with(seed.as_fixed_bytes_mut()); - - let real = trie_root::(x.clone()); - let mut memdb = 
MemoryDB::default(); - let mut root = Default::default(); - let mut memtrie = populate_trie(&mut memdb, &mut root, &x); - - memtrie.commit(); - if *memtrie.root() != real { - println!("TRIE MISMATCH"); - println!(""); - println!("{:?} vs {:?}", memtrie.root(), real); - for i in &x { - println!("{:#x?} -> {:#x?}", i.0, i.1); - } - } - assert_eq!(*memtrie.root(), real); - unpopulate_trie(&mut memtrie, &x); - memtrie.commit(); - if *memtrie.root() != as trie_db::NodeCodec>::hashed_null_node() { - println!("- TRIE MISMATCH"); - println!(""); - println!("{:?} vs {:?}", memtrie.root(), as trie_db::NodeCodec>::hashed_null_node()); - for i in &x { - println!("{:#x?} -> {:#x?}", i.0, i.1); - } - } - assert_eq!(*memtrie.root(), as trie_db::NodeCodec>::hashed_null_node()); - } - } - - fn to_compact(n: u8) -> u8 { - Compact(n).encode()[0] - } - - #[test] - fn codec_trie_empty() { - let input: Vec<(&[u8], &[u8])> = vec![]; - let trie = unhashed_trie::(input); - println!("trie: {:#x?}", trie); - assert_eq!(trie, vec![0x0]); - } - - #[test] - fn codec_trie_single_tuple() { - let input = vec![ - (vec![0xaa], vec![0xbb]) - ]; - let trie = unhashed_trie::(input); - println!("trie: {:#x?}", trie); - - assert_eq!(trie, vec![ - 0x03, // leaf (0x01) with (+) key of 2 nibbles (0x02) - 0xaa, // key data - to_compact(1), // length of value in bytes as Compact - 0xbb // value data - ]); - } - - #[test] - fn codec_trie_two_tuples_disjoint_keys() { - let input = vec![(&[0x48, 0x19], &[0xfe]), (&[0x13, 0x14], &[0xff])]; - let trie = unhashed_trie::(input); - println!("trie: {:#x?}", trie); - - let mut ex = Vec::::new(); - ex.push(0xfe); // branch, no value - ex.push(0x12); // slots 1 & 4 are taken from 0-7 - ex.push(0x00); // no slots from 8-15 - ex.push(to_compact(0x05)); // first slot: LEAF, 5 bytes long. - ex.push(0x04); // leaf with 3 nibbles - ex.push(0x03); // first nibble - ex.push(0x14); // second & third nibble - ex.push(to_compact(0x01)); // 1 byte data - ex.push(0xff); // value data - ex.push(to_compact(0x05)); // second slot: LEAF, 5 bytes long. 
- ex.push(0x04); // leaf with 3 nibbles - ex.push(0x08); // first nibble - ex.push(0x19); // second & third nibble - ex.push(to_compact(0x01)); // 1 byte data - ex.push(0xfe); // value data - - assert_eq!(trie, ex); - } - - #[test] - fn iterator_works() { - let pairs = vec![ - (hex!("0103000000000000000464").to_vec(), hex!("0400000000").to_vec()), - (hex!("0103000000000000000469").to_vec(), hex!("0401000000").to_vec()), - ]; - - let mut mdb = MemoryDB::default(); - let mut root = Default::default(); - let _ = populate_trie(&mut mdb, &mut root, &pairs); - - let trie = TrieDB::::new(&mdb, &root).unwrap(); - - let iter = trie.iter().unwrap(); - let mut iter_pairs = Vec::new(); - for pair in iter { - let (key, value) = pair.unwrap(); - iter_pairs.push((key, value.to_vec())); - } - - assert_eq!(pairs, iter_pairs); - } + use super::*; + use codec::{Compact, Encode}; + use hash_db::{HashDB, Hasher}; + use hex_literal::{hex, hex_impl}; + use substrate_primitives::Blake2Hasher; + use trie_db::{DBValue, Trie, TrieMut}; + use trie_standardmap::{Alphabet, StandardMap, ValueMode}; + + fn check_equivalent(input: &Vec<(&[u8], &[u8])>) { + { + let closed_form = trie_root::(input.clone()); + let d = unhashed_trie::(input.clone()); + println!("Data: {:#x?}, {:#x?}", d, Blake2Hasher::hash(&d[..])); + let persistent = { + let mut memdb = MemoryDB::default(); + let mut root = Default::default(); + let mut t = TrieDBMut::::new(&mut memdb, &mut root); + for (x, y) in input.iter().rev() { + t.insert(x, y).unwrap(); + } + t.root().clone() + }; + assert_eq!(closed_form, persistent); + } + } + + fn check_iteration(input: &Vec<(&[u8], &[u8])>) { + let mut memdb = MemoryDB::default(); + let mut root = Default::default(); + { + let mut t = TrieDBMut::::new(&mut memdb, &mut root); + for (x, y) in input.clone() { + t.insert(x, y).unwrap(); + } + } + { + let t = TrieDB::::new(&mut memdb, &root).unwrap(); + assert_eq!( + input + .iter() + .map(|(i, j)| (i.to_vec(), j.to_vec())) + .collect::>(), + t.iter() + .unwrap() + .map(|x| x.map(|y| (y.0, y.1.to_vec())).unwrap()) + .collect::>() + ); + } + } + + #[test] + fn empty_is_equivalent() { + let input: Vec<(&[u8], &[u8])> = vec![]; + check_equivalent(&input); + check_iteration(&input); + } + + #[test] + fn leaf_is_equivalent() { + let input: Vec<(&[u8], &[u8])> = vec![(&[0xaa][..], &[0xbb][..])]; + check_equivalent(&input); + check_iteration(&input); + } + + #[test] + fn branch_is_equivalent() { + let input: Vec<(&[u8], &[u8])> = + vec![(&[0xaa][..], &[0x10][..]), (&[0xba][..], &[0x11][..])]; + check_equivalent(&input); + check_iteration(&input); + } + + #[test] + fn extension_and_branch_is_equivalent() { + let input: Vec<(&[u8], &[u8])> = + vec![(&[0xaa][..], &[0x10][..]), (&[0xab][..], &[0x11][..])]; + check_equivalent(&input); + check_iteration(&input); + } + + #[test] + fn standard_is_equivalent() { + let st = StandardMap { + alphabet: Alphabet::All, + min_key: 32, + journal_key: 0, + value_mode: ValueMode::Random, + count: 1000, + }; + let mut d = st.make(); + d.sort_unstable_by(|&(ref a, _), &(ref b, _)| a.cmp(b)); + let dr = d.iter().map(|v| (&v.0[..], &v.1[..])).collect(); + check_equivalent(&dr); + check_iteration(&dr); + } + + #[test] + fn extension_and_branch_with_value_is_equivalent() { + let input: Vec<(&[u8], &[u8])> = vec![ + (&[0xaa][..], &[0xa0][..]), + (&[0xaa, 0xaa][..], &[0xaa][..]), + (&[0xaa, 0xbb][..], &[0xab][..]), + ]; + check_equivalent(&input); + check_iteration(&input); + } + + #[test] + fn bigger_extension_and_branch_with_value_is_equivalent() { + 
let input: Vec<(&[u8], &[u8])> = vec![ + (&[0xaa][..], &[0xa0][..]), + (&[0xaa, 0xaa][..], &[0xaa][..]), + (&[0xaa, 0xbb][..], &[0xab][..]), + (&[0xbb][..], &[0xb0][..]), + (&[0xbb, 0xbb][..], &[0xbb][..]), + (&[0xbb, 0xcc][..], &[0xbc][..]), + ]; + check_equivalent(&input); + check_iteration(&input); + } + + #[test] + fn single_long_leaf_is_equivalent() { + let input: Vec<(&[u8], &[u8])> = vec![ + ( + &[0xaa][..], + &b"ABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABC"[..], + ), + (&[0xba][..], &[0x11][..]), + ]; + check_equivalent(&input); + check_iteration(&input); + } + + #[test] + fn two_long_leaves_is_equivalent() { + let input: Vec<(&[u8], &[u8])> = vec![ + ( + &[0xaa][..], + &b"ABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABC"[..], + ), + ( + &[0xba][..], + &b"ABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABC"[..], + ), + ]; + check_equivalent(&input); + check_iteration(&input); + } + + fn populate_trie<'db>( + db: &'db mut HashDB, + root: &'db mut ::Out, + v: &[(Vec, Vec)], + ) -> TrieDBMut<'db, Blake2Hasher> { + let mut t = TrieDBMut::::new(db, root); + for i in 0..v.len() { + let key: &[u8] = &v[i].0; + let val: &[u8] = &v[i].1; + t.insert(key, val).unwrap(); + } + t + } + + fn unpopulate_trie<'db>(t: &mut TrieDBMut<'db, Blake2Hasher>, v: &[(Vec, Vec)]) { + for i in v { + let key: &[u8] = &i.0; + t.remove(key).unwrap(); + } + } + + #[test] + fn random_should_work() { + let mut seed = ::Out::zero(); + for test_i in 0..10000 { + if test_i % 50 == 0 { + println!("{:?} of 10000 stress tests done", test_i); + } + let x = StandardMap { + alphabet: Alphabet::Custom(b"@QWERTYUIOPASDFGHJKLZXCVBNM[/]^_".to_vec()), + min_key: 5, + journal_key: 0, + value_mode: ValueMode::Index, + count: 100, + } + .make_with(seed.as_fixed_bytes_mut()); + + let real = trie_root::(x.clone()); + let mut memdb = MemoryDB::default(); + let mut root = Default::default(); + let mut memtrie = populate_trie(&mut memdb, &mut root, &x); + + memtrie.commit(); + if *memtrie.root() != real { + println!("TRIE MISMATCH"); + println!(""); + println!("{:?} vs {:?}", memtrie.root(), real); + for i in &x { + println!("{:#x?} -> {:#x?}", i.0, i.1); + } + } + assert_eq!(*memtrie.root(), real); + unpopulate_trie(&mut memtrie, &x); + memtrie.commit(); + if *memtrie.root() + != as trie_db::NodeCodec>::hashed_null_node() + { + println!("- TRIE MISMATCH"); + println!(""); + println!( + "{:?} vs {:?}", + memtrie.root(), + as trie_db::NodeCodec>::hashed_null_node( + ) + ); + for i in &x { + println!("{:#x?} -> {:#x?}", i.0, i.1); + } + } + assert_eq!( + *memtrie.root(), + as trie_db::NodeCodec>::hashed_null_node() + ); + } + } + + fn to_compact(n: u8) -> u8 { + Compact(n).encode()[0] + } + + #[test] + fn codec_trie_empty() { + let input: Vec<(&[u8], &[u8])> = vec![]; + let trie = unhashed_trie::(input); + println!("trie: {:#x?}", trie); + assert_eq!(trie, vec![0x0]); + } + + #[test] + fn codec_trie_single_tuple() { + let input = vec![(vec![0xaa], vec![0xbb])]; + let trie = unhashed_trie::(input); + println!("trie: {:#x?}", trie); + + assert_eq!( + trie, + vec![ + 0x03, // leaf (0x01) with (+) key of 2 nibbles (0x02) + 0xaa, // key data + to_compact(1), // length of value in bytes as Compact + 0xbb // value data + ] + ); + } + + #[test] + fn codec_trie_two_tuples_disjoint_keys() { + let input = vec![(&[0x48, 0x19], &[0xfe]), (&[0x13, 0x14], &[0xff])]; + let trie = unhashed_trie::(input); + println!("trie: {:#x?}", trie); + + let mut ex = Vec::::new(); + 
ex.push(0xfe); // branch, no value + ex.push(0x12); // slots 1 & 4 are taken from 0-7 + ex.push(0x00); // no slots from 8-15 + ex.push(to_compact(0x05)); // first slot: LEAF, 5 bytes long. + ex.push(0x04); // leaf with 3 nibbles + ex.push(0x03); // first nibble + ex.push(0x14); // second & third nibble + ex.push(to_compact(0x01)); // 1 byte data + ex.push(0xff); // value data + ex.push(to_compact(0x05)); // second slot: LEAF, 5 bytes long. + ex.push(0x04); // leaf with 3 nibbles + ex.push(0x08); // first nibble + ex.push(0x19); // second & third nibble + ex.push(to_compact(0x01)); // 1 byte data + ex.push(0xfe); // value data + + assert_eq!(trie, ex); + } + + #[test] + fn iterator_works() { + let pairs = vec![ + ( + hex!("0103000000000000000464").to_vec(), + hex!("0400000000").to_vec(), + ), + ( + hex!("0103000000000000000469").to_vec(), + hex!("0401000000").to_vec(), + ), + ]; + + let mut mdb = MemoryDB::default(); + let mut root = Default::default(); + let _ = populate_trie(&mut mdb, &mut root, &pairs); + + let trie = TrieDB::::new(&mdb, &root).unwrap(); + + let iter = trie.iter().unwrap(); + let mut iter_pairs = Vec::new(); + for pair in iter { + let (key, value) = pair.unwrap(); + iter_pairs.push((key, value.to_vec())); + } + + assert_eq!(pairs, iter_pairs); + } } diff --git a/core/trie/src/node_codec.rs b/core/trie/src/node_codec.rs index b73519e87f..cfce6b7dac 100644 --- a/core/trie/src/node_codec.rs +++ b/core/trie/src/node_codec.rs @@ -16,13 +16,15 @@ //! `NodeCodec` implementation for Substrate's trie format. -use std::marker::PhantomData; -use codec::{Encode, Decode, Compact}; -use hash_db::Hasher; -use trie_db::{self, DBValue, NibbleSlice, node::Node, ChildReference}; +use super::{ + branch_node, node_header::NodeHeader, partial_to_key, take, EMPTY_TRIE, EXTENSION_NODE_BIG, + EXTENSION_NODE_OFFSET, LEAF_NODE_BIG, LEAF_NODE_OFFSET, +}; use crate::error::Error; -use super::{EMPTY_TRIE, LEAF_NODE_OFFSET, LEAF_NODE_BIG, EXTENSION_NODE_OFFSET, - EXTENSION_NODE_BIG, take, partial_to_key, node_header::NodeHeader, branch_node}; +use codec::{Compact, Decode, Encode}; +use hash_db::Hasher; +use std::marker::PhantomData; +use trie_db::{self, node::Node, ChildReference, DBValue, NibbleSlice}; /// Concrete implementation of a `NodeCodec` with Parity Codec encoding, generic over the `Hasher` #[derive(Default, Clone)] @@ -33,109 +35,120 @@ pub struct NodeCodec(PhantomData); // but due to the current limitations of Rust const evaluation we can't // do `const HASHED_NULL_NODE: H::Out = H::Out( … … )`. Perhaps one day soon? impl trie_db::NodeCodec for NodeCodec { - type Error = Error; + type Error = Error; - fn hashed_null_node() -> H::Out { - H::hash(&[0u8][..]) - } + fn hashed_null_node() -> H::Out { + H::hash(&[0u8][..]) + } - fn decode(data: &[u8]) -> ::std::result::Result { - use Error::BadFormat; - let input = &mut &*data; - match NodeHeader::decode(input).ok_or(BadFormat)? { - NodeHeader::Null => Ok(Node::Empty), - NodeHeader::Branch(has_value) => { - let bitmap = u16::decode(input).ok_or(BadFormat)?; - let value = if has_value { - let count = >::decode(input).ok_or(BadFormat)?.0 as usize; - Some(take(input, count).ok_or(BadFormat)?) 
- } else { - None - }; - let mut children = [None; 16]; - let mut pot_cursor = 1; - for i in 0..16 { - if bitmap & pot_cursor != 0 { - let count = >::decode(input).ok_or(BadFormat)?.0 as usize; - children[i] = Some(take(input, count).ok_or(BadFormat)?); - } - pot_cursor <<= 1; - } - Ok(Node::Branch(children, value)) - } - NodeHeader::Extension(nibble_count) => { - let nibble_data = take(input, (nibble_count + 1) / 2).ok_or(BadFormat)?; - let nibble_slice = NibbleSlice::new_offset(nibble_data, nibble_count % 2); - let count = >::decode(input).ok_or(BadFormat)?.0 as usize; - Ok(Node::Extension(nibble_slice, take(input, count).ok_or(BadFormat)?)) - } - NodeHeader::Leaf(nibble_count) => { - let nibble_data = take(input, (nibble_count + 1) / 2).ok_or(BadFormat)?; - let nibble_slice = NibbleSlice::new_offset(nibble_data, nibble_count % 2); - let count = >::decode(input).ok_or(BadFormat)?.0 as usize; - Ok(Node::Leaf(nibble_slice, take(input, count).ok_or(BadFormat)?)) - } - } - } + fn decode(data: &[u8]) -> ::std::result::Result { + use Error::BadFormat; + let input = &mut &*data; + match NodeHeader::decode(input).ok_or(BadFormat)? { + NodeHeader::Null => Ok(Node::Empty), + NodeHeader::Branch(has_value) => { + let bitmap = u16::decode(input).ok_or(BadFormat)?; + let value = if has_value { + let count = >::decode(input).ok_or(BadFormat)?.0 as usize; + Some(take(input, count).ok_or(BadFormat)?) + } else { + None + }; + let mut children = [None; 16]; + let mut pot_cursor = 1; + for i in 0..16 { + if bitmap & pot_cursor != 0 { + let count = >::decode(input).ok_or(BadFormat)?.0 as usize; + children[i] = Some(take(input, count).ok_or(BadFormat)?); + } + pot_cursor <<= 1; + } + Ok(Node::Branch(children, value)) + } + NodeHeader::Extension(nibble_count) => { + let nibble_data = take(input, (nibble_count + 1) / 2).ok_or(BadFormat)?; + let nibble_slice = NibbleSlice::new_offset(nibble_data, nibble_count % 2); + let count = >::decode(input).ok_or(BadFormat)?.0 as usize; + Ok(Node::Extension( + nibble_slice, + take(input, count).ok_or(BadFormat)?, + )) + } + NodeHeader::Leaf(nibble_count) => { + let nibble_data = take(input, (nibble_count + 1) / 2).ok_or(BadFormat)?; + let nibble_slice = NibbleSlice::new_offset(nibble_data, nibble_count % 2); + let count = >::decode(input).ok_or(BadFormat)?.0 as usize; + Ok(Node::Leaf( + nibble_slice, + take(input, count).ok_or(BadFormat)?, + )) + } + } + } - fn try_decode_hash(data: &[u8]) -> Option { - if data.len() == H::LENGTH { - let mut r = H::Out::default(); - r.as_mut().copy_from_slice(data); - Some(r) - } else { - None - } - } + fn try_decode_hash(data: &[u8]) -> Option { + if data.len() == H::LENGTH { + let mut r = H::Out::default(); + r.as_mut().copy_from_slice(data); + Some(r) + } else { + None + } + } - fn is_empty_node(data: &[u8]) -> bool { - data == &[EMPTY_TRIE][..] - } - fn empty_node() -> Vec { - vec![EMPTY_TRIE] - } + fn is_empty_node(data: &[u8]) -> bool { + data == &[EMPTY_TRIE][..] + } + fn empty_node() -> Vec { + vec![EMPTY_TRIE] + } - // FIXME: refactor this so that `partial` isn't already encoded with HPE. Should just be an `impl Iterator`. - fn leaf_node(partial: &[u8], value: &[u8]) -> Vec { - let mut output = partial_to_key(partial, LEAF_NODE_OFFSET, LEAF_NODE_BIG); - value.encode_to(&mut output); - output - } + // FIXME: refactor this so that `partial` isn't already encoded with HPE. Should just be an `impl Iterator`. 
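+ // A minimal worked example of the layout `leaf_node` emits, assuming the
+ // constants decoded in node_header.rs (LEAF_NODE_OFFSET = 1) and mirroring
+ // the `codec_trie_single_tuple` test in this patch: for the single pair
+ // (0xaa -> 0xbb) the whole trie is one leaf node encoded as
+ //   0x03  // header: LEAF_NODE_OFFSET (1) + 2 nibbles
+ //   0xaa  // the HPE-encoded partial key
+ //   0x04  // Compact(1): value length in bytes
+ //   0xbb  // the value itself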
+ fn leaf_node(partial: &[u8], value: &[u8]) -> Vec { + let mut output = partial_to_key(partial, LEAF_NODE_OFFSET, LEAF_NODE_BIG); + value.encode_to(&mut output); + output + } - // FIXME: refactor this so that `partial` isn't already encoded with HPE. Should just be an `impl Iterator`. - fn ext_node(partial: &[u8], child: ChildReference) -> Vec { - let mut output = partial_to_key(partial, EXTENSION_NODE_OFFSET, EXTENSION_NODE_BIG); - match child { - ChildReference::Hash(h) => - h.as_ref().encode_to(&mut output), - ChildReference::Inline(inline_data, len) => - (&AsRef::<[u8]>::as_ref(&inline_data)[..len]).encode_to(&mut output), - }; - output - } + // FIXME: refactor this so that `partial` isn't already encoded with HPE. Should just be an `impl Iterator`. + fn ext_node(partial: &[u8], child: ChildReference) -> Vec { + let mut output = partial_to_key(partial, EXTENSION_NODE_OFFSET, EXTENSION_NODE_BIG); + match child { + ChildReference::Hash(h) => h.as_ref().encode_to(&mut output), + ChildReference::Inline(inline_data, len) => { + (&AsRef::<[u8]>::as_ref(&inline_data)[..len]).encode_to(&mut output) + } + }; + output + } - fn branch_node(children: I, maybe_value: Option) -> Vec - where I: IntoIterator>> + Iterator>> - { - let mut output = vec![0, 0, 0]; - let have_value = if let Some(value) = maybe_value { - (&*value).encode_to(&mut output); - true - } else { - false - }; - let prefix = branch_node(have_value, children.map(|maybe_child| match maybe_child { - Some(ChildReference::Hash(h)) => { - h.as_ref().encode_to(&mut output); - true - } - Some(ChildReference::Inline(inline_data, len)) => { - (&AsRef::<[u8]>::as_ref(&inline_data)[..len]).encode_to(&mut output); - true - } - None => false, - })); - output[0..3].copy_from_slice(&prefix[..]); - output - } + fn branch_node(children: I, maybe_value: Option) -> Vec + where + I: IntoIterator>> + + Iterator>>, + { + let mut output = vec![0, 0, 0]; + let have_value = if let Some(value) = maybe_value { + (&*value).encode_to(&mut output); + true + } else { + false + }; + let prefix = branch_node( + have_value, + children.map(|maybe_child| match maybe_child { + Some(ChildReference::Hash(h)) => { + h.as_ref().encode_to(&mut output); + true + } + Some(ChildReference::Inline(inline_data, len)) => { + (&AsRef::<[u8]>::as_ref(&inline_data)[..len]).encode_to(&mut output); + true + } + None => false, + }), + ); + output[0..3].copy_from_slice(&prefix[..]); + output + } } diff --git a/core/trie/src/node_header.rs b/core/trie/src/node_header.rs index 4f7617c068..f074f8b257 100644 --- a/core/trie/src/node_header.rs +++ b/core/trie/src/node_header.rs @@ -16,62 +16,82 @@ //! The node header. -use codec::{Encode, Decode, Input, Output}; -use super::{EMPTY_TRIE, LEAF_NODE_OFFSET, LEAF_NODE_BIG, EXTENSION_NODE_OFFSET, - EXTENSION_NODE_BIG, BRANCH_NODE_NO_VALUE, BRANCH_NODE_WITH_VALUE, LEAF_NODE_THRESHOLD, - EXTENSION_NODE_THRESHOLD, LEAF_NODE_SMALL_MAX, EXTENSION_NODE_SMALL_MAX}; +use super::{ + BRANCH_NODE_NO_VALUE, BRANCH_NODE_WITH_VALUE, EMPTY_TRIE, EXTENSION_NODE_BIG, + EXTENSION_NODE_OFFSET, EXTENSION_NODE_SMALL_MAX, EXTENSION_NODE_THRESHOLD, LEAF_NODE_BIG, + LEAF_NODE_OFFSET, LEAF_NODE_SMALL_MAX, LEAF_NODE_THRESHOLD, +}; +use codec::{Decode, Encode, Input, Output}; /// A node header. 
#[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum NodeHeader { - Null, - Branch(bool), - Extension(usize), - Leaf(usize), + Null, + Branch(bool), + Extension(usize), + Leaf(usize), } impl Encode for NodeHeader { - fn encode_to(&self, output: &mut T) { - match self { - NodeHeader::Null => output.push_byte(EMPTY_TRIE), + fn encode_to(&self, output: &mut T) { + match self { + NodeHeader::Null => output.push_byte(EMPTY_TRIE), - NodeHeader::Branch(true) => output.push_byte(BRANCH_NODE_WITH_VALUE), - NodeHeader::Branch(false) => output.push_byte(BRANCH_NODE_NO_VALUE), + NodeHeader::Branch(true) => output.push_byte(BRANCH_NODE_WITH_VALUE), + NodeHeader::Branch(false) => output.push_byte(BRANCH_NODE_NO_VALUE), - NodeHeader::Leaf(nibble_count) if *nibble_count < LEAF_NODE_THRESHOLD as usize => - output.push_byte(LEAF_NODE_OFFSET + *nibble_count as u8), - NodeHeader::Leaf(nibble_count) => { - output.push_byte(LEAF_NODE_BIG); - output.push_byte((*nibble_count - LEAF_NODE_THRESHOLD as usize) as u8); - } + NodeHeader::Leaf(nibble_count) if *nibble_count < LEAF_NODE_THRESHOLD as usize => { + output.push_byte(LEAF_NODE_OFFSET + *nibble_count as u8) + } + NodeHeader::Leaf(nibble_count) => { + output.push_byte(LEAF_NODE_BIG); + output.push_byte((*nibble_count - LEAF_NODE_THRESHOLD as usize) as u8); + } - NodeHeader::Extension(nibble_count) if *nibble_count < EXTENSION_NODE_THRESHOLD as usize => - output.push_byte(EXTENSION_NODE_OFFSET + *nibble_count as u8), - NodeHeader::Extension(nibble_count) => { - output.push_byte(EXTENSION_NODE_BIG); - output.push_byte((*nibble_count - EXTENSION_NODE_THRESHOLD as usize) as u8); - } - } - } + NodeHeader::Extension(nibble_count) + if *nibble_count < EXTENSION_NODE_THRESHOLD as usize => + { + output.push_byte(EXTENSION_NODE_OFFSET + *nibble_count as u8) + } + NodeHeader::Extension(nibble_count) => { + output.push_byte(EXTENSION_NODE_BIG); + output.push_byte((*nibble_count - EXTENSION_NODE_THRESHOLD as usize) as u8); + } + } + } } impl Decode for NodeHeader { - fn decode(input: &mut I) -> Option { - Some(match input.read_byte()? { - EMPTY_TRIE => NodeHeader::Null, // 0 + fn decode(input: &mut I) -> Option { + Some(match input.read_byte()? { + EMPTY_TRIE => NodeHeader::Null, // 0 - i @ LEAF_NODE_OFFSET ... LEAF_NODE_SMALL_MAX => // 1 ... (127 - 1) - NodeHeader::Leaf((i - LEAF_NODE_OFFSET) as usize), - LEAF_NODE_BIG => // 127 - NodeHeader::Leaf(input.read_byte()? as usize + LEAF_NODE_THRESHOLD as usize), + i @ LEAF_NODE_OFFSET...LEAF_NODE_SMALL_MAX => + // 1 ... (127 - 1) + { + NodeHeader::Leaf((i - LEAF_NODE_OFFSET) as usize) + } + LEAF_NODE_BIG => + // 127 + { + NodeHeader::Leaf(input.read_byte()? as usize + LEAF_NODE_THRESHOLD as usize) + } - i @ EXTENSION_NODE_OFFSET ... EXTENSION_NODE_SMALL_MAX =>// 128 ... (253 - 1) - NodeHeader::Extension((i - EXTENSION_NODE_OFFSET) as usize), - EXTENSION_NODE_BIG => // 253 - NodeHeader::Extension(input.read_byte()? as usize + EXTENSION_NODE_THRESHOLD as usize), + i @ EXTENSION_NODE_OFFSET...EXTENSION_NODE_SMALL_MAX => + // 128 ... (253 - 1) + { + NodeHeader::Extension((i - EXTENSION_NODE_OFFSET) as usize) + } + EXTENSION_NODE_BIG => + // 253 + { + NodeHeader::Extension( + input.read_byte()? 
as usize + EXTENSION_NODE_THRESHOLD as usize, + ) + } - BRANCH_NODE_NO_VALUE => NodeHeader::Branch(false), // 254 - BRANCH_NODE_WITH_VALUE => NodeHeader::Branch(true), // 255 - }) - } + BRANCH_NODE_NO_VALUE => NodeHeader::Branch(false), // 254 + BRANCH_NODE_WITH_VALUE => NodeHeader::Branch(true), // 255 + }) + } } diff --git a/core/trie/src/trie_stream.rs b/core/trie/src/trie_stream.rs index e283a512bb..f5c0eb6b91 100644 --- a/core/trie/src/trie_stream.rs +++ b/core/trie/src/trie_stream.rs @@ -14,82 +14,109 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -//! `TrieStream` implementation for Substrate's trie format. +//! `TrieStream` implementation for Substrate's trie format. -use std::iter::once; +use codec::Encode; use hash_db::Hasher; +use std::iter::once; use trie_root; -use codec::Encode; -use super::{EMPTY_TRIE, LEAF_NODE_OFFSET, LEAF_NODE_BIG, EXTENSION_NODE_OFFSET, - EXTENSION_NODE_BIG, branch_node}; +use super::{ + branch_node, EMPTY_TRIE, EXTENSION_NODE_BIG, EXTENSION_NODE_OFFSET, LEAF_NODE_BIG, + LEAF_NODE_OFFSET, +}; /// Codec-flavored TrieStream pub struct TrieStream { - buffer: Vec, + buffer: Vec, } impl TrieStream { - // useful for debugging but not used otherwise - pub fn as_raw(&self) -> &[u8] { &self.buffer } + // useful for debugging but not used otherwise + pub fn as_raw(&self) -> &[u8] { + &self.buffer + } } /// Create a leaf/extension node, encoding a number of nibbles. Note that this /// cannot handle a number of nibbles that is zero or greater than 127 and if /// you attempt to do so *IT WILL PANIC*. fn fuse_nibbles_node<'a>(nibbles: &'a [u8], leaf: bool) -> impl Iterator + 'a { - debug_assert!(nibbles.len() < 255 + 126, "nibbles length too long. what kind of size of key are you trying to include in the trie!?!"); - // We use two ranges of possible values; one for leafs and the other for extensions. - // Each range encodes zero following nibbles up to some maximum. If the maximum is - // reached, then it is considered "big" and a second byte follows it in order to - // encode a further offset to the number of nibbles of up to 255. Beyond that, we - // cannot encode. This shouldn't be a problem though since that allows for keys of - // up to 380 nibbles (190 bytes) and we expect key sizes to be generally 128-bit (16 - // bytes) or, at a push, 384-bit (48 bytes). + debug_assert!(nibbles.len() < 255 + 126, "nibbles length too long. what kind of size of key are you trying to include in the trie!?!"); + // We use two ranges of possible values; one for leafs and the other for extensions. + // Each range encodes zero following nibbles up to some maximum. If the maximum is + // reached, then it is considered "big" and a second byte follows it in order to + // encode a further offset to the number of nibbles of up to 255. Beyond that, we + // cannot encode. This shouldn't be a problem though since that allows for keys of + // up to 380 nibbles (190 bytes) and we expect key sizes to be generally 128-bit (16 + // bytes) or, at a push, 384-bit (48 bytes). 
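+ // A minimal worked example, assuming LEAF_NODE_OFFSET = 1 and
+ // LEAF_NODE_BIG = 127 as decoded in node_header.rs (so the "small" leaf
+ // range covers keys of up to 125 nibbles): the 3-nibble partial key
+ // [0x8, 0x1, 0x9] from the `codec_trie_two_tuples_disjoint_keys` test
+ // fuses to
+ //   0x04  // 1 + 3: small leaf, 3 nibbles
+ //   0x08  // odd nibble count, so the first nibble gets its own byte
+ //   0x19  // the remaining nibbles, packed two per byte
+ // while a 130-nibble leaf key would fuse to the "big" marker 0x7f
+ // followed by 130 - 126 = 4 in the second byte.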
- let (first_byte_small, big_threshold) = if leaf {
- (LEAF_NODE_OFFSET, (LEAF_NODE_BIG - LEAF_NODE_OFFSET) as usize)
- } else {
- (EXTENSION_NODE_OFFSET, (EXTENSION_NODE_BIG - EXTENSION_NODE_OFFSET) as usize)
- };
- let first_byte = first_byte_small + nibbles.len().min(big_threshold) as u8;
- once(first_byte)
- .chain(if nibbles.len() >= big_threshold { Some((nibbles.len() - big_threshold) as u8) } else { None })
- .chain(if nibbles.len() % 2 == 1 { Some(nibbles[0]) } else { None })
- .chain(nibbles[nibbles.len() % 2..].chunks(2).map(|ch| ch[0] << 4 | ch[1]))
+ let (first_byte_small, big_threshold) = if leaf {
+ (
+ LEAF_NODE_OFFSET,
+ (LEAF_NODE_BIG - LEAF_NODE_OFFSET) as usize,
+ )
+ } else {
+ (
+ EXTENSION_NODE_OFFSET,
+ (EXTENSION_NODE_BIG - EXTENSION_NODE_OFFSET) as usize,
+ )
+ };
+ let first_byte = first_byte_small + nibbles.len().min(big_threshold) as u8;
+ once(first_byte)
+ .chain(if nibbles.len() >= big_threshold {
+ Some((nibbles.len() - big_threshold) as u8)
+ } else {
+ None
+ })
+ .chain(if nibbles.len() % 2 == 1 {
+ Some(nibbles[0])
+ } else {
+ None
+ })
+ .chain(
+ nibbles[nibbles.len() % 2..]
+ .chunks(2)
+ .map(|ch| ch[0] << 4 | ch[1]),
+ )
}

impl trie_root::TrieStream for TrieStream {
- fn new() -> Self { Self {buffer: Vec::new() } }
- fn append_empty_data(&mut self) {
- self.buffer.push(EMPTY_TRIE);
- }
+ fn new() -> Self {
+ Self { buffer: Vec::new() }
+ }
+ fn append_empty_data(&mut self) {
+ self.buffer.push(EMPTY_TRIE);
+ }

- fn append_leaf(&mut self, key: &[u8], value: &[u8]) {
- self.buffer.extend(fuse_nibbles_node(key, true));
- value.encode_to(&mut self.buffer);
- }
- fn begin_branch(&mut self, maybe_value: Option<&[u8]>, has_children: impl Iterator<Item = bool>) {
- self.buffer.extend(&branch_node(maybe_value.is_some(), has_children));
- // Push the value if one exists.
- if let Some(value) = maybe_value {
- value.encode_to(&mut self.buffer);
- }
- }
- fn append_extension(&mut self, key: &[u8]) {
- self.buffer.extend(fuse_nibbles_node(key, false));
- }
- fn append_substream<H: Hasher>(&mut self, other: Self) {
- let data = other.out();
- match data.len() {
- 0...31 => {
- data.encode_to(&mut self.buffer)
- },
- _ => {
- H::hash(&data).as_ref().encode_to(&mut self.buffer)
- }
- }
- }
+ fn append_leaf(&mut self, key: &[u8], value: &[u8]) {
+ self.buffer.extend(fuse_nibbles_node(key, true));
+ value.encode_to(&mut self.buffer);
+ }
+ fn begin_branch(
+ &mut self,
+ maybe_value: Option<&[u8]>,
+ has_children: impl Iterator<Item = bool>,
+ ) {
+ self.buffer
+ .extend(&branch_node(maybe_value.is_some(), has_children));
+ // Push the value if one exists.
+ if let Some(value) = maybe_value {
+ value.encode_to(&mut self.buffer);
+ }
+ }
+ fn append_extension(&mut self, key: &[u8]) {
+ self.buffer.extend(fuse_nibbles_node(key, false));
+ }
+ fn append_substream<H: Hasher>(&mut self, other: Self) {
+ let data = other.out();
+ match data.len() {
+ 0...31 => data.encode_to(&mut self.buffer),
+ _ => H::hash(&data).as_ref().encode_to(&mut self.buffer),
+ }
+ }

- fn out(self) -> Vec<u8> { self.buffer }
+ fn out(self) -> Vec<u8> {
+ self.buffer
+ }
}
diff --git a/core/util/fork-tree/src/lib.rs b/core/util/fork-tree/src/lib.rs
index f194ac8915..d1e10f688e 100644
--- a/core/util/fork-tree/src/lib.rs
+++ b/core/util/fork-tree/src/lib.rs
@@ -19,57 +19,57 @@
#![warn(missing_docs)]

-use std::fmt;
use parity_codec::{Decode, Encode};
+use std::fmt;

/// Error occurred when interacting with the tree.
#[derive(Clone, Debug, PartialEq)]
pub enum Error<E> {
- /// Adding duplicate node to tree.
- Duplicate,
- /// Finalizing descendent of tree node without finalizing ancestor(s).
- UnfinalizedAncestor,
- /// Imported or finalized node that is an ancestor of previously finalized node.
- Revert,
- /// Error throw by client when checking for node ancestry.
- Client(E),
+ /// Adding duplicate node to tree.
+ Duplicate,
+ /// Finalizing descendent of tree node without finalizing ancestor(s).
+ UnfinalizedAncestor,
+ /// Imported or finalized node that is an ancestor of previously finalized node.
+ Revert,
+ /// Error thrown by the client when checking for node ancestry.
+ Client(E),
}

impl<E: std::error::Error> fmt::Display for Error<E> {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- use std::error::Error;
- write!(f, "{}", self.description())
- }
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ use std::error::Error;
+ write!(f, "{}", self.description())
+ }
}

impl<E: std::error::Error> std::error::Error for Error<E> {
- fn description(&self) -> &str {
- match *self {
+ fn description(&self) -> &str {
+ match *self {
Error::Duplicate => "Hash already exists in Tree",
Error::UnfinalizedAncestor => "Finalized descendent of Tree node without finalizing its ancestor(s) first",
Error::Revert => "Tried to import or finalize node that is an ancestor of a previously finalized node",
Error::Client(ref err) => err.description(),
}
- }
+ }

- fn cause(&self) -> Option<&std::error::Error> {
- None
- }
+ fn cause(&self) -> Option<&std::error::Error> {
+ None
+ }
}

impl<E: std::error::Error> From<E> for Error<E> {
- fn from(err: E) -> Error<E> {
- Error::Client(err)
- }
+ fn from(err: E) -> Error<E> {
+ Error::Client(err)
+ }
}

/// Result of finalizing a node (that could be a part of the tree or not).
#[derive(Debug, PartialEq)]
pub enum FinalizationResult<V> {
- /// The tree has changed, optionally return the value associated with the finalized node.
- Changed(Option<V>),
- /// The tree has not changed.
- Unchanged,
+ /// The tree has changed, optionally return the value associated with the finalized node.
+ Changed(Option<V>),
+ /// The tree has not changed.
+ Unchanged,
}

/// A tree data structure that stores several nodes across multiple branches.
@@ -81,613 +81,641 @@ pub enum FinalizationResult<V> {
/// when interacting with the tree to establish a node's ancestry.
#[derive(Clone, Debug, Decode, Encode, PartialEq)]
pub struct ForkTree<H, N, V> {
- roots: Vec<Node<H, N, V>>,
- best_finalized_number: Option<N>,
+ roots: Vec<Node<H, N, V>>,
+ best_finalized_number: Option<N>,
}

-impl<H, N, V> ForkTree<H, N, V> where
- H: PartialEq,
- N: Ord,
+impl<H, N, V> ForkTree<H, N, V>
+where
+ H: PartialEq,
+ N: Ord,
{
{ - Some((h, n, d)) => { - hash = h; - number = n; - data = d; - }, - None => return Ok(false), - } - } - - self.roots.push(Node { - data, - hash: hash, - number: number, - children: Vec::new(), - }); - - Ok(true) - } - - /// Iterates over the existing roots in the tree. - pub fn roots(&self) -> impl Iterator { - self.roots.iter().map(|node| (&node.hash, &node.number, &node.data)) - } - - fn node_iter(&self) -> impl Iterator> { - ForkTreeIterator { stack: self.roots.iter().collect() } - } - - /// Iterates the nodes in the tree in pre-order. - pub fn iter(&self) -> impl Iterator { - self.node_iter().map(|node| (&node.hash, &node.number, &node.data)) - } - - /// Finalize a root in the tree and return it, return `None` in case no root - /// with the given hash exists. All other roots are pruned, and the children - /// of the finalized node become the new roots. - pub fn finalize_root(&mut self, hash: &H) -> Option { - if let Some(position) = self.roots.iter().position(|node| node.hash == *hash) { - let node = self.roots.swap_remove(position); - self.roots = node.children; - self.best_finalized_number = Some(node.number); - return Some(node.data); - } - - None - } - - /// Finalize a node in the tree. This method will make sure that the node - /// being finalized is either an existing root (an return its data), or a - /// node from a competing branch (not in the tree), tree pruning is done - /// accordingly. The given function `is_descendent_of` should return `true` - /// if the second hash (target) is a descendent of the first hash (base). - pub fn finalize( - &mut self, - hash: &H, - number: N, - is_descendent_of: &F, - ) -> Result, Error> - where E: std::error::Error, - F: Fn(&H, &H) -> Result - { - if let Some(ref best_finalized_number) = self.best_finalized_number { - if number <= *best_finalized_number { - return Err(Error::Revert); - } - } - - // check if one of the current roots is being finalized - if let Some(root) = self.finalize_root(hash) { - return Ok(FinalizationResult::Changed(Some(root))); - } - - // make sure we're not finalizing a descendent of any root - for root in self.roots.iter() { - if number > root.number && is_descendent_of(&root.hash, hash)? { - return Err(Error::UnfinalizedAncestor); - } - } - - // we finalized a block earlier than any existing root (or possibly - // another fork not part of the tree). make sure to only keep roots that - // are part of the finalized branch - let mut changed = false; - self.roots.retain(|root| { - let retain = root.number > number && is_descendent_of(hash, &root.hash).unwrap_or(false); - - if !retain { - changed = true; - } - - retain - }); - - self.best_finalized_number = Some(number); - - if changed { - Ok(FinalizationResult::Changed(None)) - } else { - Ok(FinalizationResult::Unchanged) - } - } - - /// Checks if any node in the tree is finalized by either finalizing the - /// node itself or a child node that's not in the tree, guaranteeing that - /// the node being finalized isn't a descendent of any of the node's - /// children. Returns `Some(true)` if the node being finalized is a root, - /// `Some(false)` if the node being finalized is not a root, and `None` if - /// no node in the tree is finalized. The given `predicate` is checked on - /// the prospective finalized root and must pass for finalization to occur. - /// The given function `is_descendent_of` should return `true` if the second - /// hash (target) is a descendent of the first hash (base). 
- pub fn finalizes_any_with_descendent_if( - &self, - hash: &H, - number: N, - is_descendent_of: &F, - predicate: P, - ) -> Result, Error> - where E: std::error::Error, - F: Fn(&H, &H) -> Result, - P: Fn(&V) -> bool, - { - if let Some(ref best_finalized_number) = self.best_finalized_number { - if number <= *best_finalized_number { - return Err(Error::Revert); - } - } - - // check if the given hash is equal or a descendent of any node in the - // tree, if we find a valid node that passes the predicate then we must - // ensure that we're not finalizing past any of its child nodes. - for node in self.node_iter() { - if predicate(&node.data) { - if node.hash == *hash || is_descendent_of(&node.hash, hash)? { - for node in node.children.iter() { - if node.number <= number && is_descendent_of(&node.hash, &hash)? { - return Err(Error::UnfinalizedAncestor); - } - } - - return Ok(Some(self.roots.iter().any(|root| root.hash == node.hash))); - } - } - } - - Ok(None) - } - - /// Finalize a root in the tree by either finalizing the node itself or a - /// child node that's not in the tree, guaranteeing that the node being - /// finalized isn't a descendent of any of the root's children. The given - /// `predicate` is checked on the prospective finalized root and must pass for - /// finalization to occur. The given function `is_descendent_of` should - /// return `true` if the second hash (target) is a descendent of the first - /// hash (base). - pub fn finalize_with_descendent_if( - &mut self, - hash: &H, - number: N, - is_descendent_of: &F, - predicate: P, - ) -> Result, Error> - where E: std::error::Error, - F: Fn(&H, &H) -> Result, - P: Fn(&V) -> bool, - { - if let Some(ref best_finalized_number) = self.best_finalized_number { - if number <= *best_finalized_number { - return Err(Error::Revert); - } - } - - // check if the given hash is equal or a a descendent of any root, if we - // find a valid root that passes the predicate then we must ensure that - // we're not finalizing past any children node. - let mut position = None; - for (i, root) in self.roots.iter().enumerate() { - if predicate(&root.data) { - if root.hash == *hash || is_descendent_of(&root.hash, hash)? { - for node in root.children.iter() { - if node.number <= number && is_descendent_of(&node.hash, &hash)? { - return Err(Error::UnfinalizedAncestor); - } - } - - position = Some(i); - break; - } - } - } - - let node_data = position.map(|i| { - let node = self.roots.swap_remove(i); - self.roots = node.children; - self.best_finalized_number = Some(node.number); - node.data - }); - - // if the block being finalized is earlier than a given root, then it - // must be its ancestor, otherwise we can prune the root. if there's a - // root at the same height then the hashes must match. otherwise the - // node being finalized is higher than the root so it must be its - // descendent (in this case the node wasn't finalized earlier presumably - // because the predicate didn't pass). 
- let mut changed = false;
- self.roots.retain(|root| {
- let retain =
- root.number > number && is_descendent_of(hash, &root.hash).unwrap_or(false) ||
- root.number == number && root.hash == *hash ||
- is_descendent_of(&root.hash, hash).unwrap_or(false);
-
- if !retain {
- changed = true;
- }
-
- retain
- });
-
- self.best_finalized_number = Some(number);
-
- match (node_data, changed) {
- (Some(data), _) => Ok(FinalizationResult::Changed(Some(data))),
- (None, true) => Ok(FinalizationResult::Changed(None)),
- (None, false) => Ok(FinalizationResult::Unchanged),
- }
- }
+ /// Create a new empty tree.
+ pub fn new() -> ForkTree<H, N, V> {
+ ForkTree {
+ roots: Vec::new(),
+ best_finalized_number: None,
+ }
+ }
+
+ /// Import a new node into the tree. The given function `is_descendent_of`
+ /// should return `true` if the second hash (target) is a descendent of the
+ /// first hash (base). This method assumes that nodes in the same branch are
+ /// imported in order.
+ pub fn import<F, E>(
+ &mut self,
+ mut hash: H,
+ mut number: N,
+ mut data: V,
+ is_descendent_of: &F,
+ ) -> Result<bool, Error<E>>
+ where
+ E: std::error::Error,
+ F: Fn(&H, &H) -> Result<bool, E>,
+ {
+ if let Some(ref best_finalized_number) = self.best_finalized_number {
+ if number <= *best_finalized_number {
+ return Err(Error::Revert);
+ }
+ }
+
+ for root in self.roots.iter_mut() {
+ if root.hash == hash {
+ return Err(Error::Duplicate);
+ }
+
+ match root.import(hash, number, data, is_descendent_of)? {
+ Some((h, n, d)) => {
+ hash = h;
+ number = n;
+ data = d;
+ }
+ None => return Ok(false),
+ }
+ }
+
+ self.roots.push(Node {
+ data,
+ hash: hash,
+ number: number,
+ children: Vec::new(),
+ });
+
+ Ok(true)
+ }
+
+ /// Iterates over the existing roots in the tree.
+ pub fn roots(&self) -> impl Iterator<Item = (&H, &N, &V)> {
+ self.roots
+ .iter()
+ .map(|node| (&node.hash, &node.number, &node.data))
+ }
+
+ fn node_iter(&self) -> impl Iterator<Item = &Node<H, N, V>> {
+ ForkTreeIterator {
+ stack: self.roots.iter().collect(),
+ }
+ }
+
+ /// Iterates the nodes in the tree in pre-order.
+ pub fn iter(&self) -> impl Iterator<Item = (&H, &N, &V)> {
+ self.node_iter()
+ .map(|node| (&node.hash, &node.number, &node.data))
+ }
+
+ /// Finalize a root in the tree and return it; return `None` in case no root
+ /// with the given hash exists. All other roots are pruned, and the children
+ /// of the finalized node become the new roots.
+ pub fn finalize_root(&mut self, hash: &H) -> Option<V> {
+ if let Some(position) = self.roots.iter().position(|node| node.hash == *hash) {
+ let node = self.roots.swap_remove(position);
+ self.roots = node.children;
+ self.best_finalized_number = Some(node.number);
+ return Some(node.data);
+ }
+
+ None
+ }
+
+ /// Finalize a node in the tree. This method will make sure that the node
+ /// being finalized is either an existing root (and return its data), or a
+ /// node from a competing branch (not in the tree); tree pruning is done
+ /// accordingly. The given function `is_descendent_of` should return `true`
+ /// if the second hash (target) is a descendent of the first hash (base).
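+ // A minimal usage sketch (assuming the `test_fork_tree` fixture from the
+ // tests below, where finalizing "A" leaves roots B, F and J):
+ //   tree.finalize(&"A", 1, &is_descendent_of)?; // a root: returns its data
+ //   tree.finalize(&"H", 3, &is_descendent_of);  // Err(UnfinalizedAncestor)
+ //   tree.finalize(&"F", 2, &is_descendent_of)?; // after this, "H" finalizes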
+ pub fn finalize<F, E>(
+ &mut self,
+ hash: &H,
+ number: N,
+ is_descendent_of: &F,
+ ) -> Result<FinalizationResult<V>, Error<E>>
+ where
+ E: std::error::Error,
+ F: Fn(&H, &H) -> Result<bool, E>,
+ {
+ if let Some(ref best_finalized_number) = self.best_finalized_number {
+ if number <= *best_finalized_number {
+ return Err(Error::Revert);
+ }
+ }
+
+ // check if one of the current roots is being finalized
+ if let Some(root) = self.finalize_root(hash) {
+ return Ok(FinalizationResult::Changed(Some(root)));
+ }
+
+ // make sure we're not finalizing a descendent of any root
+ for root in self.roots.iter() {
+ if number > root.number && is_descendent_of(&root.hash, hash)? {
+ return Err(Error::UnfinalizedAncestor);
+ }
+ }
+
+ // we finalized a block earlier than any existing root (or possibly
+ // another fork not part of the tree). make sure to only keep roots that
+ // are part of the finalized branch
+ let mut changed = false;
+ self.roots.retain(|root| {
+ let retain =
+ root.number > number && is_descendent_of(hash, &root.hash).unwrap_or(false);
+
+ if !retain {
+ changed = true;
+ }
+
+ retain
+ });
+
+ self.best_finalized_number = Some(number);
+
+ if changed {
+ Ok(FinalizationResult::Changed(None))
+ } else {
+ Ok(FinalizationResult::Unchanged)
+ }
+ }
+
+ /// Checks if any node in the tree is finalized by either finalizing the
+ /// node itself or a child node that's not in the tree, guaranteeing that
+ /// the node being finalized isn't a descendent of any of the node's
+ /// children. Returns `Some(true)` if the node being finalized is a root,
+ /// `Some(false)` if the node being finalized is not a root, and `None` if
+ /// no node in the tree is finalized. The given `predicate` is checked on
+ /// the prospective finalized root and must pass for finalization to occur.
+ /// The given function `is_descendent_of` should return `true` if the second
+ /// hash (target) is a descendent of the first hash (base).
+ pub fn finalizes_any_with_descendent_if<F, E, P>(
+ &self,
+ hash: &H,
+ number: N,
+ is_descendent_of: &F,
+ predicate: P,
+ ) -> Result<Option<bool>, Error<E>>
+ where
+ E: std::error::Error,
+ F: Fn(&H, &H) -> Result<bool, E>,
+ P: Fn(&V) -> bool,
+ {
+ if let Some(ref best_finalized_number) = self.best_finalized_number {
+ if number <= *best_finalized_number {
+ return Err(Error::Revert);
+ }
+ }
+
+ // check if the given hash is equal to or a descendent of any node in the
+ // tree; if we find a valid node that passes the predicate then we must
+ // ensure that we're not finalizing past any of its child nodes.
+ for node in self.node_iter() {
+ if predicate(&node.data) {
+ if node.hash == *hash || is_descendent_of(&node.hash, hash)? {
+ for node in node.children.iter() {
+ if node.number <= number && is_descendent_of(&node.hash, &hash)? {
+ return Err(Error::UnfinalizedAncestor);
+ }
+ }
+
+ return Ok(Some(self.roots.iter().any(|root| root.hash == node.hash)));
+ }
+ }
+ }
+
+ Ok(None)
+ }
+
+ /// Finalize a root in the tree by either finalizing the node itself or a
+ /// child node that's not in the tree, guaranteeing that the node being
+ /// finalized isn't a descendent of any of the root's children. The given
+ /// `predicate` is checked on the prospective finalized root and must pass for
+ /// finalization to occur. The given function `is_descendent_of` should
+ /// return `true` if the second hash (target) is a descendent of the first
+ /// hash (base).
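+ // A hedged sketch of the predicate in action, assuming the
+ // `Change { effective }` fixture from `finalize_with_descendent_works`
+ // below: "C" #5 is not itself in the tree, but finalizing it finalizes
+ // the root "A0" because the predicate passes for A0's data:
+ //   tree.finalize_with_descendent_if(&"C", 5, &is_descendent_of,
+ //       |c| c.effective <= 5)
+ //   // => Ok(FinalizationResult::Changed(Some(Change { effective: 5 })))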
+ pub fn finalize_with_descendent_if<F, E, P>(
+ &mut self,
+ hash: &H,
+ number: N,
+ is_descendent_of: &F,
+ predicate: P,
+ ) -> Result<FinalizationResult<V>, Error<E>>
+ where
+ E: std::error::Error,
+ F: Fn(&H, &H) -> Result<bool, E>,
+ P: Fn(&V) -> bool,
+ {
+ if let Some(ref best_finalized_number) = self.best_finalized_number {
+ if number <= *best_finalized_number {
+ return Err(Error::Revert);
+ }
+ }
+
+ // check if the given hash is equal to or a descendent of any root; if we
+ // find a valid root that passes the predicate then we must ensure that
+ // we're not finalizing past any child node.
+ let mut position = None;
+ for (i, root) in self.roots.iter().enumerate() {
+ if predicate(&root.data) {
+ if root.hash == *hash || is_descendent_of(&root.hash, hash)? {
+ for node in root.children.iter() {
+ if node.number <= number && is_descendent_of(&node.hash, &hash)? {
+ return Err(Error::UnfinalizedAncestor);
+ }
+ }
+
+ position = Some(i);
+ break;
+ }
+ }
+ }
+
+ let node_data = position.map(|i| {
+ let node = self.roots.swap_remove(i);
+ self.roots = node.children;
+ self.best_finalized_number = Some(node.number);
+ node.data
+ });
+
+ // if the block being finalized is earlier than a given root, then it
+ // must be its ancestor, otherwise we can prune the root. if there's a
+ // root at the same height then the hashes must match. otherwise the
+ // node being finalized is higher than the root so it must be its
+ // descendent (in this case the node wasn't finalized earlier presumably
+ // because the predicate didn't pass).
+ let mut changed = false;
+ self.roots.retain(|root| {
+ let retain = root.number > number
+ && is_descendent_of(hash, &root.hash).unwrap_or(false)
+ || root.number == number && root.hash == *hash
+ || is_descendent_of(&root.hash, hash).unwrap_or(false);
+
+ if !retain {
+ changed = true;
+ }
+
+ retain
+ });
+
+ self.best_finalized_number = Some(number);
+
+ match (node_data, changed) {
+ (Some(data), _) => Ok(FinalizationResult::Changed(Some(data))),
+ (None, true) => Ok(FinalizationResult::Changed(None)),
+ (None, false) => Ok(FinalizationResult::Unchanged),
+ }
+ }
}

// Workaround for: https://github.com/rust-lang/rust/issues/34537
mod node_implementation {
- use super::*;
-
- #[derive(Clone, Debug, Decode, Encode, PartialEq)]
- pub struct Node<H, N, V> {
- pub hash: H,
- pub number: N,
- pub data: V,
- pub children: Vec<Node<H, N, V>>,
- }
-
- impl<H: PartialEq, N: Ord, V> Node<H, N, V> {
- pub fn import<F, E>(
- &mut self,
- mut hash: H,
- mut number: N,
- mut data: V,
- is_descendent_of: &F,
- ) -> Result<Option<(H, N, V)>, Error<E>>
- where E: fmt::Debug,
- F: Fn(&H, &H) -> Result<bool, E>,
- {
- if self.hash == hash {
- return Err(Error::Duplicate);
- };
-
- if number <= self.number { return Ok(Some((hash, number, data))); }
-
- for node in self.children.iter_mut() {
- match node.import(hash, number, data, is_descendent_of)? {
- Some((h, n, d)) => {
- hash = h;
- number = n;
- data = d;
- },
- None => return Ok(None),
- }
- }
-
- if is_descendent_of(&self.hash, &hash)?
{ - self.children.push(Node { - data, - hash: hash, - number: number, - children: Vec::new(), - }); - - Ok(None) - } else { - Ok(Some((hash, number, data))) - } - } - } + use super::*; + + #[derive(Clone, Debug, Decode, Encode, PartialEq)] + pub struct Node { + pub hash: H, + pub number: N, + pub data: V, + pub children: Vec>, + } + + impl Node { + pub fn import( + &mut self, + mut hash: H, + mut number: N, + mut data: V, + is_descendent_of: &F, + ) -> Result, Error> + where + E: fmt::Debug, + F: Fn(&H, &H) -> Result, + { + if self.hash == hash { + return Err(Error::Duplicate); + }; + + if number <= self.number { + return Ok(Some((hash, number, data))); + } + + for node in self.children.iter_mut() { + match node.import(hash, number, data, is_descendent_of)? { + Some((h, n, d)) => { + hash = h; + number = n; + data = d; + } + None => return Ok(None), + } + } + + if is_descendent_of(&self.hash, &hash)? { + self.children.push(Node { + data, + hash: hash, + number: number, + children: Vec::new(), + }); + + Ok(None) + } else { + Ok(Some((hash, number, data))) + } + } + } } // Workaround for: https://github.com/rust-lang/rust/issues/34537 use node_implementation::Node; struct ForkTreeIterator<'a, H, N, V> { - stack: Vec<&'a Node>, + stack: Vec<&'a Node>, } impl<'a, H, N, V> Iterator for ForkTreeIterator<'a, H, N, V> { - type Item = &'a Node; - - fn next(&mut self) -> Option { - self.stack.pop().map(|node| { - self.stack.extend(node.children.iter()); - node - }) - } + type Item = &'a Node; + + fn next(&mut self) -> Option { + self.stack.pop().map(|node| { + self.stack.extend(node.children.iter()); + node + }) + } } #[cfg(test)] mod test { - use super::{FinalizationResult, ForkTree, Error}; - - #[derive(Debug, PartialEq)] - struct TestError; - - impl std::fmt::Display for TestError { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "TestError") - } - } - - impl std::error::Error for TestError {} - - fn test_fork_tree<'a>() -> (ForkTree<&'a str, u64, ()>, impl Fn(&&str, &&str) -> Result) { - let mut tree = ForkTree::new(); - - // - // - B - C - D - E - // / - // / - G - // / / - // A - F - H - I - // \ - // — J - K - // - let is_descendent_of = |base: &&str, block: &&str| -> Result { - let letters = vec!["B", "C", "D", "E", "F", "G", "H", "I", "J", "K"]; - match (*base, *block) { - ("A", b) => Ok(letters.into_iter().any(|n| n == b)), - ("B", b) => Ok(b == "C" || b == "D" || b == "E"), - ("C", b) => Ok(b == "D" || b == "E"), - ("D", b) => Ok(b == "E"), - ("E", _) => Ok(false), - ("F", b) => Ok(b == "G" || b == "H" || b == "I"), - ("G", _) => Ok(false), - ("H", b) => Ok(b == "I"), - ("I", _) => Ok(false), - ("J", b) => Ok(b == "K"), - ("K", _) => Ok(false), - ("0", _) => Ok(true), - _ => Ok(false), - } - }; - - tree.import("A", 1, (), &is_descendent_of).unwrap(); - - tree.import("B", 2, (), &is_descendent_of).unwrap(); - tree.import("C", 3, (), &is_descendent_of).unwrap(); - tree.import("D", 4, (), &is_descendent_of).unwrap(); - tree.import("E", 5, (), &is_descendent_of).unwrap(); - - tree.import("F", 2, (), &is_descendent_of).unwrap(); - tree.import("G", 3, (), &is_descendent_of).unwrap(); - - tree.import("H", 3, (), &is_descendent_of).unwrap(); - tree.import("I", 4, (), &is_descendent_of).unwrap(); - - tree.import("J", 2, (), &is_descendent_of).unwrap(); - tree.import("K", 3, (), &is_descendent_of).unwrap(); - - (tree, is_descendent_of) - } - - #[test] - fn import_doesnt_revert() { - let (mut tree, is_descendent_of) = test_fork_tree(); - - tree.finalize_root(&"A"); - - 
assert_eq!( - tree.best_finalized_number, - Some(1), - ); - - assert_eq!( - tree.import("A", 1, (), &is_descendent_of), - Err(Error::Revert), - ); - } - - #[test] - fn import_doesnt_add_duplicates() { - let (mut tree, is_descendent_of) = test_fork_tree(); - - assert_eq!( - tree.import("A", 1, (), &is_descendent_of), - Err(Error::Duplicate), - ); - - assert_eq!( - tree.import("I", 4, (), &is_descendent_of), - Err(Error::Duplicate), - ); - - assert_eq!( - tree.import("G", 3, (), &is_descendent_of), - Err(Error::Duplicate), - ); - - assert_eq!( - tree.import("K", 3, (), &is_descendent_of), - Err(Error::Duplicate), - ); - } - - #[test] - fn finalize_root_works() { - let finalize_a = || { - let (mut tree, ..) = test_fork_tree(); - - assert_eq!( - tree.roots().map(|(h, n, _)| (h.clone(), n.clone())).collect::>(), - vec![("A", 1)], - ); - - // finalizing "A" opens up three possible forks - tree.finalize_root(&"A"); - - assert_eq!( - tree.roots().map(|(h, n, _)| (h.clone(), n.clone())).collect::>(), - vec![("B", 2), ("F", 2), ("J", 2)], - ); - - tree - }; - - { - let mut tree = finalize_a(); - - // finalizing "B" will progress on its fork and remove any other competing forks - tree.finalize_root(&"B"); - - assert_eq!( - tree.roots().map(|(h, n, _)| (h.clone(), n.clone())).collect::>(), - vec![("C", 3)], - ); - - // all the other forks have been pruned - assert!(tree.roots.len() == 1); - } - - { - let mut tree = finalize_a(); - - // finalizing "J" will progress on its fork and remove any other competing forks - tree.finalize_root(&"J"); - - assert_eq!( - tree.roots().map(|(h, n, _)| (h.clone(), n.clone())).collect::>(), - vec![("K", 3)], - ); - - // all the other forks have been pruned - assert!(tree.roots.len() == 1); - } - } - - #[test] - fn finalize_works() { - let (mut tree, is_descendent_of) = test_fork_tree(); - - let original_roots = tree.roots.clone(); - - // finalizing a block prior to any in the node doesn't change the tree - assert_eq!( - tree.finalize(&"0", 0, &is_descendent_of), - Ok(FinalizationResult::Unchanged), - ); - - assert_eq!(tree.roots, original_roots); - - // finalizing "A" opens up three possible forks - assert_eq!( - tree.finalize(&"A", 1, &is_descendent_of), - Ok(FinalizationResult::Changed(Some(()))), - ); - - assert_eq!( - tree.roots().map(|(h, n, _)| (h.clone(), n.clone())).collect::>(), - vec![("B", 2), ("F", 2), ("J", 2)], - ); - - // finalizing anything lower than what we observed will fail - assert_eq!( - tree.best_finalized_number, - Some(1), - ); - - assert_eq!( - tree.finalize(&"Z", 1, &is_descendent_of), - Err(Error::Revert), - ); - - // trying to finalize a node without finalizing its ancestors first will fail - assert_eq!( - tree.finalize(&"H", 3, &is_descendent_of), - Err(Error::UnfinalizedAncestor), - ); - - // after finalizing "F" we can finalize "H" - assert_eq!( - tree.finalize(&"F", 2, &is_descendent_of), - Ok(FinalizationResult::Changed(Some(()))), - ); - - assert_eq!( - tree.finalize(&"H", 3, &is_descendent_of), - Ok(FinalizationResult::Changed(Some(()))), - ); - - assert_eq!( - tree.roots().map(|(h, n, _)| (h.clone(), n.clone())).collect::>(), - vec![("I", 4)], - ); - - // finalizing a node from another fork that isn't part of the tree clears the tree - assert_eq!( - tree.finalize(&"Z", 5, &is_descendent_of), - Ok(FinalizationResult::Changed(None)), - ); - - assert!(tree.roots.is_empty()); - } - - #[test] - fn finalize_with_descendent_works() { - #[derive(Debug, PartialEq)] - struct Change { effective: u64 }; - - let (mut tree, is_descendent_of) = { 
- let mut tree = ForkTree::new(); - - let is_descendent_of = |base: &&str, block: &&str| -> Result { - - // - // A0 #1 - (B #2) - (C #5) - D #10 - E #15 - (F #100) - // \ - // - (G #100) - // - // A1 #1 - // - // Nodes B, C, F and G are not part of the tree. - match (*base, *block) { - ("A0", b) => Ok(b == "B" || b == "C" || b == "D" || b == "G"), - ("A1", _) => Ok(false), - ("C", b) => Ok(b == "D"), - ("D", b) => Ok(b == "E" || b == "F" || b == "G"), - ("E", b) => Ok(b == "F"), - _ => Ok(false), - } - }; - - tree.import("A0", 1, Change { effective: 5 }, &is_descendent_of).unwrap(); - tree.import("A1", 1, Change { effective: 5 }, &is_descendent_of).unwrap(); - tree.import("D", 10, Change { effective: 10 }, &is_descendent_of).unwrap(); - tree.import("E", 15, Change { effective: 50 }, &is_descendent_of).unwrap(); - - (tree, is_descendent_of) - }; - - assert_eq!( + use super::{Error, FinalizationResult, ForkTree}; + + #[derive(Debug, PartialEq)] + struct TestError; + + impl std::fmt::Display for TestError { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "TestError") + } + } + + impl std::error::Error for TestError {} + + fn test_fork_tree<'a>() -> ( + ForkTree<&'a str, u64, ()>, + impl Fn(&&str, &&str) -> Result, + ) { + let mut tree = ForkTree::new(); + + // + // - B - C - D - E + // / + // / - G + // / / + // A - F - H - I + // \ + // — J - K + // + let is_descendent_of = |base: &&str, block: &&str| -> Result { + let letters = vec!["B", "C", "D", "E", "F", "G", "H", "I", "J", "K"]; + match (*base, *block) { + ("A", b) => Ok(letters.into_iter().any(|n| n == b)), + ("B", b) => Ok(b == "C" || b == "D" || b == "E"), + ("C", b) => Ok(b == "D" || b == "E"), + ("D", b) => Ok(b == "E"), + ("E", _) => Ok(false), + ("F", b) => Ok(b == "G" || b == "H" || b == "I"), + ("G", _) => Ok(false), + ("H", b) => Ok(b == "I"), + ("I", _) => Ok(false), + ("J", b) => Ok(b == "K"), + ("K", _) => Ok(false), + ("0", _) => Ok(true), + _ => Ok(false), + } + }; + + tree.import("A", 1, (), &is_descendent_of).unwrap(); + + tree.import("B", 2, (), &is_descendent_of).unwrap(); + tree.import("C", 3, (), &is_descendent_of).unwrap(); + tree.import("D", 4, (), &is_descendent_of).unwrap(); + tree.import("E", 5, (), &is_descendent_of).unwrap(); + + tree.import("F", 2, (), &is_descendent_of).unwrap(); + tree.import("G", 3, (), &is_descendent_of).unwrap(); + + tree.import("H", 3, (), &is_descendent_of).unwrap(); + tree.import("I", 4, (), &is_descendent_of).unwrap(); + + tree.import("J", 2, (), &is_descendent_of).unwrap(); + tree.import("K", 3, (), &is_descendent_of).unwrap(); + + (tree, is_descendent_of) + } + + #[test] + fn import_doesnt_revert() { + let (mut tree, is_descendent_of) = test_fork_tree(); + + tree.finalize_root(&"A"); + + assert_eq!(tree.best_finalized_number, Some(1),); + + assert_eq!( + tree.import("A", 1, (), &is_descendent_of), + Err(Error::Revert), + ); + } + + #[test] + fn import_doesnt_add_duplicates() { + let (mut tree, is_descendent_of) = test_fork_tree(); + + assert_eq!( + tree.import("A", 1, (), &is_descendent_of), + Err(Error::Duplicate), + ); + + assert_eq!( + tree.import("I", 4, (), &is_descendent_of), + Err(Error::Duplicate), + ); + + assert_eq!( + tree.import("G", 3, (), &is_descendent_of), + Err(Error::Duplicate), + ); + + assert_eq!( + tree.import("K", 3, (), &is_descendent_of), + Err(Error::Duplicate), + ); + } + + #[test] + fn finalize_root_works() { + let finalize_a = || { + let (mut tree, ..) 
= test_fork_tree(); + + assert_eq!( + tree.roots() + .map(|(h, n, _)| (h.clone(), n.clone())) + .collect::>(), + vec![("A", 1)], + ); + + // finalizing "A" opens up three possible forks + tree.finalize_root(&"A"); + + assert_eq!( + tree.roots() + .map(|(h, n, _)| (h.clone(), n.clone())) + .collect::>(), + vec![("B", 2), ("F", 2), ("J", 2)], + ); + + tree + }; + + { + let mut tree = finalize_a(); + + // finalizing "B" will progress on its fork and remove any other competing forks + tree.finalize_root(&"B"); + + assert_eq!( + tree.roots() + .map(|(h, n, _)| (h.clone(), n.clone())) + .collect::>(), + vec![("C", 3)], + ); + + // all the other forks have been pruned + assert!(tree.roots.len() == 1); + } + + { + let mut tree = finalize_a(); + + // finalizing "J" will progress on its fork and remove any other competing forks + tree.finalize_root(&"J"); + + assert_eq!( + tree.roots() + .map(|(h, n, _)| (h.clone(), n.clone())) + .collect::>(), + vec![("K", 3)], + ); + + // all the other forks have been pruned + assert!(tree.roots.len() == 1); + } + } + + #[test] + fn finalize_works() { + let (mut tree, is_descendent_of) = test_fork_tree(); + + let original_roots = tree.roots.clone(); + + // finalizing a block prior to any in the node doesn't change the tree + assert_eq!( + tree.finalize(&"0", 0, &is_descendent_of), + Ok(FinalizationResult::Unchanged), + ); + + assert_eq!(tree.roots, original_roots); + + // finalizing "A" opens up three possible forks + assert_eq!( + tree.finalize(&"A", 1, &is_descendent_of), + Ok(FinalizationResult::Changed(Some(()))), + ); + + assert_eq!( + tree.roots() + .map(|(h, n, _)| (h.clone(), n.clone())) + .collect::>(), + vec![("B", 2), ("F", 2), ("J", 2)], + ); + + // finalizing anything lower than what we observed will fail + assert_eq!(tree.best_finalized_number, Some(1),); + + assert_eq!( + tree.finalize(&"Z", 1, &is_descendent_of), + Err(Error::Revert), + ); + + // trying to finalize a node without finalizing its ancestors first will fail + assert_eq!( + tree.finalize(&"H", 3, &is_descendent_of), + Err(Error::UnfinalizedAncestor), + ); + + // after finalizing "F" we can finalize "H" + assert_eq!( + tree.finalize(&"F", 2, &is_descendent_of), + Ok(FinalizationResult::Changed(Some(()))), + ); + + assert_eq!( + tree.finalize(&"H", 3, &is_descendent_of), + Ok(FinalizationResult::Changed(Some(()))), + ); + + assert_eq!( + tree.roots() + .map(|(h, n, _)| (h.clone(), n.clone())) + .collect::>(), + vec![("I", 4)], + ); + + // finalizing a node from another fork that isn't part of the tree clears the tree + assert_eq!( + tree.finalize(&"Z", 5, &is_descendent_of), + Ok(FinalizationResult::Changed(None)), + ); + + assert!(tree.roots.is_empty()); + } + + #[test] + fn finalize_with_descendent_works() { + #[derive(Debug, PartialEq)] + struct Change { + effective: u64, + }; + + let (mut tree, is_descendent_of) = { + let mut tree = ForkTree::new(); + + let is_descendent_of = |base: &&str, block: &&str| -> Result { + // + // A0 #1 - (B #2) - (C #5) - D #10 - E #15 - (F #100) + // \ + // - (G #100) + // + // A1 #1 + // + // Nodes B, C, F and G are not part of the tree. 
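+ // The match below is a hand-written ancestry oracle for the diagram
+ // above; e.g. ("D", b) answers true for "E", "F" and "G" because all
+ // three extend D #10, even though F and G are not tree nodes themselves.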
+ match (*base, *block) { + ("A0", b) => Ok(b == "B" || b == "C" || b == "D" || b == "G"), + ("A1", _) => Ok(false), + ("C", b) => Ok(b == "D"), + ("D", b) => Ok(b == "E" || b == "F" || b == "G"), + ("E", b) => Ok(b == "F"), + _ => Ok(false), + } + }; + + tree.import("A0", 1, Change { effective: 5 }, &is_descendent_of) + .unwrap(); + tree.import("A1", 1, Change { effective: 5 }, &is_descendent_of) + .unwrap(); + tree.import("D", 10, Change { effective: 10 }, &is_descendent_of) + .unwrap(); + tree.import("E", 15, Change { effective: 50 }, &is_descendent_of) + .unwrap(); + + (tree, is_descendent_of) + }; + + assert_eq!( tree.finalizes_any_with_descendent_if( &"B", 2, @@ -697,37 +725,30 @@ mod test { Ok(None), ); - // finalizing "D" will finalize a block from the tree, but it can't be applied yet - // since it is not a root change - assert_eq!( - tree.finalizes_any_with_descendent_if( - &"D", - 10, - &is_descendent_of, - |c| c.effective == 10, - ), - Ok(Some(false)), - ); - - // finalizing "B" doesn't finalize "A0" since the predicate doesn't pass, - // although it will clear out "A1" from the tree - assert_eq!( - tree.finalize_with_descendent_if( - &"B", - 2, - &is_descendent_of, - |c| c.effective <= 2, - ), - Ok(FinalizationResult::Changed(None)), - ); - - assert_eq!( - tree.roots().map(|(h, n, _)| (h.clone(), n.clone())).collect::>(), - vec![("A0", 1)], - ); - - // finalizing "C" will finalize the node "A0" and prune it out of the tree - assert_eq!( + // finalizing "D" will finalize a block from the tree, but it can't be applied yet + // since it is not a root change + assert_eq!( + tree.finalizes_any_with_descendent_if(&"D", 10, &is_descendent_of, |c| c.effective + == 10,), + Ok(Some(false)), + ); + + // finalizing "B" doesn't finalize "A0" since the predicate doesn't pass, + // although it will clear out "A1" from the tree + assert_eq!( + tree.finalize_with_descendent_if(&"B", 2, &is_descendent_of, |c| c.effective <= 2,), + Ok(FinalizationResult::Changed(None)), + ); + + assert_eq!( + tree.roots() + .map(|(h, n, _)| (h.clone(), n.clone())) + .collect::>(), + vec![("A0", 1)], + ); + + // finalizing "C" will finalize the node "A0" and prune it out of the tree + assert_eq!( tree.finalizes_any_with_descendent_if( &"C", 5, @@ -737,141 +758,119 @@ mod test { Ok(Some(true)), ); - assert_eq!( - tree.finalize_with_descendent_if( - &"C", - 5, - &is_descendent_of, - |c| c.effective <= 5, - ), - Ok(FinalizationResult::Changed(Some(Change { effective: 5 }))), - ); - - assert_eq!( - tree.roots().map(|(h, n, _)| (h.clone(), n.clone())).collect::>(), - vec![("D", 10)], - ); - - // finalizing "F" will fail since it would finalize past "E" without finalizing "D" first - assert_eq!( - tree.finalizes_any_with_descendent_if( - &"F", - 100, - &is_descendent_of, - |c| c.effective <= 100, - ), - Err(Error::UnfinalizedAncestor), - ); - - // it will work with "G" though since it is not in the same branch as "E" - assert_eq!( - tree.finalizes_any_with_descendent_if( - &"G", - 100, - &is_descendent_of, - |c| c.effective <= 100, - ), - Ok(Some(true)), - ); - - assert_eq!( - tree.finalize_with_descendent_if( - &"G", - 100, - &is_descendent_of, - |c| c.effective <= 100, - ), - Ok(FinalizationResult::Changed(Some(Change { effective: 10 }))), - ); - - // "E" will be pruned out - assert_eq!(tree.roots().count(), 0); - } - - #[test] - fn iter_iterates_in_preorder() { - let (tree, ..) 
= test_fork_tree(); - assert_eq!( - tree.iter().map(|(h, n, _)| (h.clone(), n.clone())).collect::>(), - vec![ - ("A", 1), - ("J", 2), ("K", 3), - ("F", 2), ("H", 3), ("I", 4), - ("G", 3), - ("B", 2), ("C", 3), ("D", 4), ("E", 5), - ], - ); - } - - #[test] - fn minimizes_calls_to_is_descendent_of() { - use std::sync::atomic::{AtomicUsize, Ordering}; - - let n_is_descendent_of_calls = AtomicUsize::new(0); - - let is_descendent_of = |_: &&str, _: &&str| -> Result { - n_is_descendent_of_calls.fetch_add(1, Ordering::SeqCst); - Ok(true) - }; - - { - // Deep tree where we want to call `finalizes_any_with_descendent_if`. The - // search for the node should first check the predicate (which is cheaper) and - // only then call `is_descendent_of` - let mut tree = ForkTree::new(); - let letters = vec!["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K"]; - - for (i, letter) in letters.iter().enumerate() { - tree.import::<_, TestError>(*letter, i, i, &|_, _| Ok(true)).unwrap(); - } - - // "L" is a descendent of "K", but the predicate will only pass for "K", - // therefore only one call to `is_descendent_of` should be made - assert_eq!( - tree.finalizes_any_with_descendent_if( - &"L", - 11, - &is_descendent_of, - |i| *i == 10, - ), - Ok(Some(false)), - ); - - assert_eq!( - n_is_descendent_of_calls.load(Ordering::SeqCst), - 1, - ); - } - - n_is_descendent_of_calls.store(0, Ordering::SeqCst); - - { - // Multiple roots in the tree where we want to call `finalize_with_descendent_if`. - // The search for the root node should first check the predicate (which is cheaper) - // and only then call `is_descendent_of` - let mut tree = ForkTree::new(); - let letters = vec!["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K"]; - - for (i, letter) in letters.iter().enumerate() { - tree.import::<_, TestError>(*letter, i, i, &|_, _| Ok(false)).unwrap(); - } - - // "L" is a descendent of "K", but the predicate will only pass for "K", - // therefore only one call to `is_descendent_of` should be made - assert_eq!( - tree.finalize_with_descendent_if( - &"L", - 11, - &is_descendent_of, - |i| *i == 10, - ), - Ok(FinalizationResult::Changed(Some(10))), - ); - - assert_eq!( - n_is_descendent_of_calls.load(Ordering::SeqCst), - 1, - ); - } - } + assert_eq!( + tree.finalize_with_descendent_if(&"C", 5, &is_descendent_of, |c| c.effective <= 5,), + Ok(FinalizationResult::Changed(Some(Change { effective: 5 }))), + ); + + assert_eq!( + tree.roots() + .map(|(h, n, _)| (h.clone(), n.clone())) + .collect::>(), + vec![("D", 10)], + ); + + // finalizing "F" will fail since it would finalize past "E" without finalizing "D" first + assert_eq!( + tree.finalizes_any_with_descendent_if(&"F", 100, &is_descendent_of, |c| c.effective + <= 100,), + Err(Error::UnfinalizedAncestor), + ); + + // it will work with "G" though since it is not in the same branch as "E" + assert_eq!( + tree.finalizes_any_with_descendent_if(&"G", 100, &is_descendent_of, |c| c.effective + <= 100,), + Ok(Some(true)), + ); + + assert_eq!( + tree.finalize_with_descendent_if(&"G", 100, &is_descendent_of, |c| c.effective <= 100,), + Ok(FinalizationResult::Changed(Some(Change { effective: 10 }))), + ); + + // "E" will be pruned out + assert_eq!(tree.roots().count(), 0); + } + + #[test] + fn iter_iterates_in_preorder() { + let (tree, ..) 
= test_fork_tree(); + assert_eq!( + tree.iter() + .map(|(h, n, _)| (h.clone(), n.clone())) + .collect::>(), + vec![ + ("A", 1), + ("J", 2), + ("K", 3), + ("F", 2), + ("H", 3), + ("I", 4), + ("G", 3), + ("B", 2), + ("C", 3), + ("D", 4), + ("E", 5), + ], + ); + } + + #[test] + fn minimizes_calls_to_is_descendent_of() { + use std::sync::atomic::{AtomicUsize, Ordering}; + + let n_is_descendent_of_calls = AtomicUsize::new(0); + + let is_descendent_of = |_: &&str, _: &&str| -> Result { + n_is_descendent_of_calls.fetch_add(1, Ordering::SeqCst); + Ok(true) + }; + + { + // Deep tree where we want to call `finalizes_any_with_descendent_if`. The + // search for the node should first check the predicate (which is cheaper) and + // only then call `is_descendent_of` + let mut tree = ForkTree::new(); + let letters = vec!["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K"]; + + for (i, letter) in letters.iter().enumerate() { + tree.import::<_, TestError>(*letter, i, i, &|_, _| Ok(true)) + .unwrap(); + } + + // "L" is a descendent of "K", but the predicate will only pass for "K", + // therefore only one call to `is_descendent_of` should be made + assert_eq!( + tree.finalizes_any_with_descendent_if(&"L", 11, &is_descendent_of, |i| *i == 10,), + Ok(Some(false)), + ); + + assert_eq!(n_is_descendent_of_calls.load(Ordering::SeqCst), 1,); + } + + n_is_descendent_of_calls.store(0, Ordering::SeqCst); + + { + // Multiple roots in the tree where we want to call `finalize_with_descendent_if`. + // The search for the root node should first check the predicate (which is cheaper) + // and only then call `is_descendent_of` + let mut tree = ForkTree::new(); + let letters = vec!["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K"]; + + for (i, letter) in letters.iter().enumerate() { + tree.import::<_, TestError>(*letter, i, i, &|_, _| Ok(false)) + .unwrap(); + } + + // "L" is a descendent of "K", but the predicate will only pass for "K", + // therefore only one call to `is_descendent_of` should be made + assert_eq!( + tree.finalize_with_descendent_if(&"L", 11, &is_descendent_of, |i| *i == 10,), + Ok(FinalizationResult::Changed(Some(10))), + ); + + assert_eq!(n_is_descendent_of_calls.load(Ordering::SeqCst), 1,); + } + } } diff --git a/node-template/build.rs b/node-template/build.rs index afc39d3b63..ce3d5bc530 100644 --- a/node-template/build.rs +++ b/node-template/build.rs @@ -1,8 +1,8 @@ -use vergen::{ConstantsFlags, generate_cargo_keys}; +use vergen::{generate_cargo_keys, ConstantsFlags}; const ERROR_MSG: &str = "Failed to generate metadata files"; fn main() { - generate_cargo_keys(ConstantsFlags::all()).expect(ERROR_MSG); - println!("cargo:rerun-if-changed=.git/HEAD"); + generate_cargo_keys(ConstantsFlags::all()).expect(ERROR_MSG); + println!("cargo:rerun-if-changed=.git/HEAD"); } diff --git a/node-template/runtime/src/lib.rs b/node-template/runtime/src/lib.rs index 5299831ea5..4a75f24e09 100644 --- a/node-template/runtime/src/lib.rs +++ b/node-template/runtime/src/lib.rs @@ -3,36 +3,38 @@ #![cfg_attr(not(feature = "std"), no_std)] #![cfg_attr(not(feature = "std"), feature(alloc))] // `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256. 
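
The `minimizes_calls_to_is_descendent_of` test above pins down an evaluation order: the tree consults the cheap numeric predicate first and only invokes the expensive `is_descendent_of` callback for candidates that pass it. A self-contained sketch of that ordering (illustrative `find_finalizable` helper over a flat node list, not the actual `fork_tree` API) shows why the counter ends at exactly one:

use std::sync::atomic::{AtomicUsize, Ordering};

// Search a flat candidate list the way the tree search is asserted to behave:
// cheap predicate first, expensive ancestry check second.
fn find_finalizable<'a>(
    nodes: &'a [(&'a str, usize)],
    predicate: impl Fn(usize) -> bool,
    is_descendent_of: impl Fn(&str, &str) -> bool,
    target: &str,
) -> Option<&'a (&'a str, usize)> {
    nodes.iter().find(|(hash, number)| {
        // `&&` short-circuits, so the ancestry callback only runs for
        // candidates that already passed the number predicate.
        predicate(*number) && is_descendent_of(hash, target)
    })
}

fn main() {
    let calls = AtomicUsize::new(0);
    let nodes = [("A", 0), ("B", 1), ("C", 2), ("K", 10)];
    let found = find_finalizable(
        &nodes,
        |n| n == 10,
        |_base, _block| {
            calls.fetch_add(1, Ordering::SeqCst);
            true // pretend every node is an ancestor of the target
        },
        "L",
    );
    assert_eq!(found, Some(&("K", 10)));
    // Only "K" satisfied the predicate, so the callback ran exactly once.
    assert_eq!(calls.load(Ordering::SeqCst), 1);
}
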
-#![recursion_limit="256"] +#![recursion_limit = "256"] -#[cfg(feature = "std")] -use serde_derive::{Serialize, Deserialize}; -use parity_codec::{Encode, Decode}; -use rstd::prelude::*; +use client::{ + block_builder::api::{self as block_builder_api, CheckInherentsResult, InherentData}, + impl_runtime_apis, runtime_api, +}; +use parity_codec::{Decode, Encode}; #[cfg(feature = "std")] use primitives::bytes; use primitives::{ed25519, sr25519, OpaqueMetadata}; +use rstd::prelude::*; use runtime_primitives::{ - ApplyResult, transaction_validity::TransactionValidity, generic, create_runtime_str, - traits::{self, NumberFor, BlakeTwo256, Block as BlockT, StaticLookup, Verify} + create_runtime_str, generic, + traits::{self, BlakeTwo256, Block as BlockT, NumberFor, StaticLookup, Verify}, + transaction_validity::TransactionValidity, + ApplyResult, }; -use client::{ - block_builder::api::{CheckInherentsResult, InherentData, self as block_builder_api}, - runtime_api, impl_runtime_apis -}; -use version::RuntimeVersion; +#[cfg(feature = "std")] +use serde_derive::{Deserialize, Serialize}; #[cfg(feature = "std")] use version::NativeVersion; +use version::RuntimeVersion; // A few exports that help ease life for downstream crates. +pub use balances::Call as BalancesCall; +pub use consensus::Call as ConsensusCall; #[cfg(any(feature = "std", test))] pub use runtime_primitives::BuildStorage; -pub use consensus::Call as ConsensusCall; -pub use timestamp::Call as TimestampCall; -pub use balances::Call as BalancesCall; -pub use runtime_primitives::{Permill, Perbill}; +pub use runtime_primitives::{Perbill, Permill}; +pub use support::{construct_runtime, StorageValue}; pub use timestamp::BlockPeriod; -pub use support::{StorageValue, construct_runtime}; +pub use timestamp::Call as TimestampCall; /// The type that is used for identifying authorities. pub type AuthorityId = ::Signer; @@ -63,133 +65,137 @@ mod template; /// of data like extrinsics, allowing for them to continue syncing the network through upgrades /// to even the core datastructures. pub mod opaque { - use super::*; - - /// Opaque, encoded, unchecked extrinsic. - #[derive(PartialEq, Eq, Clone, Default, Encode, Decode)] - #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] - pub struct UncheckedExtrinsic(#[cfg_attr(feature = "std", serde(with="bytes"))] pub Vec); - #[cfg(feature = "std")] - impl std::fmt::Debug for UncheckedExtrinsic { - fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(fmt, "{}", primitives::hexdisplay::HexDisplay::from(&self.0)) - } - } - impl traits::Extrinsic for UncheckedExtrinsic { - fn is_signed(&self) -> Option { - None - } - } - /// Opaque block header type. - pub type Header = generic::Header>; - /// Opaque block type. - pub type Block = generic::Block; - /// Opaque block identifier type. - pub type BlockId = generic::BlockId; - /// Opaque session key type. - pub type SessionKey = AuthorityId; + use super::*; + + /// Opaque, encoded, unchecked extrinsic. 
+    #[derive(PartialEq, Eq, Clone, Default, Encode, Decode)]
+    #[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
+    pub struct UncheckedExtrinsic(#[cfg_attr(feature = "std", serde(with = "bytes"))] pub Vec<u8>);
+    #[cfg(feature = "std")]
+    impl std::fmt::Debug for UncheckedExtrinsic {
+        fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
+            write!(fmt, "{}", primitives::hexdisplay::HexDisplay::from(&self.0))
+        }
+    }
+    impl traits::Extrinsic for UncheckedExtrinsic {
+        fn is_signed(&self) -> Option<bool> {
+            None
+        }
+    }
+    /// Opaque block header type.
+    pub type Header = generic::Header<
+        BlockNumber,
+        BlakeTwo256,
+        generic::DigestItem<Hash, AuthorityId, AuthoritySignature>,
+    >;
+    /// Opaque block type.
+    pub type Block = generic::Block<Header, UncheckedExtrinsic>;
+    /// Opaque block identifier type.
+    pub type BlockId = generic::BlockId<Block>;
+    /// Opaque session key type.
+    pub type SessionKey = AuthorityId;
 }
 
 /// This runtime version.
 pub const VERSION: RuntimeVersion = RuntimeVersion {
-	spec_name: create_runtime_str!("template-node"),
-	impl_name: create_runtime_str!("template-node"),
-	authoring_version: 3,
-	spec_version: 3,
-	impl_version: 0,
-	apis: RUNTIME_API_VERSIONS,
+    spec_name: create_runtime_str!("template-node"),
+    impl_name: create_runtime_str!("template-node"),
+    authoring_version: 3,
+    spec_version: 3,
+    impl_version: 0,
+    apis: RUNTIME_API_VERSIONS,
 };
 
 /// The version information used to identify this runtime when compiled natively.
 #[cfg(feature = "std")]
 pub fn native_version() -> NativeVersion {
-	NativeVersion {
-		runtime_version: VERSION,
-		can_author_with: Default::default(),
-	}
+    NativeVersion {
+        runtime_version: VERSION,
+        can_author_with: Default::default(),
+    }
 }
 
 impl system::Trait for Runtime {
-	/// The identifier used to distinguish between accounts.
-	type AccountId = AccountId;
-	/// The lookup mechanism to get account ID from whatever is passed in dispatchers.
-	type Lookup = Indices;
-	/// The index type for storing how many extrinsics an account has signed.
-	type Index = Nonce;
-	/// The index type for blocks.
-	type BlockNumber = BlockNumber;
-	/// The type for hashing blocks and tries.
-	type Hash = Hash;
-	/// The hashing algorithm used.
-	type Hashing = BlakeTwo256;
-	/// The header digest type.
-	type Digest = generic::Digest<Log>;
-	/// The header type.
-	type Header = generic::Header<BlockNumber, BlakeTwo256, Log>;
-	/// The ubiquitous event type.
-	type Event = Event;
-	/// The ubiquitous log type.
-	type Log = Log;
-	/// The ubiquitous origin type.
-	type Origin = Origin;
+    /// The identifier used to distinguish between accounts.
+    type AccountId = AccountId;
+    /// The lookup mechanism to get account ID from whatever is passed in dispatchers.
+    type Lookup = Indices;
+    /// The index type for storing how many extrinsics an account has signed.
+    type Index = Nonce;
+    /// The index type for blocks.
+    type BlockNumber = BlockNumber;
+    /// The type for hashing blocks and tries.
+    type Hash = Hash;
+    /// The hashing algorithm used.
+    type Hashing = BlakeTwo256;
+    /// The header digest type.
+    type Digest = generic::Digest<Log>;
+    /// The header type.
+    type Header = generic::Header<BlockNumber, BlakeTwo256, Log>;
+    /// The ubiquitous event type.
+    type Event = Event;
+    /// The ubiquitous log type.
+    type Log = Log;
+    /// The ubiquitous origin type.
+    type Origin = Origin;
 }
 
 impl aura::Trait for Runtime {
-	type HandleReport = ();
+    type HandleReport = ();
 }
 
 impl consensus::Trait for Runtime {
-	/// The identifier we use to refer to authorities.
-	type SessionKey = AuthorityId;
-	// The aura module handles offline-reports internally
-	// rather than using an explicit report system.
-	type InherentOfflineReport = ();
-	/// The ubiquitous log type.
-	type Log = Log;
+    /// The identifier we use to refer to authorities.
+    type SessionKey = AuthorityId;
+    // The aura module handles offline-reports internally
+    // rather than using an explicit report system.
+    type InherentOfflineReport = ();
+    /// The ubiquitous log type.
+    type Log = Log;
 }
 
 impl indices::Trait for Runtime {
-	/// The type for recording indexing into the account enumeration. If this ever overflows, there
-	/// will be problems!
-	type AccountIndex = u32;
-	/// Use the standard means of resolving an index hint from an id.
-	type ResolveHint = indices::SimpleResolveHint<Self::AccountId, Self::AccountIndex>;
-	/// Determine whether an account is dead.
-	type IsDeadAccount = Balances;
-	/// The ubiquitous event type.
-	type Event = Event;
+    /// The type for recording indexing into the account enumeration. If this ever overflows, there
+    /// will be problems!
+    type AccountIndex = u32;
+    /// Use the standard means of resolving an index hint from an id.
+    type ResolveHint = indices::SimpleResolveHint<Self::AccountId, Self::AccountIndex>;
+    /// Determine whether an account is dead.
+    type IsDeadAccount = Balances;
+    /// The ubiquitous event type.
+    type Event = Event;
 }
 
 impl timestamp::Trait for Runtime {
-	/// A timestamp: seconds since the unix epoch.
-	type Moment = u64;
-	type OnTimestampSet = Aura;
+    /// A timestamp: seconds since the unix epoch.
+    type Moment = u64;
+    type OnTimestampSet = Aura;
 }
 
 impl balances::Trait for Runtime {
-	/// The type for recording an account's balance.
-	type Balance = u128;
-	/// What to do if an account's free balance gets zeroed.
-	type OnFreeBalanceZero = ();
-	/// What to do if a new account is created.
-	type OnNewAccount = Indices;
-	/// The ubiquitous event type.
-	type Event = Event;
-
-	type TransactionPayment = ();
-	type DustRemoval = ();
-	type TransferPayment = ();
+    /// The type for recording an account's balance.
+    type Balance = u128;
+    /// What to do if an account's free balance gets zeroed.
+    type OnFreeBalanceZero = ();
+    /// What to do if a new account is created.
+    type OnNewAccount = Indices;
+    /// The ubiquitous event type.
+    type Event = Event;
+
+    type TransactionPayment = ();
+    type DustRemoval = ();
+    type TransferPayment = ();
 }
 
 impl sudo::Trait for Runtime {
-	/// The ubiquitous event type.
-	type Event = Event;
-	type Proposal = Call;
+    /// The ubiquitous event type.
+    type Event = Event;
+    type Proposal = Call;
 }
 
 /// Used for the module template in `./template.rs`
 impl template::Trait for Runtime {
-	type Event = Event;
+    type Event = Event;
 }
 
 construct_runtime!(
@@ -221,7 +227,8 @@ pub type Block = generic::Block<Header, UncheckedExtrinsic>;
 /// BlockId type as expected by this runtime.
 pub type BlockId = generic::BlockId<Block>;
 /// Unchecked extrinsic type as expected by this runtime.
-pub type UncheckedExtrinsic = generic::UncheckedMortalCompactExtrinsic<Address, Nonce, Call, Signature>;
+pub type UncheckedExtrinsic =
+    generic::UncheckedMortalCompactExtrinsic<Address, Nonce, Call, Signature>;
 /// Extrinsic type that has already been checked.
 pub type CheckedExtrinsic = generic::CheckedExtrinsic<AccountId, Nonce, Call>;
 /// Executive: handles dispatch to the various modules.
@@ -229,69 +236,69 @@ pub type Executive = executive::Executive for Runtime { - fn version() -> RuntimeVersion { - VERSION - } - - fn execute_block(block: Block) { - Executive::execute_block(block) - } - - fn initialize_block(header: &::Header) { - Executive::initialize_block(header) - } - } - - impl runtime_api::Metadata for Runtime { - fn metadata() -> OpaqueMetadata { - Runtime::metadata().into() - } - } - - impl block_builder_api::BlockBuilder for Runtime { - fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyResult { - Executive::apply_extrinsic(extrinsic) - } - - fn finalize_block() -> ::Header { - Executive::finalize_block() - } - - fn inherent_extrinsics(data: InherentData) -> Vec<::Extrinsic> { - data.create_extrinsics() - } - - fn check_inherents(block: Block, data: InherentData) -> CheckInherentsResult { - data.check_extrinsics(&block) - } - - fn random_seed() -> ::Hash { - System::random_seed() - } - } - - impl runtime_api::TaggedTransactionQueue for Runtime { - fn validate_transaction(tx: ::Extrinsic) -> TransactionValidity { - Executive::validate_transaction(tx) - } - } - - impl consensus_aura::AuraApi for Runtime { - fn slot_duration() -> u64 { - Aura::slot_duration() - } - } - - impl offchain_primitives::OffchainWorkerApi for Runtime { - fn offchain_worker(n: NumberFor) { - Executive::offchain_worker(n) - } - } - - impl consensus_authorities::AuthoritiesApi for Runtime { - fn authorities() -> Vec { - Consensus::authorities() - } - } + impl runtime_api::Core for Runtime { + fn version() -> RuntimeVersion { + VERSION + } + + fn execute_block(block: Block) { + Executive::execute_block(block) + } + + fn initialize_block(header: &::Header) { + Executive::initialize_block(header) + } + } + + impl runtime_api::Metadata for Runtime { + fn metadata() -> OpaqueMetadata { + Runtime::metadata().into() + } + } + + impl block_builder_api::BlockBuilder for Runtime { + fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyResult { + Executive::apply_extrinsic(extrinsic) + } + + fn finalize_block() -> ::Header { + Executive::finalize_block() + } + + fn inherent_extrinsics(data: InherentData) -> Vec<::Extrinsic> { + data.create_extrinsics() + } + + fn check_inherents(block: Block, data: InherentData) -> CheckInherentsResult { + data.check_extrinsics(&block) + } + + fn random_seed() -> ::Hash { + System::random_seed() + } + } + + impl runtime_api::TaggedTransactionQueue for Runtime { + fn validate_transaction(tx: ::Extrinsic) -> TransactionValidity { + Executive::validate_transaction(tx) + } + } + + impl consensus_aura::AuraApi for Runtime { + fn slot_duration() -> u64 { + Aura::slot_duration() + } + } + + impl offchain_primitives::OffchainWorkerApi for Runtime { + fn offchain_worker(n: NumberFor) { + Executive::offchain_worker(n) + } + } + + impl consensus_authorities::AuthoritiesApi for Runtime { + fn authorities() -> Vec { + Consensus::authorities() + } + } } diff --git a/node-template/runtime/src/template.rs b/node-template/runtime/src/template.rs index fd122433da..860e2df02f 100644 --- a/node-template/runtime/src/template.rs +++ b/node-template/runtime/src/template.rs @@ -4,120 +4,125 @@ /// If you change the name of this file, make sure to update its references in runtime/src/lib.rs /// If you remove this file, you can remove those references - /// For more guidance on Substrate modules, see the example module /// https://github.com/paritytech/substrate/blob/master/srml/example/src/lib.rs - -use support::{decl_module, decl_storage, decl_event, StorageValue, dispatch::Result}; +use 
support::{decl_event, decl_module, decl_storage, dispatch::Result, StorageValue};
 use system::ensure_signed;
 
 /// The module's configuration trait.
 pub trait Trait: system::Trait {
-	// TODO: Add other types and constants required to configure this module.
+    // TODO: Add other types and constants required to configure this module.
 
-	/// The overarching event type.
-	type Event: From<Event<Self>> + Into<<Self as system::Trait>::Event>;
+    /// The overarching event type.
+    type Event: From<Event<Self>> + Into<<Self as system::Trait>::Event>;
 }
 
 /// This module's storage items.
 decl_storage! {
-	trait Store for Module<T: Trait> as TemplateModule {
-		// Just a dummy storage item.
-		// Here we are declaring a StorageValue, `Something` as an Option<u32>
-		// `get(something)` is the default getter which returns either the stored `u32` or `None` if nothing stored
-		Something get(something): Option<u32>;
-	}
+    trait Store for Module<T: Trait> as TemplateModule {
+        // Just a dummy storage item.
+        // Here we are declaring a StorageValue, `Something` as an Option<u32>
+        // `get(something)` is the default getter which returns either the stored `u32` or `None` if nothing stored
+        Something get(something): Option<u32>;
+    }
 }
 
 decl_module! {
-	/// The module declaration.
-	pub struct Module<T: Trait> for enum Call where origin: T::Origin {
-		// Initializing events
-		// this is needed only if you are using events in your module
-		fn deposit_event<T>() = default;
-
-		// Just a dummy entry point.
-		// function that can be called by the external world as an extrinsics call
-		// takes a parameter of the type `AccountId`, stores it and emits an event
-		pub fn do_something(origin, something: u32) -> Result {
-			// TODO: You only need this if you want to check it was signed.
-			let who = ensure_signed(origin)?;
-
-			// TODO: Code to execute when something calls this.
-			// For example: the following line stores the passed in u32 in the storage
-			<Something<T>>::put(something);
-
-			// here we are raising the Something event
-			Self::deposit_event(RawEvent::SomethingStored(something, who));
-			Ok(())
-		}
-	}
+    /// The module declaration.
+    pub struct Module<T: Trait> for enum Call where origin: T::Origin {
+        // Initializing events
+        // this is needed only if you are using events in your module
+        fn deposit_event<T>() = default;
+
+        // Just a dummy entry point.
+        // function that can be called by the external world as an extrinsics call
+        // takes a parameter of the type `AccountId`, stores it and emits an event
+        pub fn do_something(origin, something: u32) -> Result {
+            // TODO: You only need this if you want to check it was signed.
+            let who = ensure_signed(origin)?;
+
+            // TODO: Code to execute when something calls this.
+            // For example: the following line stores the passed in u32 in the storage
+            <Something<T>>::put(something);
+
+            // here we are raising the Something event
+            Self::deposit_event(RawEvent::SomethingStored(something, who));
+            Ok(())
+        }
+    }
 }
 
 decl_event!(
-	pub enum Event<T> where AccountId = <T as system::Trait>::AccountId {
-		// Just a dummy event.
-		// Event `Something` is declared with a parameter of the type `u32` and `AccountId`
-		// To emit this event, we call the deposit function from our runtime functions
-		SomethingStored(u32, AccountId),
-	}
+    pub enum Event<T>
+    where
+        AccountId = <T as system::Trait>::AccountId,
+    {
+        // Just a dummy event.
+        // Event `Something` is declared with a parameter of the type `u32` and `AccountId`
+        // To emit this event, we call the deposit function from our runtime functions
+        SomethingStored(u32, AccountId),
+    }
 );
 
 /// tests for this module
 #[cfg(test)]
 mod tests {
-	use super::*;
-
-	use runtime_io::with_externalities;
-	use primitives::{H256, Blake2Hasher};
-	use support::{impl_outer_origin, assert_ok};
-	use runtime_primitives::{
-		BuildStorage,
-		traits::{BlakeTwo256, IdentityLookup},
-		testing::{Digest, DigestItem, Header}
-	};
-
-	impl_outer_origin! {
-		pub enum Origin for Test {}
-	}
-
-	// For testing the module, we construct most of a mock runtime. This means
-	// first constructing a configuration type (`Test`) which `impl`s each of the
-	// configuration traits of modules we want to use.
-	#[derive(Clone, Eq, PartialEq)]
-	pub struct Test;
-	impl system::Trait for Test {
-		type Origin = Origin;
-		type Index = u64;
-		type BlockNumber = u64;
-		type Hash = H256;
-		type Hashing = BlakeTwo256;
-		type Digest = Digest;
-		type AccountId = u64;
-		type Lookup = IdentityLookup<u64>;
-		type Header = Header;
-		type Event = ();
-		type Log = DigestItem;
-	}
-	impl Trait for Test {
-		type Event = ();
-	}
-	type TemplateModule = Module<Test>;
-
-	// This function basically just builds a genesis storage key/value store according to
-	// our desired mockup.
-	fn new_test_ext() -> runtime_io::TestExternalities<Blake2Hasher> {
-		system::GenesisConfig::<Test>::default().build_storage().unwrap().0.into()
-	}
-
-	#[test]
-	fn it_works_for_default_value() {
-		with_externalities(&mut new_test_ext(), || {
-			// Just a dummy test for the dummy function `do_something`
-			// calling the `do_something` function with a value 42
-			assert_ok!(TemplateModule::do_something(Origin::signed(1), 42));
-			// asserting that the stored value is equal to what we stored
-			assert_eq!(TemplateModule::something(), Some(42));
-		});
-	}
+    use super::*;
+
+    use primitives::{Blake2Hasher, H256};
+    use runtime_io::with_externalities;
+    use runtime_primitives::{
+        testing::{Digest, DigestItem, Header},
+        traits::{BlakeTwo256, IdentityLookup},
+        BuildStorage,
+    };
+    use support::{assert_ok, impl_outer_origin};
+
+    impl_outer_origin! {
+        pub enum Origin for Test {}
+    }
+
+    // For testing the module, we construct most of a mock runtime. This means
+    // first constructing a configuration type (`Test`) which `impl`s each of the
+    // configuration traits of modules we want to use.
+    #[derive(Clone, Eq, PartialEq)]
+    pub struct Test;
+    impl system::Trait for Test {
+        type Origin = Origin;
+        type Index = u64;
+        type BlockNumber = u64;
+        type Hash = H256;
+        type Hashing = BlakeTwo256;
+        type Digest = Digest;
+        type AccountId = u64;
+        type Lookup = IdentityLookup<u64>;
+        type Header = Header;
+        type Event = ();
+        type Log = DigestItem;
+    }
+    impl Trait for Test {
+        type Event = ();
+    }
+    type TemplateModule = Module<Test>;
+
+    // This function basically just builds a genesis storage key/value store according to
+    // our desired mockup.
+    fn new_test_ext() -> runtime_io::TestExternalities<Blake2Hasher> {
+        system::GenesisConfig::<Test>::default()
+            .build_storage()
+            .unwrap()
+            .0
+            .into()
+    }
+
+    #[test]
+    fn it_works_for_default_value() {
+        with_externalities(&mut new_test_ext(), || {
+            // Just a dummy test for the dummy function `do_something`
+            // calling the `do_something` function with a value 42
+            assert_ok!(TemplateModule::do_something(Origin::signed(1), 42));
+            // asserting that the stored value is equal to what we stored
+            assert_eq!(TemplateModule::something(), Some(42));
+        });
+    }
 }
diff --git a/node-template/src/chain_spec.rs b/node-template/src/chain_spec.rs
index 3cb8d21d5d..b95c4a60a6 100644
--- a/node-template/src/chain_spec.rs
+++ b/node-template/src/chain_spec.rs
@@ -1,8 +1,8 @@
-use primitives::{ed25519, sr25519, Pair};
 use node_template_runtime::{
-	AccountId, GenesisConfig, ConsensusConfig, TimestampConfig, BalancesConfig,
-	SudoConfig, IndicesConfig,
+    AccountId, BalancesConfig, ConsensusConfig, GenesisConfig, IndicesConfig, SudoConfig,
+    TimestampConfig,
 };
+use primitives::{ed25519, sr25519, Pair};
 use substrate_service;
 
 use ed25519::Public as AuthorityId;
@@ -18,80 +18,85 @@ pub type ChainSpec = substrate_service::ChainSpec<GenesisConfig>;
 /// from a string (`--chain=...`) into a `ChainSpec`.
 #[derive(Clone, Debug)]
 pub enum Alternative {
-	/// Whatever the current runtime is, with just Alice as an auth.
-	Development,
-	/// Whatever the current runtime is, with simple Alice/Bob auths.
-	LocalTestnet,
+    /// Whatever the current runtime is, with just Alice as an auth.
+    Development,
+    /// Whatever the current runtime is, with simple Alice/Bob auths.
+    LocalTestnet,
 }
 
 fn authority_key(s: &str) -> AuthorityId {
-	ed25519::Pair::from_string(&format!("//{}", s), None)
-		.expect("static values are valid; qed")
-		.public()
+    ed25519::Pair::from_string(&format!("//{}", s), None)
+        .expect("static values are valid; qed")
+        .public()
}
 
 fn account_key(s: &str) -> AccountId {
-	sr25519::Pair::from_string(&format!("//{}", s), None)
-		.expect("static values are valid; qed")
-		.public()
+    sr25519::Pair::from_string(&format!("//{}", s), None)
+        .expect("static values are valid; qed")
+        .public()
 }
 
 impl Alternative {
-	/// Get an actual chain config from one of the alternatives.
+ pub(crate) fn load(self) -> Result { + Ok(match self { + Alternative::Development => ChainSpec::from_genesis( + "Development", + "dev", + || { + testnet_genesis( + vec![authority_key("Alice")], + vec![account_key("Alice")], + account_key("Alice"), + ) + }, + vec![], + None, + None, + None, + None, + ), + Alternative::LocalTestnet => ChainSpec::from_genesis( + "Local Testnet", + "local_testnet", + || { + testnet_genesis( + vec![authority_key("Alice"), authority_key("Bob")], + vec![ + account_key("Alice"), + account_key("Bob"), + account_key("Charlie"), + account_key("Dave"), + account_key("Eve"), + account_key("Ferdie"), + ], + account_key("Alice"), + ) + }, + vec![], + None, + None, + None, + None, + ), + }) + } - pub(crate) fn from(s: &str) -> Option { - match s { - "dev" => Some(Alternative::Development), - "" | "local" => Some(Alternative::LocalTestnet), - _ => None, - } - } + pub(crate) fn from(s: &str) -> Option { + match s { + "dev" => Some(Alternative::Development), + "" | "local" => Some(Alternative::LocalTestnet), + _ => None, + } + } } -fn testnet_genesis(initial_authorities: Vec, endowed_accounts: Vec, root_key: AccountId) -> GenesisConfig { - GenesisConfig { +fn testnet_genesis( + initial_authorities: Vec, + endowed_accounts: Vec, + root_key: AccountId, +) -> GenesisConfig { + GenesisConfig { consensus: Some(ConsensusConfig { code: include_bytes!("../runtime/wasm/target/wasm32-unknown-unknown/release/node_template_runtime_wasm.compact.wasm").to_vec(), authorities: initial_authorities.clone(), diff --git a/node-template/src/cli.rs b/node-template/src/cli.rs index 258d2194a6..8f4a4d52f0 100644 --- a/node-template/src/cli.rs +++ b/node-template/src/cli.rs @@ -1,94 +1,104 @@ +use crate::chain_spec; use crate::service; -use futures::{future, Future, sync::oneshot}; +use futures::{future, sync::oneshot, Future}; +use log::info; use std::cell::RefCell; -use tokio::runtime::Runtime; -pub use substrate_cli::{VersionInfo, IntoExit, error}; -use substrate_cli::{informant, parse_and_execute, NoCustom}; -use substrate_service::{ServiceFactory, Roles as ServiceRoles}; -use crate::chain_spec; use std::ops::Deref; -use log::info; +pub use substrate_cli::{error, IntoExit, VersionInfo}; +use substrate_cli::{informant, parse_and_execute, NoCustom}; +use substrate_service::{Roles as ServiceRoles, ServiceFactory}; +use tokio::runtime::Runtime; /// Parse command line arguments into service configuration. 
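
The `run` signature reworked in the hunk below forwards its arguments generically. Stripped of the CLI machinery, the bound is the standard argv-forwarding shape; the `Item = T` and `Into<OsString>` parameters here are reconstructed assumptions, since this is how `std::env::args` output is usually accepted:

use std::ffi::OsString;

// Same generic shape as the node's `run`: accept anything that yields
// OsString-convertible, clonable items.
fn run<I, T>(args: I) -> Result<(), String>
where
    I: IntoIterator<Item = T>,
    T: Into<OsString> + Clone,
{
    for arg in args {
        let arg: OsString = arg.into();
        println!("got argument {:?}", arg);
    }
    Ok(())
}

fn main() {
    // Both owned Strings (std::env::args) and &str literals satisfy the bound.
    run(std::env::args()).unwrap();
    run(vec!["--chain", "dev"]).unwrap();
}
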
-pub fn run(args: I, exit: E, version: VersionInfo) -> error::Result<()> where - I: IntoIterator, - T: Into + Clone, - E: IntoExit, +pub fn run(args: I, exit: E, version: VersionInfo) -> error::Result<()> +where + I: IntoIterator, + T: Into + Clone, + E: IntoExit, { - parse_and_execute::( - load_spec, &version, "substrate-node", args, exit, - |exit, _custom_args, config| { - info!("{}", version.name); - info!(" version {}", config.full_version()); - info!(" by {}, 2017, 2018", version.author); - info!("Chain specification: {}", config.chain_spec.name()); - info!("Node name: {}", config.name); - info!("Roles: {:?}", config.roles); - let runtime = Runtime::new().map_err(|e| format!("{:?}", e))?; - let executor = runtime.executor(); - match config.roles { - ServiceRoles::LIGHT => run_until_exit( - runtime, - service::Factory::new_light(config, executor).map_err(|e| format!("{:?}", e))?, - exit - ), - _ => run_until_exit( - runtime, - service::Factory::new_full(config, executor).map_err(|e| format!("{:?}", e))?, - exit - ), - }.map_err(|e| format!("{:?}", e)) - } - ).map_err(Into::into).map(|_| ()) + parse_and_execute::( + load_spec, + &version, + "substrate-node", + args, + exit, + |exit, _custom_args, config| { + info!("{}", version.name); + info!(" version {}", config.full_version()); + info!(" by {}, 2017, 2018", version.author); + info!("Chain specification: {}", config.chain_spec.name()); + info!("Node name: {}", config.name); + info!("Roles: {:?}", config.roles); + let runtime = Runtime::new().map_err(|e| format!("{:?}", e))?; + let executor = runtime.executor(); + match config.roles { + ServiceRoles::LIGHT => run_until_exit( + runtime, + service::Factory::new_light(config, executor) + .map_err(|e| format!("{:?}", e))?, + exit, + ), + _ => run_until_exit( + runtime, + service::Factory::new_full(config, executor).map_err(|e| format!("{:?}", e))?, + exit, + ), + } + .map_err(|e| format!("{:?}", e)) + }, + ) + .map_err(Into::into) + .map(|_| ()) } fn load_spec(id: &str) -> Result, String> { - Ok(match chain_spec::Alternative::from(id) { - Some(spec) => Some(spec.load()?), - None => None, - }) + Ok(match chain_spec::Alternative::from(id) { + Some(spec) => Some(spec.load()?), + None => None, + }) } -fn run_until_exit( - mut runtime: Runtime, - service: T, - e: E, -) -> error::Result<()> - where - T: Deref>, - C: substrate_service::Components, - E: IntoExit, +fn run_until_exit(mut runtime: Runtime, service: T, e: E) -> error::Result<()> +where + T: Deref>, + C: substrate_service::Components, + E: IntoExit, { - let (exit_send, exit) = exit_future::signal(); + let (exit_send, exit) = exit_future::signal(); - let executor = runtime.executor(); - informant::start(&service, exit.clone(), executor.clone()); + let executor = runtime.executor(); + informant::start(&service, exit.clone(), executor.clone()); - let _ = runtime.block_on(e.into_exit()); - exit_send.fire(); + let _ = runtime.block_on(e.into_exit()); + exit_send.fire(); - // we eagerly drop the service so that the internal exit future is fired, - // but we need to keep holding a reference to the global telemetry guard - let _telemetry = service.telemetry(); - drop(service); - Ok(()) + // we eagerly drop the service so that the internal exit future is fired, + // but we need to keep holding a reference to the global telemetry guard + let _telemetry = service.telemetry(); + drop(service); + Ok(()) } // handles ctrl-c pub struct Exit; impl IntoExit for Exit { - type Exit = future::MapErr, fn(oneshot::Canceled) -> ()>; - fn into_exit(self) -> 
Self::Exit { - // can't use signal directly here because CtrlC takes only `Fn`. - let (exit_send, exit) = oneshot::channel(); + type Exit = future::MapErr, fn(oneshot::Canceled) -> ()>; + fn into_exit(self) -> Self::Exit { + // can't use signal directly here because CtrlC takes only `Fn`. + let (exit_send, exit) = oneshot::channel(); - let exit_send_cell = RefCell::new(Some(exit_send)); - ctrlc::set_handler(move || { - if let Some(exit_send) = exit_send_cell.try_borrow_mut().expect("signal handler not reentrant; qed").take() { - exit_send.send(()).expect("Error sending exit notification"); - } - }).expect("Error setting Ctrl-C handler"); + let exit_send_cell = RefCell::new(Some(exit_send)); + ctrlc::set_handler(move || { + if let Some(exit_send) = exit_send_cell + .try_borrow_mut() + .expect("signal handler not reentrant; qed") + .take() + { + exit_send.send(()).expect("Error sending exit notification"); + } + }) + .expect("Error setting Ctrl-C handler"); - exit.map_err(drop) - } + exit.map_err(drop) + } } diff --git a/node-template/src/main.rs b/node-template/src/main.rs index 7d8c3076c6..30da735a1e 100644 --- a/node-template/src/main.rs +++ b/node-template/src/main.rs @@ -4,22 +4,22 @@ #![warn(unused_extern_crates)] mod chain_spec; -mod service; mod cli; +mod service; -pub use substrate_cli::{VersionInfo, IntoExit, error}; +pub use substrate_cli::{error, IntoExit, VersionInfo}; fn run() -> cli::error::Result<()> { - let version = VersionInfo { - name: "Substrate Node", - commit: env!("VERGEN_SHA_SHORT"), - version: env!("CARGO_PKG_VERSION"), - executable_name: "template-node", - author: "Anonymous", - description: "Template Node", - support_url: "support.anonymous.an", - }; - cli::run(::std::env::args(), cli::Exit, version) + let version = VersionInfo { + name: "Substrate Node", + commit: env!("VERGEN_SHA_SHORT"), + version: env!("CARGO_PKG_VERSION"), + executable_name: "template-node", + author: "Anonymous", + description: "Template Node", + support_url: "support.anonymous.an", + }; + cli::run(::std::env::args(), cli::Exit, version) } error_chain::quick_main!(run); diff --git a/node-template/src/service.rs b/node-template/src/service.rs index 2b7554c79d..f6ba370541 100644 --- a/node-template/src/service.rs +++ b/node-template/src/service.rs @@ -2,23 +2,22 @@ #![warn(unused_extern_crates)] -use std::sync::Arc; -use log::info; -use transaction_pool::{self, txpool::{Pool as TransactionPool}}; -use node_template_runtime::{self, GenesisConfig, opaque::Block, RuntimeApi}; -use substrate_service::{ - FactoryFullConfiguration, LightComponents, FullComponents, FullBackend, - FullClient, LightClient, LightBackend, FullExecutor, LightExecutor, - TaskExecutor, -}; use basic_authorship::ProposerFactory; -use consensus::{import_queue, start_aura, AuraImportQueue, SlotDuration, NothingExtra}; -use substrate_client as client; -use primitives::{ed25519::Pair, Pair as PairT}; +use consensus::{import_queue, start_aura, AuraImportQueue, NothingExtra, SlotDuration}; use inherents::InherentDataProviders; +use log::info; use network::construct_simple_protocol; +use node_template_runtime::{self, opaque::Block, GenesisConfig, RuntimeApi}; +use primitives::{ed25519::Pair, Pair as PairT}; +use std::sync::Arc; +use substrate_client as client; use substrate_executor::native_executor_instance; use substrate_service::construct_service_factory; +use substrate_service::{ + FactoryFullConfiguration, FullBackend, FullClient, FullComponents, FullExecutor, LightBackend, + LightClient, LightComponents, LightExecutor, 
TaskExecutor, +}; +use transaction_pool::{self, txpool::Pool as TransactionPool}; pub use substrate_executor::NativeExecutor; // Our native executor instance. @@ -31,87 +30,87 @@ native_executor_instance!( #[derive(Default)] pub struct NodeConfig { - inherent_data_providers: InherentDataProviders, + inherent_data_providers: InherentDataProviders, } construct_simple_protocol! { - /// Demo protocol attachment for substrate. - pub struct NodeProtocol where Block = Block { } + /// Demo protocol attachment for substrate. + pub struct NodeProtocol where Block = Block { } } construct_service_factory! { - struct Factory { - Block = Block, - RuntimeApi = RuntimeApi, - NetworkProtocol = NodeProtocol { |config| Ok(NodeProtocol::new()) }, - RuntimeDispatch = Executor, - FullTransactionPoolApi = transaction_pool::ChainApi, FullExecutor, Block, RuntimeApi>, Block> - { |config, client| Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(client))) }, - LightTransactionPoolApi = transaction_pool::ChainApi, LightExecutor, Block, RuntimeApi>, Block> - { |config, client| Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(client))) }, - Genesis = GenesisConfig, - Configuration = NodeConfig, - FullService = FullComponents - { |config: FactoryFullConfiguration, executor: TaskExecutor| - FullComponents::::new(config, executor) - }, - AuthoritySetup = { - |service: Self::FullService, executor: TaskExecutor, key: Option>| { - if let Some(key) = key { - info!("Using authority key {}", key.public()); - let proposer = Arc::new(ProposerFactory { - client: service.client(), - transaction_pool: service.transaction_pool(), - inherents_pool: service.inherents_pool(), - }); - let client = service.client(); - executor.spawn(start_aura( - SlotDuration::get_or_compute(&*client)?, - key.clone(), - client.clone(), - client, - proposer, - service.network(), - service.on_exit(), - service.config.custom.inherent_data_providers.clone(), - service.config.force_authoring, - )?); - } + struct Factory { + Block = Block, + RuntimeApi = RuntimeApi, + NetworkProtocol = NodeProtocol { |config| Ok(NodeProtocol::new()) }, + RuntimeDispatch = Executor, + FullTransactionPoolApi = transaction_pool::ChainApi, FullExecutor, Block, RuntimeApi>, Block> + { |config, client| Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(client))) }, + LightTransactionPoolApi = transaction_pool::ChainApi, LightExecutor, Block, RuntimeApi>, Block> + { |config, client| Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(client))) }, + Genesis = GenesisConfig, + Configuration = NodeConfig, + FullService = FullComponents + { |config: FactoryFullConfiguration, executor: TaskExecutor| + FullComponents::::new(config, executor) + }, + AuthoritySetup = { + |service: Self::FullService, executor: TaskExecutor, key: Option>| { + if let Some(key) = key { + info!("Using authority key {}", key.public()); + let proposer = Arc::new(ProposerFactory { + client: service.client(), + transaction_pool: service.transaction_pool(), + inherents_pool: service.inherents_pool(), + }); + let client = service.client(); + executor.spawn(start_aura( + SlotDuration::get_or_compute(&*client)?, + key.clone(), + client.clone(), + client, + proposer, + service.network(), + service.on_exit(), + service.config.custom.inherent_data_providers.clone(), + service.config.force_authoring, + )?); + } - Ok(service) - } - }, - LightService = LightComponents - { |config, executor| >::new(config, executor) }, - FullImportQueue = AuraImportQueue< - Self::Block, - > 
- { |config: &mut FactoryFullConfiguration , client: Arc>| { - import_queue::<_, _, _, Pair>( - SlotDuration::get_or_compute(&*client)?, - client.clone(), - None, - client, - NothingExtra, - config.custom.inherent_data_providers.clone(), - true, - ).map_err(Into::into) - } - }, - LightImportQueue = AuraImportQueue< - Self::Block, - > - { |config: &mut FactoryFullConfiguration, client: Arc>| { - import_queue::<_, _, _, Pair>( - SlotDuration::get_or_compute(&*client)?, - client.clone(), - None, - client, - NothingExtra, - config.custom.inherent_data_providers.clone(), - true, - ).map_err(Into::into) - } - }, - } + Ok(service) + } + }, + LightService = LightComponents + { |config, executor| >::new(config, executor) }, + FullImportQueue = AuraImportQueue< + Self::Block, + > + { |config: &mut FactoryFullConfiguration , client: Arc>| { + import_queue::<_, _, _, Pair>( + SlotDuration::get_or_compute(&*client)?, + client.clone(), + None, + client, + NothingExtra, + config.custom.inherent_data_providers.clone(), + true, + ).map_err(Into::into) + } + }, + LightImportQueue = AuraImportQueue< + Self::Block, + > + { |config: &mut FactoryFullConfiguration, client: Arc>| { + import_queue::<_, _, _, Pair>( + SlotDuration::get_or_compute(&*client)?, + client.clone(), + None, + client, + NothingExtra, + config.custom.inherent_data_providers.clone(), + true, + ).map_err(Into::into) + } + }, + } } diff --git a/node/cli/build.rs b/node/cli/build.rs index e7a7b271f1..8dba2383a9 100644 --- a/node/cli/build.rs +++ b/node/cli/build.rs @@ -14,37 +14,46 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use cli::{NoCustom, CoreParams}; +use cli::{CoreParams, NoCustom}; -use std::{fs, env, path::Path}; +use std::{env, fs, path::Path}; -use structopt::{StructOpt, clap::Shell}; +use structopt::{clap::Shell, StructOpt}; fn main() { - build_shell_completion(); + build_shell_completion(); } /// Build shell completion scripts for all known shells /// Full list in https://github.com/kbknapp/clap-rs/blob/e9d0562a1dc5dfe731ed7c767e6cee0af08f0cf9/src/app/parser.rs#L123 fn build_shell_completion() { - for shell in &[Shell::Bash, Shell::Fish, Shell::Zsh, Shell::Elvish, Shell::PowerShell] { - build_completion(shell); - } + for shell in &[ + Shell::Bash, + Shell::Fish, + Shell::Zsh, + Shell::Elvish, + Shell::PowerShell, + ] { + build_completion(shell); + } } /// Build the shell auto-completion for a given Shell fn build_completion(shell: &Shell) { - let outdir = match env::var_os("OUT_DIR") { - None => return, - Some(dir) => dir, - }; - let path = Path::new(&outdir) - .parent().unwrap() - .parent().unwrap() - .parent().unwrap() - .join("completion-scripts"); - - fs::create_dir(&path).ok(); - - CoreParams::::clap().gen_completions("substrate-node", *shell, &path); + let outdir = match env::var_os("OUT_DIR") { + None => return, + Some(dir) => dir, + }; + let path = Path::new(&outdir) + .parent() + .unwrap() + .parent() + .unwrap() + .parent() + .unwrap() + .join("completion-scripts"); + + fs::create_dir(&path).ok(); + + CoreParams::::clap().gen_completions("substrate-node", *shell, &path); } diff --git a/node/cli/src/chain_spec.rs b/node/cli/src/chain_spec.rs index a45b25249f..888c5bbd2f 100644 --- a/node/cli/src/chain_spec.rs +++ b/node/cli/src/chain_spec.rs @@ -16,14 +16,16 @@ //! Substrate chain configurations. 
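
The `node/cli/build.rs` hunk above feeds every supported shell through clap's completion generator at build time. A stand-alone sketch of the same pattern (hypothetical `DemoParams` struct standing in for the real `CoreParams`; `gen_completions` is the clap 2.x API the build script itself calls):

use structopt::{clap::Shell, StructOpt};

// Hypothetical stand-in for the node's CLI definition.
#[derive(StructOpt)]
#[structopt(name = "substrate-node")]
struct DemoParams {
    /// Chain specification to use.
    #[structopt(long = "chain")]
    chain: Option<String>,
}

fn main() {
    // The real build script writes into OUT_DIR/../../../completion-scripts;
    // a local directory keeps the sketch self-contained.
    std::fs::create_dir_all("completion-scripts").expect("can create output dir");
    for shell in &[Shell::Bash, Shell::Fish, Shell::Zsh] {
        DemoParams::clap().gen_completions("substrate-node", *shell, "completion-scripts");
    }
}
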
-use primitives::{ed25519::Public as AuthorityId, ed25519, sr25519, Pair, crypto::UncheckedInto}; +use hex_literal::{hex, hex_impl}; use node_primitives::AccountId; -use node_runtime::{ConsensusConfig, CouncilSeatsConfig, CouncilVotingConfig, DemocracyConfig, - SessionConfig, StakingConfig, StakerStatus, TimestampConfig, BalancesConfig, TreasuryConfig, - SudoConfig, ContractConfig, GrandpaConfig, IndicesConfig, Permill, Perbill}; pub use node_runtime::GenesisConfig; +use node_runtime::{ + BalancesConfig, ConsensusConfig, ContractConfig, CouncilSeatsConfig, CouncilVotingConfig, + DemocracyConfig, GrandpaConfig, IndicesConfig, Perbill, Permill, SessionConfig, StakerStatus, + StakingConfig, SudoConfig, TimestampConfig, TreasuryConfig, +}; +use primitives::{crypto::UncheckedInto, ed25519, ed25519::Public as AuthorityId, sr25519, Pair}; use substrate_service; -use hex_literal::{hex, hex_impl}; use substrate_telemetry::TelemetryEndpoints; const STAGING_TELEMETRY_URL: &str = "wss://telemetry.polkadot.io/submit/"; @@ -33,49 +35,66 @@ pub type ChainSpec = substrate_service::ChainSpec; /// Dried Danta testnet generator pub fn dried_danta_config() -> Result { - ChainSpec::from_embedded(include_bytes!("../res/dried-danta.json")) + ChainSpec::from_embedded(include_bytes!("../res/dried-danta.json")) } fn staging_testnet_config_genesis() -> GenesisConfig { - // stash, controller, session-key - // generated with secret: - // for i in 1 2 3 4 ; do for j in stash controller; do subkey -p danta-$i-$j restore $secret; done; done - // and - // for i in 1 2 3 4 ; do for j in session; do subkey --ed25519 -p danta-$i-$j restore $secret; done; done - let initial_authorities: Vec<(AccountId, AccountId, AuthorityId)> = vec![( - hex!["d807f8bd6b4b02b3db716dd5372960b094ed0e62b5704a07bc990130a642992b"].unchecked_into(), // 5GwxZv7LxSUQn89TLUaLi3oEWhFcabqW3nHcEg2J88gZNhrb - hex!["1a934af462454e512e22b5d9455c0c3c2df479b1c61406b3d990f6bc2eb25e09"].unchecked_into(), // 5CfYrg5cW8UebBdfJpJbKFhZLyk7yHWXUgdxZnSGb2dWKgpt - hex!["831fcce3a9565baf093b52568a8cb9875cb54974d80da8fc4f0cc767128a23e9"].unchecked_into(), // 5F2daQPHK7yv4Yuwyz3cggvvn1R5u1ofGMQ5LK5XvnfebMcX - ),( - hex!["12652f26e427c56268095bb0ec5824471e37722b073a9fa5de61c61c1de94656"].unchecked_into(), // 5CUpn2JmpsWkHQjZgWjN3rqPEUnjjUQZYcMk14nbUgR2Gpab - hex!["5279e73e22971d729276ebad4eb6804d1b9c0c35bd32e8aba4513c674760a461"].unchecked_into(), // 5Dvqzke7Mdp3fP6Ysut7UXPSepPr3Qguys6LNkZGPSwXwAkR - hex!["dbe61640d854bb7bf83cbfaf638a8a4c76c49a919ec3bbdd86799061fc1903e4"].unchecked_into(), // 5H32hCtKf6nXSckviVhUvWb7N14wDCRunRkCM29mxEXwjcUZ - ),( - hex!["a81d738fdeeaed440cfce5635e0820d0d23e89207cf66a62b8c0d2a968e37d32"].unchecked_into(), // 5Fs8ehAjDEnenDwULCPnEr3HVXgepAVfyk9ABW84NfxCYtWD - hex!["443a2c779a5f5dada8ee6921efec9673f67e5ce1bd6012899ff6c1adc437696c"].unchecked_into(), // 5DcAPqR269myKXhZmwbU1x2xLbuTojr85jHNRuDhrFdZ3vwi - hex!["5bc01f56225e8602372fb343dba65a73e20c55bdbb3b8343a8f34df298a616fb"].unchecked_into(), // 5E91HbY2xo2qDJzi3KY8nRXjDNAQE9WtmMaji6YRwT8DAuK1 - ),( - hex!["e269e835e0bc07c497d55bc17c7bb29c85c5615f9e61582ffdeca7e5f5c66578"].unchecked_into(), // 5HBa95U5HDFCV1N5Xyrjti65F71tHRQcPbZBmkxRJ39SpqzM - hex!["3e9829e6fd4fc7501b504fc16f12177c6c7f38aeb3b8344efb9b15ee85118b2c"].unchecked_into(), // 5DUn2afs2QevZ6PrGu8snrt76157oacH6JXUD8JNM18VKMwK - hex!["0fd673ee5e95ed124bcd71463ff924c810573dad91527ab9d2b5af36f66ff84b"].unchecked_into(), // 5CRUHGLA1JYe2v4p479VCHybqjB9uBXjGkJ2npdduVdrTuUM - )]; - // generated with secret: subkey -p danta-root restore 
$secret - let endowed_accounts: Vec = vec![ - hex!["343df6f04ffae0840f214f6cb0da00b612c7e9347f980e7afafc520582f79136"].unchecked_into(), // 5DFCkiP9vky31C1ZP3LpuQYinLAFwQqq6vda7NXa8ALCpq5D - ]; - const MILLICENTS: u128 = 1_000_000_000; - const CENTS: u128 = 1_000 * MILLICENTS; // assume this is worth about a cent. - const DOLLARS: u128 = 100 * CENTS; + // stash, controller, session-key + // generated with secret: + // for i in 1 2 3 4 ; do for j in stash controller; do subkey -p danta-$i-$j restore $secret; done; done + // and + // for i in 1 2 3 4 ; do for j in session; do subkey --ed25519 -p danta-$i-$j restore $secret; done; done + let initial_authorities: Vec<(AccountId, AccountId, AuthorityId)> = vec![ + ( + hex!["d807f8bd6b4b02b3db716dd5372960b094ed0e62b5704a07bc990130a642992b"] + .unchecked_into(), // 5GwxZv7LxSUQn89TLUaLi3oEWhFcabqW3nHcEg2J88gZNhrb + hex!["1a934af462454e512e22b5d9455c0c3c2df479b1c61406b3d990f6bc2eb25e09"] + .unchecked_into(), // 5CfYrg5cW8UebBdfJpJbKFhZLyk7yHWXUgdxZnSGb2dWKgpt + hex!["831fcce3a9565baf093b52568a8cb9875cb54974d80da8fc4f0cc767128a23e9"] + .unchecked_into(), // 5F2daQPHK7yv4Yuwyz3cggvvn1R5u1ofGMQ5LK5XvnfebMcX + ), + ( + hex!["12652f26e427c56268095bb0ec5824471e37722b073a9fa5de61c61c1de94656"] + .unchecked_into(), // 5CUpn2JmpsWkHQjZgWjN3rqPEUnjjUQZYcMk14nbUgR2Gpab + hex!["5279e73e22971d729276ebad4eb6804d1b9c0c35bd32e8aba4513c674760a461"] + .unchecked_into(), // 5Dvqzke7Mdp3fP6Ysut7UXPSepPr3Qguys6LNkZGPSwXwAkR + hex!["dbe61640d854bb7bf83cbfaf638a8a4c76c49a919ec3bbdd86799061fc1903e4"] + .unchecked_into(), // 5H32hCtKf6nXSckviVhUvWb7N14wDCRunRkCM29mxEXwjcUZ + ), + ( + hex!["a81d738fdeeaed440cfce5635e0820d0d23e89207cf66a62b8c0d2a968e37d32"] + .unchecked_into(), // 5Fs8ehAjDEnenDwULCPnEr3HVXgepAVfyk9ABW84NfxCYtWD + hex!["443a2c779a5f5dada8ee6921efec9673f67e5ce1bd6012899ff6c1adc437696c"] + .unchecked_into(), // 5DcAPqR269myKXhZmwbU1x2xLbuTojr85jHNRuDhrFdZ3vwi + hex!["5bc01f56225e8602372fb343dba65a73e20c55bdbb3b8343a8f34df298a616fb"] + .unchecked_into(), // 5E91HbY2xo2qDJzi3KY8nRXjDNAQE9WtmMaji6YRwT8DAuK1 + ), + ( + hex!["e269e835e0bc07c497d55bc17c7bb29c85c5615f9e61582ffdeca7e5f5c66578"] + .unchecked_into(), // 5HBa95U5HDFCV1N5Xyrjti65F71tHRQcPbZBmkxRJ39SpqzM + hex!["3e9829e6fd4fc7501b504fc16f12177c6c7f38aeb3b8344efb9b15ee85118b2c"] + .unchecked_into(), // 5DUn2afs2QevZ6PrGu8snrt76157oacH6JXUD8JNM18VKMwK + hex!["0fd673ee5e95ed124bcd71463ff924c810573dad91527ab9d2b5af36f66ff84b"] + .unchecked_into(), // 5CRUHGLA1JYe2v4p479VCHybqjB9uBXjGkJ2npdduVdrTuUM + ), + ]; + // generated with secret: subkey -p danta-root restore $secret + let endowed_accounts: Vec = vec![ + hex!["343df6f04ffae0840f214f6cb0da00b612c7e9347f980e7afafc520582f79136"].unchecked_into(), // 5DFCkiP9vky31C1ZP3LpuQYinLAFwQqq6vda7NXa8ALCpq5D + ]; + const MILLICENTS: u128 = 1_000_000_000; + const CENTS: u128 = 1_000 * MILLICENTS; // assume this is worth about a cent. 
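
Spelled out, the constants just shown (together with `DOLLARS`, defined on the next line of the hunk) give round numbers that are easy to sanity-check; the assertions below use the same definitions:

const MILLICENTS: u128 = 1_000_000_000;
const CENTS: u128 = 1_000 * MILLICENTS; // 10^12
const DOLLARS: u128 = 100 * CENTS; // 10^14 base units per dollar

const SECS_PER_BLOCK: u64 = 6;
const MINUTES: u64 = 60 / SECS_PER_BLOCK; // 10 blocks per minute
const HOURS: u64 = MINUTES * 60;
const DAYS: u64 = HOURS * 24;

fn main() {
    assert_eq!(DOLLARS, 100_000_000_000_000);
    assert_eq!(DAYS, 14_400); // a day is 14,400 six-second blocks
    // The staging endowment of 10,000,000 DOLLARS per account:
    assert_eq!(10_000_000 * DOLLARS, 1_000_000_000_000_000_000_000);
}
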
+ const DOLLARS: u128 = 100 * CENTS; - const SECS_PER_BLOCK: u64 = 6; - const MINUTES: u64 = 60 / SECS_PER_BLOCK; - const HOURS: u64 = MINUTES * 60; - const DAYS: u64 = HOURS * 24; + const SECS_PER_BLOCK: u64 = 6; + const MINUTES: u64 = 60 / SECS_PER_BLOCK; + const HOURS: u64 = MINUTES * 60; + const DAYS: u64 = HOURS * 24; - const ENDOWMENT: u128 = 10_000_000 * DOLLARS; - const STASH: u128 = 100 * DOLLARS; + const ENDOWMENT: u128 = 10_000_000 * DOLLARS; + const STASH: u128 = 100 * DOLLARS; - GenesisConfig { + GenesisConfig { consensus: Some(ConsensusConfig { code: include_bytes!("../../runtime/wasm/target/wasm32-unknown-unknown/release/node_runtime.compact.wasm").to_vec(), // FIXME change once we have #1252 authorities: initial_authorities.iter().map(|x| x.2.clone()).collect(), @@ -173,69 +192,72 @@ fn staging_testnet_config_genesis() -> GenesisConfig { /// Staging testnet config. pub fn staging_testnet_config() -> ChainSpec { - let boot_nodes = vec![]; - ChainSpec::from_genesis( - "Staging Testnet", - "staging_testnet", - staging_testnet_config_genesis, - boot_nodes, - Some(TelemetryEndpoints::new(vec![(STAGING_TELEMETRY_URL.to_string(), 0)])), - None, - None, - None, - ) + let boot_nodes = vec![]; + ChainSpec::from_genesis( + "Staging Testnet", + "staging_testnet", + staging_testnet_config_genesis, + boot_nodes, + Some(TelemetryEndpoints::new(vec![( + STAGING_TELEMETRY_URL.to_string(), + 0, + )])), + None, + None, + None, + ) } /// Helper function to generate AccountId from seed pub fn get_account_id_from_seed(seed: &str) -> AccountId { - sr25519::Pair::from_string(&format!("//{}", seed), None) - .expect("static values are valid; qed") - .public() + sr25519::Pair::from_string(&format!("//{}", seed), None) + .expect("static values are valid; qed") + .public() } /// Helper function to generate AuthorityId from seed pub fn get_session_key_from_seed(seed: &str) -> AuthorityId { - ed25519::Pair::from_string(&format!("//{}", seed), None) - .expect("static values are valid; qed") - .public() + ed25519::Pair::from_string(&format!("//{}", seed), None) + .expect("static values are valid; qed") + .public() } /// Helper function to generate stash, controller and session key from seed pub fn get_authority_keys_from_seed(seed: &str) -> (AccountId, AccountId, AuthorityId) { - ( - get_account_id_from_seed(&format!("{}//stash", seed)), - get_account_id_from_seed(seed), - get_session_key_from_seed(seed) - ) + ( + get_account_id_from_seed(&format!("{}//stash", seed)), + get_account_id_from_seed(seed), + get_session_key_from_seed(seed), + ) } /// Helper function to create GenesisConfig for testing pub fn testnet_genesis( - initial_authorities: Vec<(AccountId, AccountId, AuthorityId)>, - root_key: AccountId, - endowed_accounts: Option>, + initial_authorities: Vec<(AccountId, AccountId, AuthorityId)>, + root_key: AccountId, + endowed_accounts: Option>, ) -> GenesisConfig { - let endowed_accounts: Vec = endowed_accounts.unwrap_or_else(|| { - vec![ - get_account_id_from_seed("Alice"), - get_account_id_from_seed("Bob"), - get_account_id_from_seed("Charlie"), - get_account_id_from_seed("Dave"), - get_account_id_from_seed("Eve"), - get_account_id_from_seed("Ferdie"), - get_account_id_from_seed("Alice//stash"), - get_account_id_from_seed("Bob//stash"), - get_account_id_from_seed("Charlie//stash"), - get_account_id_from_seed("Dave//stash"), - get_account_id_from_seed("Eve//stash"), - get_account_id_from_seed("Ferdie//stash"), - ] - }); + let endowed_accounts: Vec = endowed_accounts.unwrap_or_else(|| { + vec![ + 
get_account_id_from_seed("Alice"), + get_account_id_from_seed("Bob"), + get_account_id_from_seed("Charlie"), + get_account_id_from_seed("Dave"), + get_account_id_from_seed("Eve"), + get_account_id_from_seed("Ferdie"), + get_account_id_from_seed("Alice//stash"), + get_account_id_from_seed("Bob//stash"), + get_account_id_from_seed("Charlie//stash"), + get_account_id_from_seed("Dave//stash"), + get_account_id_from_seed("Eve//stash"), + get_account_id_from_seed("Ferdie//stash"), + ] + }); - const STASH: u128 = 1 << 20; - const ENDOWMENT: u128 = 1 << 20; + const STASH: u128 = 1 << 20; + const ENDOWMENT: u128 = 1 << 20; - GenesisConfig { + GenesisConfig { consensus: Some(ConsensusConfig { code: include_bytes!("../../runtime/wasm/target/wasm32-unknown-unknown/release/node_runtime.compact.wasm").to_vec(), authorities: initial_authorities.iter().map(|x| x.2.clone()).collect(), @@ -329,55 +351,80 @@ pub fn testnet_genesis( } fn development_config_genesis() -> GenesisConfig { - testnet_genesis( - vec![ - get_authority_keys_from_seed("Alice"), - ], - get_account_id_from_seed("Alice"), - None, - ) + testnet_genesis( + vec![get_authority_keys_from_seed("Alice")], + get_account_id_from_seed("Alice"), + None, + ) } /// Development config (single validator Alice) pub fn development_config() -> ChainSpec { - ChainSpec::from_genesis("Development", "dev", development_config_genesis, vec![], None, None, None, None) + ChainSpec::from_genesis( + "Development", + "dev", + development_config_genesis, + vec![], + None, + None, + None, + None, + ) } fn local_testnet_genesis() -> GenesisConfig { - testnet_genesis( - vec![ - get_authority_keys_from_seed("Alice"), - get_authority_keys_from_seed("Bob"), - ], - get_account_id_from_seed("Alice"), - None, - ) + testnet_genesis( + vec![ + get_authority_keys_from_seed("Alice"), + get_authority_keys_from_seed("Bob"), + ], + get_account_id_from_seed("Alice"), + None, + ) } /// Local testnet config (multivalidator Alice + Bob) pub fn local_testnet_config() -> ChainSpec { - ChainSpec::from_genesis("Local Testnet", "local_testnet", local_testnet_genesis, vec![], None, None, None, None) + ChainSpec::from_genesis( + "Local Testnet", + "local_testnet", + local_testnet_genesis, + vec![], + None, + None, + None, + None, + ) } #[cfg(test)] mod tests { - use super::*; - use service_test; - use crate::service::Factory; + use super::*; + use crate::service::Factory; + use service_test; - fn local_testnet_genesis_instant() -> GenesisConfig { - let mut genesis = local_testnet_genesis(); - genesis.timestamp = Some(TimestampConfig { minimum_period: 1 }); - genesis - } + fn local_testnet_genesis_instant() -> GenesisConfig { + let mut genesis = local_testnet_genesis(); + genesis.timestamp = Some(TimestampConfig { minimum_period: 1 }); + genesis + } - /// Local testnet config (multivalidator Alice + Bob) - pub fn integration_test_config() -> ChainSpec { - ChainSpec::from_genesis("Integration Test", "test", local_testnet_genesis_instant, vec![], None, None, None, None) - } + /// Local testnet config (multivalidator Alice + Bob) + pub fn integration_test_config() -> ChainSpec { + ChainSpec::from_genesis( + "Integration Test", + "test", + local_testnet_genesis_instant, + vec![], + None, + None, + None, + None, + ) + } - #[test] - fn test_connectivity() { - service_test::connectivity::(integration_test_config()); - } + #[test] + fn test_connectivity() { + service_test::connectivity::(integration_test_config()); + } } diff --git a/node/cli/src/lib.rs b/node/cli/src/lib.rs index 1b103b7bfc..4d8e3da302 
100644 --- a/node/cli/src/lib.rs +++ b/node/cli/src/lib.rs @@ -23,114 +23,121 @@ pub use cli::error; pub mod chain_spec; mod service; +pub use cli::{IntoExit, NoCustom, VersionInfo}; +use log::info; +use std::ops::Deref; +use substrate_service::{Roles as ServiceRoles, ServiceFactory}; use tokio::prelude::Future; use tokio::runtime::{Builder as RuntimeBuilder, Runtime}; -pub use cli::{VersionInfo, IntoExit, NoCustom}; -use substrate_service::{ServiceFactory, Roles as ServiceRoles}; -use std::ops::Deref; -use log::info; /// The chain specification option. #[derive(Clone, Debug)] pub enum ChainSpec { - /// Whatever the current runtime is, with just Alice as an auth. - Development, - /// Whatever the current runtime is, with simple Alice/Bob auths. - LocalTestnet, - /// The Dried Danta testnet. - DriedDanta, - /// Whatever the current runtime is with the "global testnet" defaults. - StagingTestnet, + /// Whatever the current runtime is, with just Alice as an auth. + Development, + /// Whatever the current runtime is, with simple Alice/Bob auths. + LocalTestnet, + /// The Dried Danta testnet. + DriedDanta, + /// Whatever the current runtime is with the "global testnet" defaults. + StagingTestnet, } /// Get a chain config from a spec setting. impl ChainSpec { - pub(crate) fn load(self) -> Result { - Ok(match self { - ChainSpec::DriedDanta => chain_spec::dried_danta_config()?, - ChainSpec::Development => chain_spec::development_config(), - ChainSpec::LocalTestnet => chain_spec::local_testnet_config(), - ChainSpec::StagingTestnet => chain_spec::staging_testnet_config(), - }) - } - - pub(crate) fn from(s: &str) -> Option { - match s { - "dev" => Some(ChainSpec::Development), - "local" => Some(ChainSpec::LocalTestnet), - "" | "danta" | "dried-danta" => Some(ChainSpec::DriedDanta), - "staging" => Some(ChainSpec::StagingTestnet), - _ => None, - } - } + pub(crate) fn load(self) -> Result { + Ok(match self { + ChainSpec::DriedDanta => chain_spec::dried_danta_config()?, + ChainSpec::Development => chain_spec::development_config(), + ChainSpec::LocalTestnet => chain_spec::local_testnet_config(), + ChainSpec::StagingTestnet => chain_spec::staging_testnet_config(), + }) + } + + pub(crate) fn from(s: &str) -> Option { + match s { + "dev" => Some(ChainSpec::Development), + "local" => Some(ChainSpec::LocalTestnet), + "" | "danta" | "dried-danta" => Some(ChainSpec::DriedDanta), + "staging" => Some(ChainSpec::StagingTestnet), + _ => None, + } + } } fn load_spec(id: &str) -> Result, String> { - Ok(match ChainSpec::from(id) { - Some(spec) => Some(spec.load()?), - None => None, - }) + Ok(match ChainSpec::from(id) { + Some(spec) => Some(spec.load()?), + None => None, + }) } /// Parse command line arguments into service configuration. 
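
`ChainSpec::from` above keeps a hand-rolled `Option`-returning constructor. The same dispatch expressed through the standard `FromStr` trait would look like this (illustrative `Spec` enum, not the node's actual type):

#[derive(Clone, Debug, PartialEq)]
enum Spec {
    Development,
    LocalTestnet,
    DriedDanta,
    StagingTestnet,
}

impl std::str::FromStr for Spec {
    type Err = String;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "dev" => Ok(Spec::Development),
            "local" => Ok(Spec::LocalTestnet),
            // An empty --chain argument selects the default network.
            "" | "danta" | "dried-danta" => Ok(Spec::DriedDanta),
            "staging" => Ok(Spec::StagingTestnet),
            other => Err(format!("unknown chain spec: {}", other)),
        }
    }
}

fn main() {
    assert_eq!("dev".parse::<Spec>(), Ok(Spec::Development));
    assert_eq!("".parse::<Spec>(), Ok(Spec::DriedDanta));
    assert!("bogus".parse::<Spec>().is_err());
}
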
/// Parse command line arguments into service configuration. -pub fn run<I, T, E>(args: I, exit: E, version: cli::VersionInfo) -> error::Result<()> where - I: IntoIterator<Item = T>, - T: Into<std::ffi::OsString> + Clone, - E: IntoExit, +pub fn run<I, T, E>(args: I, exit: E, version: cli::VersionInfo) -> error::Result<()> +where + I: IntoIterator<Item = T>, + T: Into<std::ffi::OsString> + Clone, + E: IntoExit, { - cli::parse_and_execute::<service::Factory, NoCustom, NoCustom, _, _, _, _, _>( - load_spec, &version, "substrate-node", args, exit, - |exit, _custom_args, config| { - info!("{}", version.name); - info!(" version {}", config.full_version()); - info!(" by Parity Technologies, 2017-2019"); - info!("Chain specification: {}", config.chain_spec.name()); - info!("Node name: {}", config.name); - info!("Roles: {:?}", config.roles); - let runtime = RuntimeBuilder::new().name_prefix("main-tokio-").build() - .map_err(|e| format!("{:?}", e))?; - let executor = runtime.executor(); - match config.roles { - ServiceRoles::LIGHT => run_until_exit( - runtime, - service::Factory::new_light(config, executor).map_err(|e| format!("{:?}", e))?, - exit - ), - _ => run_until_exit( - runtime, - service::Factory::new_full(config, executor).map_err(|e| format!("{:?}", e))?, - exit - ), - }.map_err(|e| format!("{:?}", e)) - } - ).map_err(Into::into).map(|_| ()) + cli::parse_and_execute::<service::Factory, NoCustom, NoCustom, _, _, _, _, _>( + load_spec, + &version, + "substrate-node", + args, + exit, + |exit, _custom_args, config| { + info!("{}", version.name); + info!(" version {}", config.full_version()); + info!(" by Parity Technologies, 2017-2019"); + info!("Chain specification: {}", config.chain_spec.name()); + info!("Node name: {}", config.name); + info!("Roles: {:?}", config.roles); + let runtime = RuntimeBuilder::new() + .name_prefix("main-tokio-") + .build() + .map_err(|e| format!("{:?}", e))?; + let executor = runtime.executor(); + match config.roles { + ServiceRoles::LIGHT => run_until_exit( + runtime, + service::Factory::new_light(config, executor) + .map_err(|e| format!("{:?}", e))?, + exit, + ), + _ => run_until_exit( + runtime, + service::Factory::new_full(config, executor).map_err(|e| format!("{:?}", e))?, + exit, + ), + } + .map_err(|e| format!("{:?}", e)) + }, + ) + .map_err(Into::into) + .map(|_| ()) } -fn run_until_exit<T, C, E>( - mut runtime: Runtime, - service: T, - e: E, -) -> error::Result<()> - where - T: Deref<Target = substrate_service::Service<C>>, - C: substrate_service::Components, - E: IntoExit, +fn run_until_exit<T, C, E>(mut runtime: Runtime, service: T, e: E) -> error::Result<()> +where + T: Deref<Target = substrate_service::Service<C>>, + C: substrate_service::Components, + E: IntoExit, { - let (exit_send, exit) = exit_future::signal(); + let (exit_send, exit) = exit_future::signal(); - let executor = runtime.executor(); - cli::informant::start(&service, exit.clone(), executor.clone()); + let executor = runtime.executor(); + cli::informant::start(&service, exit.clone(), executor.clone()); - let _ = runtime.block_on(e.into_exit()); - exit_send.fire(); + let _ = runtime.block_on(e.into_exit()); + exit_send.fire(); - // we eagerly drop the service so that the internal exit future is fired, - // but we need to keep holding a reference to the global telemetry guard - let _telemetry = service.telemetry(); - drop(service); + // we eagerly drop the service so that the internal exit future is fired, + // but we need to keep holding a reference to the global telemetry guard + let _telemetry = service.telemetry(); + drop(service); - // TODO [andre]: timeout this future #1318 - let _ = runtime.shutdown_on_idle().wait(); + // TODO [andre]: timeout this future #1318 + let _ = runtime.shutdown_on_idle().wait(); - Ok(()) + Ok(()) }
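`run` above is generic over `E: IntoExit`, the caller-supplied future that resolves when the node should shut down. For orientation, a minimal implementation in the style of the node-template of this period; the `ctrlc` and `futures` wiring here is a sketch under that assumption, not code from this patch:

    use futures::{future, sync::oneshot, Future};
    use std::cell::RefCell;

    pub struct Exit;
    impl cli::IntoExit for Exit {
        type Exit = future::MapErr<oneshot::Receiver<()>, fn(oneshot::Canceled) -> ()>;
        fn into_exit(self) -> Self::Exit {
            // The Ctrl-C handler is an `Fn` closure, so the one-shot sender
            // lives in a RefCell and fires at most once.
            let (exit_send, exit) = oneshot::channel();
            let exit_send_cell = RefCell::new(Some(exit_send));
            ctrlc::set_handler(move || {
                if let Some(exit_send) = exit_send_cell
                    .try_borrow_mut()
                    .expect("signal handler not reentrant; qed")
                    .take()
                {
                    exit_send.send(()).expect("error sending exit notification");
                }
            })
            .expect("error setting Ctrl-C handler");

            exit.map_err(drop)
        }
    }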
diff --git a/node/cli/src/service.rs b/node/cli/src/service.rs index a1fb61f175..863463d0b9 100644 --- a/node/cli/src/service.rs +++ b/node/cli/src/service.rs @@ -22,199 +22,219 @@ use std::sync::Arc; use std::time::Duration; use client; -use consensus::{import_queue, start_aura, AuraImportQueue, SlotDuration, NothingExtra}; +use consensus::{import_queue, start_aura, AuraImportQueue, NothingExtra, SlotDuration}; use grandpa; +use inherents::InherentDataProviders; +use log::info; +use network::construct_simple_protocol; use node_executor; -use primitives::{Pair as PairT, ed25519}; use node_primitives::Block; use node_runtime::{GenesisConfig, RuntimeApi}; +use primitives::{ed25519, Pair as PairT}; +use substrate_service::construct_service_factory; use substrate_service::{ - FactoryFullConfiguration, LightComponents, FullComponents, FullBackend, - FullClient, LightClient, LightBackend, FullExecutor, LightExecutor, TaskExecutor, + FactoryFullConfiguration, FullBackend, FullClient, FullComponents, FullExecutor, LightBackend, + LightClient, LightComponents, LightExecutor, TaskExecutor, }; -use transaction_pool::{self, txpool::{Pool as TransactionPool}}; -use inherents::InherentDataProviders; -use network::construct_simple_protocol; -use substrate_service::construct_service_factory; -use log::info; +use transaction_pool::{self, txpool::Pool as TransactionPool}; construct_simple_protocol! { - /// Demo protocol attachment for substrate. - pub struct NodeProtocol where Block = Block { } + /// Demo protocol attachment for substrate. + pub struct NodeProtocol where Block = Block { } } /// Node specific configuration pub struct NodeConfig<F: substrate_service::ServiceFactory> { - /// grandpa connection to import block - // FIXME #1134 rather than putting this on the config, let's have an actual intermediate setup state - pub grandpa_import_setup: Option<(Arc<grandpa::BlockImportForService<F>>, grandpa::LinkHalfForService<F>)>, - inherent_data_providers: InherentDataProviders, + /// grandpa connection to import block + // FIXME #1134 rather than putting this on the config, let's have an actual intermediate setup state + pub grandpa_import_setup: Option<( + Arc<grandpa::BlockImportForService<F>>, + grandpa::LinkHalfForService<F>, + )>, + inherent_data_providers: InherentDataProviders, } -impl Default for NodeConfig<F> where F: substrate_service::ServiceFactory { - fn default() -> NodeConfig<F> { - NodeConfig { - grandpa_import_setup: None, - inherent_data_providers: InherentDataProviders::new(), - } - } +impl<F> Default for NodeConfig<F> +where + F: substrate_service::ServiceFactory, +{ + fn default() -> NodeConfig<F> { + NodeConfig { + grandpa_import_setup: None, + inherent_data_providers: InherentDataProviders::new(), + } + } }
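// Why the Default impl above is written by hand: #[derive(Default)] would
// add an `F: Default` bound even though no value of `F` is ever constructed,
// since `F` only appears behind Option. A self-contained illustration of the
// pattern (the types here are made up for the example):
//
//     struct Holder<F> {
//         setup: Option<F>,
//         count: u32,
//     }
//
//     impl<F> Default for Holder<F> {
//         fn default() -> Self {
//             Holder { setup: None, count: 0 }
//         }
//     }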
construct_service_factory! { - struct Factory { - Block = Block, - RuntimeApi = RuntimeApi, - NetworkProtocol = NodeProtocol { |config| Ok(NodeProtocol::new()) }, - RuntimeDispatch = node_executor::Executor, - FullTransactionPoolApi = transaction_pool::ChainApi<client::Client<FullBackend<Self>, FullExecutor<Self>, Block, RuntimeApi>, Block> - { |config, client| Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(client))) }, - LightTransactionPoolApi = transaction_pool::ChainApi<client::Client<LightBackend<Self>, LightExecutor<Self>, Block, RuntimeApi>, Block> - { |config, client| Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(client))) }, - Genesis = GenesisConfig, - Configuration = NodeConfig<Self>, - FullService = FullComponents<Self> - { |config: FactoryFullConfiguration<Self>, executor: TaskExecutor| - FullComponents::<Factory>::new(config, executor) }, - AuthoritySetup = { - |mut service: Self::FullService, executor: TaskExecutor, local_key: Option<Arc<ed25519::Pair>>| { - let (block_import, link_half) = service.config.custom.grandpa_import_setup.take() - .expect("Link Half and Block Import are present for Full Services or setup failed before. qed"); - - if let Some(ref key) = local_key { - info!("Using authority key {}", key.public()); - let proposer = Arc::new(substrate_basic_authorship::ProposerFactory { - client: service.client(), - transaction_pool: service.transaction_pool(), - inherents_pool: service.inherents_pool(), - }); - - let client = service.client(); - executor.spawn(start_aura( - SlotDuration::get_or_compute(&*client)?, - key.clone(), - client, - block_import.clone(), - proposer, - service.network(), - service.on_exit(), - service.config.custom.inherent_data_providers.clone(), - service.config.force_authoring, - )?); - - info!("Running Grandpa session as Authority {}", key.public()); - } - - let local_key = if service.config.disable_grandpa { - None - } else { - local_key - }; - - executor.spawn(grandpa::run_grandpa( - grandpa::Config { - local_key, - // FIXME #1578 make this available through chainspec - gossip_duration: Duration::from_millis(333), - justification_period: 4096, - name: Some(service.config.name.clone()) - }, - link_half, - grandpa::NetworkBridge::new(service.network()), - service.config.custom.inherent_data_providers.clone(), - service.on_exit(), - )?); - - Ok(service) - } - }, - LightService = LightComponents<Self> - { |config, executor| <LightComponents<Factory>>::new(config, executor) }, - FullImportQueue = AuraImportQueue<Self::Block> - { |config: &mut FactoryFullConfiguration<Self>, client: Arc<FullClient<Self>>| { - let slot_duration = SlotDuration::get_or_compute(&*client)?; - let (block_import, link_half) = - grandpa::block_import::<_, _, _, RuntimeApi, FullClient<Self>>( - client.clone(), client.clone() - )?; - let block_import = Arc::new(block_import); - let justification_import = block_import.clone(); - - config.custom.grandpa_import_setup = Some((block_import.clone(), link_half)); - - import_queue::<_, _, _, ed25519::Pair>( - slot_duration, - block_import, - Some(justification_import), - client, - NothingExtra, - config.custom.inherent_data_providers.clone(), - true, - ).map_err(Into::into) - }}, - LightImportQueue = AuraImportQueue<Self::Block> - { |config: &FactoryFullConfiguration<Self>, client: Arc<LightClient<Self>>| { - import_queue::<_, _, _, ed25519::Pair>( - SlotDuration::get_or_compute(&*client)?, - client.clone(), - None, - client, - NothingExtra, - config.custom.inherent_data_providers.clone(), - true, - ).map_err(Into::into) - } - }, - } + struct Factory { + Block = Block, + RuntimeApi = RuntimeApi, + NetworkProtocol = NodeProtocol { |config| Ok(NodeProtocol::new()) }, + RuntimeDispatch = node_executor::Executor, + FullTransactionPoolApi = transaction_pool::ChainApi<client::Client<FullBackend<Self>, FullExecutor<Self>, Block, RuntimeApi>, Block> + { |config, client| Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(client))) }, + LightTransactionPoolApi = transaction_pool::ChainApi<client::Client<LightBackend<Self>, LightExecutor<Self>, Block, RuntimeApi>, Block> + { |config, client| Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(client))) }, + Genesis = GenesisConfig, + Configuration = NodeConfig<Self>, + FullService = FullComponents<Self> + { |config: FactoryFullConfiguration<Self>, executor: TaskExecutor| + FullComponents::<Factory>::new(config, executor) }, + AuthoritySetup = { + |mut service: Self::FullService, executor: TaskExecutor, local_key: Option<Arc<ed25519::Pair>>| { + let (block_import, link_half) = service.config.custom.grandpa_import_setup.take() + .expect("Link Half and Block Import are present for Full Services or setup failed before. qed"); + + if let Some(ref key) = local_key { + info!("Using authority key {}", key.public()); + let proposer = Arc::new(substrate_basic_authorship::ProposerFactory { + client: service.client(), + transaction_pool: service.transaction_pool(), + inherents_pool: service.inherents_pool(), + }); + + let client = service.client(); + executor.spawn(start_aura( + SlotDuration::get_or_compute(&*client)?, + key.clone(), + client, + block_import.clone(), + proposer, + service.network(), + service.on_exit(), + service.config.custom.inherent_data_providers.clone(), + service.config.force_authoring, + )?); + + info!("Running Grandpa session as Authority {}", key.public()); + } + + let local_key = if service.config.disable_grandpa { + None + } else { + local_key + }; + + executor.spawn(grandpa::run_grandpa( + grandpa::Config { + local_key, + // FIXME #1578 make this available through chainspec + gossip_duration: Duration::from_millis(333), + justification_period: 4096, + name: Some(service.config.name.clone()) + }, + link_half, + grandpa::NetworkBridge::new(service.network()), + service.config.custom.inherent_data_providers.clone(), + service.on_exit(), + )?); + + Ok(service) + } + }, + LightService = LightComponents<Self> + { |config, executor| <LightComponents<Factory>>::new(config, executor) }, + FullImportQueue = AuraImportQueue<Self::Block> + { |config: &mut FactoryFullConfiguration<Self>, client: Arc<FullClient<Self>>| { + let slot_duration = SlotDuration::get_or_compute(&*client)?; + let (block_import, link_half) = + grandpa::block_import::<_, _, _, RuntimeApi, FullClient<Self>>( + client.clone(), client.clone() + )?; + let block_import = Arc::new(block_import); + let justification_import = block_import.clone(); + + config.custom.grandpa_import_setup = Some((block_import.clone(), link_half)); + + import_queue::<_, _, _, ed25519::Pair>( + slot_duration, + block_import, + Some(justification_import), + client, + NothingExtra, + config.custom.inherent_data_providers.clone(), + true, + ).map_err(Into::into) + }}, + LightImportQueue = AuraImportQueue<Self::Block> + { |config: &FactoryFullConfiguration<Self>, client: Arc<LightClient<Self>>| { + import_queue::<_, _, _, ed25519::Pair>( + SlotDuration::get_or_compute(&*client)?, + client.clone(), + None, + client, + NothingExtra, + config.custom.inherent_data_providers.clone(), + true, + ).map_err(Into::into) + } + }, + } } - #[cfg(test)] mod tests { - #[cfg(feature = "rhd")] - fn test_sync() { - use {service_test, Factory}; - use client::{ImportBlock, BlockOrigin}; - - let alice: Arc<ed25519::Pair> = Arc::new(Keyring::Alice.into()); - let bob: Arc<ed25519::Pair> = Arc::new(Keyring::Bob.into()); - let validators = vec![alice.public().0.into(), bob.public().0.into()]; - let keys: Vec<&ed25519::Pair> = vec![&*alice, &*bob]; - let dummy_runtime =
::tokio::runtime::Runtime::new().unwrap(); - let block_factory = |service: &::FullService| { - let block_id = BlockId::number(service.client().info().unwrap().chain.best_number); - let parent_header = service.client().header(&block_id).unwrap().unwrap(); - let consensus_net = ConsensusNetwork::new(service.network(), service.client().clone()); - let proposer_factory = consensus::ProposerFactory { - client: service.client().clone(), - transaction_pool: service.transaction_pool().clone(), - network: consensus_net, - force_delay: 0, - handle: dummy_runtime.executor(), - }; - let (proposer, _, _) = proposer_factory.init(&parent_header, &validators, alice.clone()).unwrap(); - let block = proposer.propose().expect("Error making test block"); - ImportBlock { - origin: BlockOrigin::File, - justification: Vec::new(), - internal_justification: Vec::new(), - finalized: true, - body: Some(block.extrinsics), - header: block.header, - auxiliary: Vec::new(), - } - }; - let extrinsic_factory = |service: &::FullService| { - let payload = (0, Call::Balances(BalancesCall::transfer(RawAddress::Id(bob.public().0.into()), 69.into())), Era::immortal(), service.client().genesis_hash()); - let signature = alice.sign(&payload.encode()).into(); - let id = alice.public().0.into(); - let xt = UncheckedExtrinsic { - signature: Some((RawAddress::Id(id), signature, payload.0, Era::immortal())), - function: payload.1, - }.encode(); - let v: Vec = Decode::decode(&mut xt.as_slice()).unwrap(); - OpaqueExtrinsic(v) - }; - service_test::sync::(chain_spec::integration_test_config(), block_factory, extrinsic_factory); - } + #[cfg(feature = "rhd")] + fn test_sync() { + use client::{BlockOrigin, ImportBlock}; + use {service_test, Factory}; + + let alice: Arc = Arc::new(Keyring::Alice.into()); + let bob: Arc = Arc::new(Keyring::Bob.into()); + let validators = vec![alice.public().0.into(), bob.public().0.into()]; + let keys: Vec<&ed25519::Pair> = vec![&*alice, &*bob]; + let dummy_runtime = ::tokio::runtime::Runtime::new().unwrap(); + let block_factory = |service: &::FullService| { + let block_id = BlockId::number(service.client().info().unwrap().chain.best_number); + let parent_header = service.client().header(&block_id).unwrap().unwrap(); + let consensus_net = ConsensusNetwork::new(service.network(), service.client().clone()); + let proposer_factory = consensus::ProposerFactory { + client: service.client().clone(), + transaction_pool: service.transaction_pool().clone(), + network: consensus_net, + force_delay: 0, + handle: dummy_runtime.executor(), + }; + let (proposer, _, _) = proposer_factory + .init(&parent_header, &validators, alice.clone()) + .unwrap(); + let block = proposer.propose().expect("Error making test block"); + ImportBlock { + origin: BlockOrigin::File, + justification: Vec::new(), + internal_justification: Vec::new(), + finalized: true, + body: Some(block.extrinsics), + header: block.header, + auxiliary: Vec::new(), + } + }; + let extrinsic_factory = |service: &::FullService| { + let payload = ( + 0, + Call::Balances(BalancesCall::transfer( + RawAddress::Id(bob.public().0.into()), + 69.into(), + )), + Era::immortal(), + service.client().genesis_hash(), + ); + let signature = alice.sign(&payload.encode()).into(); + let id = alice.public().0.into(); + let xt = UncheckedExtrinsic { + signature: Some((RawAddress::Id(id), signature, payload.0, Era::immortal())), + function: payload.1, + } + .encode(); + let v: Vec = Decode::decode(&mut xt.as_slice()).unwrap(); + OpaqueExtrinsic(v) + }; + service_test::sync::( + 
chain_spec::integration_test_config(), + block_factory, + extrinsic_factory, + ); + } } diff --git a/node/executor/src/lib.rs b/node/executor/src/lib.rs index c855a4e6f0..c8e6e37c85 100644 --- a/node/executor/src/lib.rs +++ b/node/executor/src/lib.rs @@ -19,601 +19,690 @@ #![cfg_attr(feature = "benchmarks", feature(test))] -#[cfg(feature = "benchmarks")] extern crate test; +#[cfg(feature = "benchmarks")] +extern crate test; -pub use substrate_executor::NativeExecutor; use substrate_executor::native_executor_instance; +pub use substrate_executor::NativeExecutor; native_executor_instance!(pub Executor, node_runtime::api::dispatch, node_runtime::native_version, include_bytes!("../../runtime/wasm/target/wasm32-unknown-unknown/release/node_runtime.compact.wasm")); #[cfg(test)] mod tests { - use runtime_io; - use super::Executor; - use substrate_executor::{WasmExecutor, NativeExecutionDispatch}; - use parity_codec::{Encode, Decode, Joiner}; - use keyring::{AuthorityKeyring, AccountKeyring}; - use runtime_support::{Hashable, StorageValue, StorageMap, traits::Currency}; - use state_machine::{CodeExecutor, Externalities, TestExternalities}; - use primitives::{twox_128, Blake2Hasher, ChangesTrieConfiguration, NeverNativeValue, - NativeOrEncoded}; - use node_primitives::{Hash, BlockNumber, AccountId}; - use runtime_primitives::traits::{Header as HeaderT, Hash as HashT}; - use runtime_primitives::{generic, generic::Era, ApplyOutcome, ApplyError, ApplyResult, Perbill}; - use {balances, indices, session, system, staking, consensus, timestamp, treasury, contract}; - use contract::ContractAddressFor; - use system::{EventRecord, Phase}; - use node_runtime::{Header, Block, UncheckedExtrinsic, CheckedExtrinsic, Call, Runtime, Balances, - BuildStorage, GenesisConfig, BalancesConfig, SessionConfig, StakingConfig, System, - SystemConfig, GrandpaConfig, IndicesConfig, Event, Log}; - use wabt; - use primitives::map; - - const BLOATY_CODE: &[u8] = include_bytes!("../../runtime/wasm/target/wasm32-unknown-unknown/release/node_runtime.wasm"); - const COMPACT_CODE: &[u8] = include_bytes!("../../runtime/wasm/target/wasm32-unknown-unknown/release/node_runtime.compact.wasm"); - const GENESIS_HASH: [u8; 32] = [69u8; 32]; - - fn alice() -> AccountId { - AccountKeyring::Alice.into() - } - - fn bob() -> AccountId { - AccountKeyring::Bob.into() - } - - fn charlie() -> AccountId { - AccountKeyring::Charlie.into() - } - - fn dave() -> AccountId { - AccountKeyring::Dave.into() - } - - fn eve() -> AccountId { - AccountKeyring::Eve.into() - } - - fn ferdie() -> AccountId { - AccountKeyring::Ferdie.into() - } - - fn sign(xt: CheckedExtrinsic) -> UncheckedExtrinsic { - match xt.signed { - Some((signed, index)) => { - let era = Era::mortal(256, 0); - let payload = (index.into(), xt.function, era, GENESIS_HASH); - let key = AccountKeyring::from_public(&signed).unwrap(); - let signature = payload.using_encoded(|b| { - if b.len() > 256 { - key.sign(&runtime_io::blake2_256(b)) - } else { - key.sign(b) - } - }).into(); - UncheckedExtrinsic { - signature: Some((indices::address::Address::Id(signed), signature, payload.0, era)), - function: payload.1, - } - } - None => UncheckedExtrinsic { - signature: None, - function: xt.function, - }, - } - } - - fn xt() -> UncheckedExtrinsic { - sign(CheckedExtrinsic { - signed: Some((alice(), 0)), - function: Call::Balances(balances::Call::transfer::(bob().into(), 69)), - }) - } - - fn from_block_number(n: u64) -> Header { - Header::new(n, Default::default(), Default::default(), [69; 32].into(), 
Default::default()) - } - - fn executor() -> ::substrate_executor::NativeExecutor { - ::substrate_executor::NativeExecutor::new(None) - } - - #[test] - fn panic_execution_with_foreign_code_gives_error() { - let mut t = TestExternalities::::new_with_code(BLOATY_CODE, map![ - twox_128(&>::key_for(alice())).to_vec() => vec![69u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - twox_128(>::key()).to_vec() => vec![69u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - twox_128(>::key()).to_vec() => vec![0u8; 16], - twox_128(>::key()).to_vec() => vec![0u8; 16], - twox_128(>::key()).to_vec() => vec![0u8; 16], - twox_128(>::key()).to_vec() => vec![0u8; 16], - twox_128(&>::key_for(0)).to_vec() => vec![0u8; 32], - twox_128(>::key()).to_vec() => vec![70u8; 16], - twox_128(>::key()).to_vec() => vec![0u8; 16] - ]); - - let r = executor().call::<_, NeverNativeValue, fn() -> _>( - &mut t, - "Core_initialize_block", - &vec![].and(&from_block_number(1u64)), - true, - None, - ).0; - assert!(r.is_ok()); - let v = executor().call::<_, NeverNativeValue, fn() -> _>( - &mut t, - "BlockBuilder_apply_extrinsic", - &vec![].and(&xt()), - true, - None, - ).0.unwrap(); - let r = ApplyResult::decode(&mut &v.as_encoded()[..]).unwrap(); - assert_eq!(r, Err(ApplyError::CantPay)); - } - - #[test] - fn bad_extrinsic_with_native_equivalent_code_gives_error() { - let mut t = TestExternalities::::new_with_code(COMPACT_CODE, map![ - twox_128(&>::key_for(alice())).to_vec() => vec![69u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - twox_128(>::key()).to_vec() => vec![69u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - twox_128(>::key()).to_vec() => vec![0u8; 16], - twox_128(>::key()).to_vec() => vec![0u8; 16], - twox_128(>::key()).to_vec() => vec![0u8; 16], - twox_128(>::key()).to_vec() => vec![0u8; 16], - twox_128(&>::key_for(0)).to_vec() => vec![0u8; 32], - twox_128(>::key()).to_vec() => vec![70u8; 16], - twox_128(>::key()).to_vec() => vec![0u8; 16] - ]); - - let r = executor().call::<_, NeverNativeValue, fn() -> _>( - &mut t, - "Core_initialize_block", - &vec![].and(&from_block_number(1u64)), - true, - None, - ).0; - assert!(r.is_ok()); - let v = executor().call::<_, NeverNativeValue, fn() -> _>( - &mut t, - "BlockBuilder_apply_extrinsic", - &vec![].and(&xt()), - true, - None, - ).0.unwrap(); - let r = ApplyResult::decode(&mut &v.as_encoded()[..]).unwrap(); - assert_eq!(r, Err(ApplyError::CantPay)); - } - - #[test] - fn successful_execution_with_native_equivalent_code_gives_ok() { - let mut t = TestExternalities::::new_with_code(COMPACT_CODE, map![ - twox_128(&>::key_for(alice())).to_vec() => vec![111u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - twox_128(>::key()).to_vec() => vec![111u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - twox_128(>::key()).to_vec() => vec![0u8; 16], - twox_128(>::key()).to_vec() => vec![0u8; 16], - twox_128(>::key()).to_vec() => vec![0u8; 16], - twox_128(>::key()).to_vec() => vec![0u8; 16], - twox_128(&>::key_for(0)).to_vec() => vec![0u8; 32], - twox_128(>::key()).to_vec() => vec![0u8; 16], - twox_128(>::key()).to_vec() => vec![0u8; 16] - ]); - - let r = executor().call::<_, NeverNativeValue, fn() -> _>( - &mut t, - "Core_initialize_block", - &vec![].and(&from_block_number(1u64)), - true, - None, - ).0; - assert!(r.is_ok()); - let r = executor().call::<_, NeverNativeValue, fn() -> _>( - &mut t, - "BlockBuilder_apply_extrinsic", - &vec![].and(&xt()), - true, - None, - ).0; - assert!(r.is_ok()); - - runtime_io::with_externalities(&mut t, || { - 
assert_eq!(Balances::total_balance(&alice()), 42); - assert_eq!(Balances::total_balance(&bob()), 69); - }); - } - - #[test] - fn successful_execution_with_foreign_code_gives_ok() { - let mut t = TestExternalities::::new_with_code(BLOATY_CODE, map![ - twox_128(&>::key_for(alice())).to_vec() => vec![111u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - twox_128(>::key()).to_vec() => vec![111u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - twox_128(>::key()).to_vec() => vec![0u8; 16], - twox_128(>::key()).to_vec() => vec![0u8; 16], - twox_128(>::key()).to_vec() => vec![0u8; 16], - twox_128(>::key()).to_vec() => vec![0u8; 16], - twox_128(&>::key_for(0)).to_vec() => vec![0u8; 32], - twox_128(>::key()).to_vec() => vec![0u8; 16], - twox_128(>::key()).to_vec() => vec![0u8; 16] - ]); - - let r = executor().call::<_, NeverNativeValue, fn() -> _>( - &mut t, - "Core_initialize_block", - &vec![].and(&from_block_number(1u64)), - true, - None, - ).0; - assert!(r.is_ok()); - let r = executor().call::<_, NeverNativeValue, fn() -> _>( - &mut t, - "BlockBuilder_apply_extrinsic", - &vec![].and(&xt()), - true, - None, - ).0; - assert!(r.is_ok()); - - runtime_io::with_externalities(&mut t, || { - assert_eq!(Balances::total_balance(&alice()), 42); - assert_eq!(Balances::total_balance(&bob()), 69); - }); - } - - fn new_test_ext(code: &[u8], support_changes_trie: bool) -> TestExternalities { - let three = AccountId::from_raw([3u8; 32]); - TestExternalities::new_with_code(code, GenesisConfig { - consensus: Some(Default::default()), - system: Some(SystemConfig { - changes_trie_config: if support_changes_trie { Some(ChangesTrieConfiguration { - digest_interval: 2, - digest_levels: 2, - }) } else { None }, - ..Default::default() - }), - indices: Some(IndicesConfig { - ids: vec![alice(), bob(), charlie(), dave(), eve(), ferdie()], - }), - balances: Some(BalancesConfig { - transaction_base_fee: 1, - transaction_byte_fee: 0, - balances: vec![ - (alice(), 111), - (bob(), 100), - (charlie(), 100_000_000), - (dave(), 111), - (eve(), 101), - (ferdie(), 100), - ], - existential_deposit: 0, - transfer_fee: 0, - creation_fee: 0, - vesting: vec![], - }), - session: Some(SessionConfig { - session_length: 2, - validators: vec![AccountKeyring::One.into(), AccountKeyring::Two.into(), three], - keys: vec![ - (alice(), AuthorityKeyring::Alice.into()), - (bob(), AuthorityKeyring::Bob.into()), - (charlie(), AuthorityKeyring::Charlie.into()) - ] - }), - staking: Some(StakingConfig { - sessions_per_era: 2, - current_era: 0, - stakers: vec![ - (dave(), alice(), 111, staking::StakerStatus::Validator), - (eve(), bob(), 100, staking::StakerStatus::Validator), - (ferdie(), charlie(), 100, staking::StakerStatus::Validator) - ], - validator_count: 3, - minimum_validator_count: 0, - bonding_duration: 0, - offline_slash: Perbill::zero(), - session_reward: Perbill::zero(), - current_session_reward: 0, - offline_slash_grace: 0, - invulnerables: vec![alice(), bob(), charlie()], - }), - democracy: Some(Default::default()), - council_seats: Some(Default::default()), - council_voting: Some(Default::default()), - timestamp: Some(Default::default()), - treasury: Some(Default::default()), - contract: Some(Default::default()), - sudo: Some(Default::default()), - grandpa: Some(GrandpaConfig { - authorities: vec![], - }), - }.build_storage().unwrap().0) - } - - fn construct_block( - env: &mut TestExternalities, - number: BlockNumber, - parent_hash: Hash, - extrinsics: Vec, - ) -> (Vec, Hash) { - use trie::ordered_trie_root; - - // sign extrinsics. 
- let extrinsics = extrinsics.into_iter().map(sign).collect::>(); - - // calculate the header fields that we can. - let extrinsics_root = ordered_trie_root::( - extrinsics.iter().map(Encode::encode) - ).to_fixed_bytes() - .into(); - - let header = Header { - parent_hash, - number, - extrinsics_root, - state_root: Default::default(), - digest: Default::default(), - }; - - // execute the block to get the real header. - Executor::new(None).call::<_, NeverNativeValue, fn() -> _>( - env, - "Core_initialize_block", - &header.encode(), - true, - None, - ).0.unwrap(); - - for i in extrinsics.iter() { - Executor::new(None).call::<_, NeverNativeValue, fn() -> _>( - env, - "BlockBuilder_apply_extrinsic", - &i.encode(), - true, - None, - ).0.unwrap(); - } - - let header = match Executor::new(None).call::<_, NeverNativeValue, fn() -> _>( - env, - "BlockBuilder_finalize_block", - &[0u8;0], - true, - None, - ).0.unwrap() { - NativeOrEncoded::Native(_) => unreachable!(), - NativeOrEncoded::Encoded(h) => Header::decode(&mut &h[..]).unwrap(), - }; - - let hash = header.blake2_256(); - (Block { header, extrinsics }.encode(), hash.into()) - } - - fn changes_trie_block() -> (Vec, Hash) { - construct_block( - &mut new_test_ext(COMPACT_CODE, true), - 1, - GENESIS_HASH.into(), - vec![ - CheckedExtrinsic { - signed: None, - function: Call::Timestamp(timestamp::Call::set(42)), - }, - CheckedExtrinsic { - signed: Some((alice(), 0)), - function: Call::Balances(balances::Call::transfer(bob().into(), 69)), - }, - ] - ) - } - - // block 1 and 2 must be created together to ensure transactions are only signed once (since they - // are not guaranteed to be deterministic) and to ensure that the correct state is propagated - // from block1's execution to block2 to derive the correct storage_root. - fn blocks() -> ((Vec, Hash), (Vec, Hash)) { - let mut t = new_test_ext(COMPACT_CODE, false); - let block1 = construct_block( - &mut t, - 1, - GENESIS_HASH.into(), - vec![ - CheckedExtrinsic { - signed: None, - function: Call::Timestamp(timestamp::Call::set(42)), - }, - CheckedExtrinsic { - signed: Some((alice(), 0)), - function: Call::Balances(balances::Call::transfer(bob().into(), 69)), - }, - ] - ); - let block2 = construct_block( - &mut t, - 2, - block1.1.clone(), - vec![ - CheckedExtrinsic { - signed: None, - function: Call::Timestamp(timestamp::Call::set(52)), - }, - CheckedExtrinsic { - signed: Some((bob(), 0)), - function: Call::Balances(balances::Call::transfer(alice().into(), 5)), - }, - CheckedExtrinsic { - signed: Some((alice(), 1)), - function: Call::Balances(balances::Call::transfer(bob().into(), 15)), - } - ] - ); - - let digest = generic::Digest::::default(); - assert_eq!(Header::decode(&mut &block2.0[..]).unwrap().digest, digest); - - (block1, block2) - } - - fn big_block() -> (Vec, Hash) { - construct_block( - &mut new_test_ext(COMPACT_CODE, false), - 1, - GENESIS_HASH.into(), - vec![ - CheckedExtrinsic { - signed: None, - function: Call::Timestamp(timestamp::Call::set(42)), - }, - CheckedExtrinsic { - signed: Some((alice(), 0)), - function: Call::Consensus(consensus::Call::remark(vec![0; 120000])), - } - ] - ) - } - - #[test] - fn full_native_block_import_works() { - let mut t = new_test_ext(COMPACT_CODE, false); - - let (block1, block2) = blocks(); - - executor().call::<_, NeverNativeValue, fn() -> _>( - &mut t, - "Core_execute_block", - &block1.0, - true, - None, - ).0.unwrap(); - - runtime_io::with_externalities(&mut t, || { - // block1 transfers from alice 69 to bob. 
- // -1 is the default fee - assert_eq!(Balances::total_balance(&alice()), 111 - 69 - 1); - assert_eq!(Balances::total_balance(&bob()), 100 + 69); - assert_eq!(System::events(), vec![ - EventRecord { - phase: Phase::ApplyExtrinsic(0), - event: Event::system(system::Event::ExtrinsicSuccess) - }, - EventRecord { - phase: Phase::ApplyExtrinsic(1), - event: Event::balances(balances::RawEvent::Transfer( - alice().into(), - bob().into(), - 69, - 0 - )) - }, - EventRecord { - phase: Phase::ApplyExtrinsic(1), - event: Event::system(system::Event::ExtrinsicSuccess) - }, - EventRecord { - phase: Phase::Finalization, - event: Event::treasury(treasury::RawEvent::Spending(0)) - }, - EventRecord { - phase: Phase::Finalization, - event: Event::treasury(treasury::RawEvent::Burnt(0)) - }, - EventRecord { - phase: Phase::Finalization, - event: Event::treasury(treasury::RawEvent::Rollover(0)) - }, - ]); - }); - - executor().call::<_, NeverNativeValue, fn() -> _>( - &mut t, - "Core_execute_block", - &block2.0, - true, - None, - ).0.unwrap(); - - runtime_io::with_externalities(&mut t, || { - // bob sends 5, alice sends 15 | bob += 10, alice -= 10 - // 111 - 69 - 1 - 10 - 1 = 30 - assert_eq!(Balances::total_balance(&alice()), 111 - 69 - 1 - 10 - 1); - // 100 + 69 + 10 - 1 = 178 - assert_eq!(Balances::total_balance(&bob()), 100 + 69 + 10 - 1); - assert_eq!(System::events(), vec![ - EventRecord { - phase: Phase::ApplyExtrinsic(0), - event: Event::system(system::Event::ExtrinsicSuccess) - }, - EventRecord { - phase: Phase::ApplyExtrinsic(1), - event: Event::balances( - balances::RawEvent::Transfer( - bob().into(), - alice().into(), - 5, - 0 - ) - ) - }, - EventRecord { - phase: Phase::ApplyExtrinsic(1), - event: Event::system(system::Event::ExtrinsicSuccess) - }, - EventRecord { - phase: Phase::ApplyExtrinsic(2), - event: Event::balances( - balances::RawEvent::Transfer( - alice().into(), - bob().into(), - 15, - 0 - ) - ) - }, - EventRecord { - phase: Phase::ApplyExtrinsic(2), - event: Event::system(system::Event::ExtrinsicSuccess) - }, - EventRecord { - phase: Phase::Finalization, - event: Event::session(session::RawEvent::NewSession(1)) - }, - EventRecord { - phase: Phase::Finalization, - event: Event::treasury(treasury::RawEvent::Spending(0)) - }, - EventRecord { - phase: Phase::Finalization, - event: Event::treasury(treasury::RawEvent::Burnt(0)) - }, - EventRecord { - phase: Phase::Finalization, - event: Event::treasury(treasury::RawEvent::Rollover(0)) - }, - ]); - }); - } - - #[test] - fn full_wasm_block_import_works() { - let mut t = new_test_ext(COMPACT_CODE, false); - - let (block1, block2) = blocks(); - - WasmExecutor::new().call(&mut t, 8, COMPACT_CODE, "Core_execute_block", &block1.0).unwrap(); - - runtime_io::with_externalities(&mut t, || { - // block1 transfers from alice 69 to bob. 
- // -1 is the default fee - assert_eq!(Balances::total_balance(&alice()), 111 - 69 - 1); - assert_eq!(Balances::total_balance(&bob()), 100 + 69); - }); - - WasmExecutor::new().call(&mut t, 8, COMPACT_CODE, "Core_execute_block", &block2.0).unwrap(); - - runtime_io::with_externalities(&mut t, || { - // bob sends 5, alice sends 15 | bob += 10, alice -= 10 - // 111 - 69 - 1 - 10 - 1 = 30 - assert_eq!(Balances::total_balance(&alice()), 111 - 69 - 1 - 10 - 1); - // 100 + 69 + 10 - 1 = 178 - assert_eq!(Balances::total_balance(&bob()), 100 + 69 + 10 - 1); - }); - } - - const CODE_TRANSFER: &str = r#" + use super::Executor; + use contract::ContractAddressFor; + use keyring::{AccountKeyring, AuthorityKeyring}; + use node_primitives::{AccountId, BlockNumber, Hash}; + use node_runtime::{ + Balances, BalancesConfig, Block, BuildStorage, Call, CheckedExtrinsic, Event, + GenesisConfig, GrandpaConfig, Header, IndicesConfig, Log, Runtime, SessionConfig, + StakingConfig, System, SystemConfig, UncheckedExtrinsic, + }; + use parity_codec::{Decode, Encode, Joiner}; + use primitives::map; + use primitives::{ + twox_128, Blake2Hasher, ChangesTrieConfiguration, NativeOrEncoded, NeverNativeValue, + }; + use runtime_io; + use runtime_primitives::traits::{Hash as HashT, Header as HeaderT}; + use runtime_primitives::{ + generic, generic::Era, ApplyError, ApplyOutcome, ApplyResult, Perbill, + }; + use runtime_support::{traits::Currency, Hashable, StorageMap, StorageValue}; + use state_machine::{CodeExecutor, Externalities, TestExternalities}; + use substrate_executor::{NativeExecutionDispatch, WasmExecutor}; + use system::{EventRecord, Phase}; + use wabt; + use {balances, consensus, contract, indices, session, staking, system, timestamp, treasury}; + + const BLOATY_CODE: &[u8] = include_bytes!( + "../../runtime/wasm/target/wasm32-unknown-unknown/release/node_runtime.wasm" + ); + const COMPACT_CODE: &[u8] = include_bytes!( + "../../runtime/wasm/target/wasm32-unknown-unknown/release/node_runtime.compact.wasm" + ); + const GENESIS_HASH: [u8; 32] = [69u8; 32]; + + fn alice() -> AccountId { + AccountKeyring::Alice.into() + } + + fn bob() -> AccountId { + AccountKeyring::Bob.into() + } + + fn charlie() -> AccountId { + AccountKeyring::Charlie.into() + } + + fn dave() -> AccountId { + AccountKeyring::Dave.into() + } + + fn eve() -> AccountId { + AccountKeyring::Eve.into() + } + + fn ferdie() -> AccountId { + AccountKeyring::Ferdie.into() + } + + fn sign(xt: CheckedExtrinsic) -> UncheckedExtrinsic { + match xt.signed { + Some((signed, index)) => { + let era = Era::mortal(256, 0); + let payload = (index.into(), xt.function, era, GENESIS_HASH); + let key = AccountKeyring::from_public(&signed).unwrap(); + let signature = payload + .using_encoded(|b| { + if b.len() > 256 { + key.sign(&runtime_io::blake2_256(b)) + } else { + key.sign(b) + } + }) + .into(); + UncheckedExtrinsic { + signature: Some(( + indices::address::Address::Id(signed), + signature, + payload.0, + era, + )), + function: payload.1, + } + } + None => UncheckedExtrinsic { + signature: None, + function: xt.function, + }, + } + } + + fn xt() -> UncheckedExtrinsic { + sign(CheckedExtrinsic { + signed: Some((alice(), 0)), + function: Call::Balances(balances::Call::transfer::(bob().into(), 69)), + }) + } + + fn from_block_number(n: u64) -> Header { + Header::new( + n, + Default::default(), + Default::default(), + [69; 32].into(), + Default::default(), + ) + } + + fn executor() -> ::substrate_executor::NativeExecutor { + ::substrate_executor::NativeExecutor::new(None) + 
} + + #[test] + fn panic_execution_with_foreign_code_gives_error() { + let mut t = TestExternalities::::new_with_code( + BLOATY_CODE, + map![ + twox_128(&>::key_for(alice())).to_vec() => vec![69u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + twox_128(>::key()).to_vec() => vec![69u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + twox_128(>::key()).to_vec() => vec![0u8; 16], + twox_128(>::key()).to_vec() => vec![0u8; 16], + twox_128(>::key()).to_vec() => vec![0u8; 16], + twox_128(>::key()).to_vec() => vec![0u8; 16], + twox_128(&>::key_for(0)).to_vec() => vec![0u8; 32], + twox_128(>::key()).to_vec() => vec![70u8; 16], + twox_128(>::key()).to_vec() => vec![0u8; 16] + ], + ); + + let r = executor() + .call::<_, NeverNativeValue, fn() -> _>( + &mut t, + "Core_initialize_block", + &vec![].and(&from_block_number(1u64)), + true, + None, + ) + .0; + assert!(r.is_ok()); + let v = executor() + .call::<_, NeverNativeValue, fn() -> _>( + &mut t, + "BlockBuilder_apply_extrinsic", + &vec![].and(&xt()), + true, + None, + ) + .0 + .unwrap(); + let r = ApplyResult::decode(&mut &v.as_encoded()[..]).unwrap(); + assert_eq!(r, Err(ApplyError::CantPay)); + } + + #[test] + fn bad_extrinsic_with_native_equivalent_code_gives_error() { + let mut t = TestExternalities::::new_with_code( + COMPACT_CODE, + map![ + twox_128(&>::key_for(alice())).to_vec() => vec![69u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + twox_128(>::key()).to_vec() => vec![69u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + twox_128(>::key()).to_vec() => vec![0u8; 16], + twox_128(>::key()).to_vec() => vec![0u8; 16], + twox_128(>::key()).to_vec() => vec![0u8; 16], + twox_128(>::key()).to_vec() => vec![0u8; 16], + twox_128(&>::key_for(0)).to_vec() => vec![0u8; 32], + twox_128(>::key()).to_vec() => vec![70u8; 16], + twox_128(>::key()).to_vec() => vec![0u8; 16] + ], + ); + + let r = executor() + .call::<_, NeverNativeValue, fn() -> _>( + &mut t, + "Core_initialize_block", + &vec![].and(&from_block_number(1u64)), + true, + None, + ) + .0; + assert!(r.is_ok()); + let v = executor() + .call::<_, NeverNativeValue, fn() -> _>( + &mut t, + "BlockBuilder_apply_extrinsic", + &vec![].and(&xt()), + true, + None, + ) + .0 + .unwrap(); + let r = ApplyResult::decode(&mut &v.as_encoded()[..]).unwrap(); + assert_eq!(r, Err(ApplyError::CantPay)); + } + + #[test] + fn successful_execution_with_native_equivalent_code_gives_ok() { + let mut t = TestExternalities::::new_with_code( + COMPACT_CODE, + map![ + twox_128(&>::key_for(alice())).to_vec() => vec![111u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + twox_128(>::key()).to_vec() => vec![111u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + twox_128(>::key()).to_vec() => vec![0u8; 16], + twox_128(>::key()).to_vec() => vec![0u8; 16], + twox_128(>::key()).to_vec() => vec![0u8; 16], + twox_128(>::key()).to_vec() => vec![0u8; 16], + twox_128(&>::key_for(0)).to_vec() => vec![0u8; 32], + twox_128(>::key()).to_vec() => vec![0u8; 16], + twox_128(>::key()).to_vec() => vec![0u8; 16] + ], + ); + + let r = executor() + .call::<_, NeverNativeValue, fn() -> _>( + &mut t, + "Core_initialize_block", + &vec![].and(&from_block_number(1u64)), + true, + None, + ) + .0; + assert!(r.is_ok()); + let r = executor() + .call::<_, NeverNativeValue, fn() -> _>( + &mut t, + "BlockBuilder_apply_extrinsic", + &vec![].and(&xt()), + true, + None, + ) + .0; + assert!(r.is_ok()); + + runtime_io::with_externalities(&mut t, || { + assert_eq!(Balances::total_balance(&alice()), 42); + assert_eq!(Balances::total_balance(&bob()), 69); + }); + } 
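+ // Reading aid for the map! literals in these tests: in this era a plain
+ // storage value lives at the twox_128 hash of its storage name, and a map
+ // entry at the twox_128 hash of the item's prefix concatenated with the
+ // SCALE-encoded key (which is what `key_for` returns before hashing). The
+ // storage name string below is an assumption for illustration, not code
+ // from this patch:
+ //
+ //     use primitives::twox_128;
+ //
+ //     fn total_issuance_raw_key() -> Vec<u8> {
+ //         twox_128(b"Balances TotalIssuance").to_vec()
+ //     }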
+ + #[test] + fn successful_execution_with_foreign_code_gives_ok() { + let mut t = TestExternalities::::new_with_code( + BLOATY_CODE, + map![ + twox_128(&>::key_for(alice())).to_vec() => vec![111u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + twox_128(>::key()).to_vec() => vec![111u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + twox_128(>::key()).to_vec() => vec![0u8; 16], + twox_128(>::key()).to_vec() => vec![0u8; 16], + twox_128(>::key()).to_vec() => vec![0u8; 16], + twox_128(>::key()).to_vec() => vec![0u8; 16], + twox_128(&>::key_for(0)).to_vec() => vec![0u8; 32], + twox_128(>::key()).to_vec() => vec![0u8; 16], + twox_128(>::key()).to_vec() => vec![0u8; 16] + ], + ); + + let r = executor() + .call::<_, NeverNativeValue, fn() -> _>( + &mut t, + "Core_initialize_block", + &vec![].and(&from_block_number(1u64)), + true, + None, + ) + .0; + assert!(r.is_ok()); + let r = executor() + .call::<_, NeverNativeValue, fn() -> _>( + &mut t, + "BlockBuilder_apply_extrinsic", + &vec![].and(&xt()), + true, + None, + ) + .0; + assert!(r.is_ok()); + + runtime_io::with_externalities(&mut t, || { + assert_eq!(Balances::total_balance(&alice()), 42); + assert_eq!(Balances::total_balance(&bob()), 69); + }); + } + + fn new_test_ext(code: &[u8], support_changes_trie: bool) -> TestExternalities { + let three = AccountId::from_raw([3u8; 32]); + TestExternalities::new_with_code( + code, + GenesisConfig { + consensus: Some(Default::default()), + system: Some(SystemConfig { + changes_trie_config: if support_changes_trie { + Some(ChangesTrieConfiguration { + digest_interval: 2, + digest_levels: 2, + }) + } else { + None + }, + ..Default::default() + }), + indices: Some(IndicesConfig { + ids: vec![alice(), bob(), charlie(), dave(), eve(), ferdie()], + }), + balances: Some(BalancesConfig { + transaction_base_fee: 1, + transaction_byte_fee: 0, + balances: vec![ + (alice(), 111), + (bob(), 100), + (charlie(), 100_000_000), + (dave(), 111), + (eve(), 101), + (ferdie(), 100), + ], + existential_deposit: 0, + transfer_fee: 0, + creation_fee: 0, + vesting: vec![], + }), + session: Some(SessionConfig { + session_length: 2, + validators: vec![ + AccountKeyring::One.into(), + AccountKeyring::Two.into(), + three, + ], + keys: vec![ + (alice(), AuthorityKeyring::Alice.into()), + (bob(), AuthorityKeyring::Bob.into()), + (charlie(), AuthorityKeyring::Charlie.into()), + ], + }), + staking: Some(StakingConfig { + sessions_per_era: 2, + current_era: 0, + stakers: vec![ + (dave(), alice(), 111, staking::StakerStatus::Validator), + (eve(), bob(), 100, staking::StakerStatus::Validator), + (ferdie(), charlie(), 100, staking::StakerStatus::Validator), + ], + validator_count: 3, + minimum_validator_count: 0, + bonding_duration: 0, + offline_slash: Perbill::zero(), + session_reward: Perbill::zero(), + current_session_reward: 0, + offline_slash_grace: 0, + invulnerables: vec![alice(), bob(), charlie()], + }), + democracy: Some(Default::default()), + council_seats: Some(Default::default()), + council_voting: Some(Default::default()), + timestamp: Some(Default::default()), + treasury: Some(Default::default()), + contract: Some(Default::default()), + sudo: Some(Default::default()), + grandpa: Some(GrandpaConfig { + authorities: vec![], + }), + } + .build_storage() + .unwrap() + .0, + ) + } + + fn construct_block( + env: &mut TestExternalities, + number: BlockNumber, + parent_hash: Hash, + extrinsics: Vec, + ) -> (Vec, Hash) { + use trie::ordered_trie_root; + + // sign extrinsics. 
+ let extrinsics = extrinsics.into_iter().map(sign).collect::>(); + + // calculate the header fields that we can. + let extrinsics_root = + ordered_trie_root::(extrinsics.iter().map(Encode::encode)) + .to_fixed_bytes() + .into(); + + let header = Header { + parent_hash, + number, + extrinsics_root, + state_root: Default::default(), + digest: Default::default(), + }; + + // execute the block to get the real header. + Executor::new(None) + .call::<_, NeverNativeValue, fn() -> _>( + env, + "Core_initialize_block", + &header.encode(), + true, + None, + ) + .0 + .unwrap(); + + for i in extrinsics.iter() { + Executor::new(None) + .call::<_, NeverNativeValue, fn() -> _>( + env, + "BlockBuilder_apply_extrinsic", + &i.encode(), + true, + None, + ) + .0 + .unwrap(); + } + + let header = match Executor::new(None) + .call::<_, NeverNativeValue, fn() -> _>( + env, + "BlockBuilder_finalize_block", + &[0u8; 0], + true, + None, + ) + .0 + .unwrap() + { + NativeOrEncoded::Native(_) => unreachable!(), + NativeOrEncoded::Encoded(h) => Header::decode(&mut &h[..]).unwrap(), + }; + + let hash = header.blake2_256(); + (Block { header, extrinsics }.encode(), hash.into()) + } + + fn changes_trie_block() -> (Vec, Hash) { + construct_block( + &mut new_test_ext(COMPACT_CODE, true), + 1, + GENESIS_HASH.into(), + vec![ + CheckedExtrinsic { + signed: None, + function: Call::Timestamp(timestamp::Call::set(42)), + }, + CheckedExtrinsic { + signed: Some((alice(), 0)), + function: Call::Balances(balances::Call::transfer(bob().into(), 69)), + }, + ], + ) + } + + // block 1 and 2 must be created together to ensure transactions are only signed once (since they + // are not guaranteed to be deterministic) and to ensure that the correct state is propagated + // from block1's execution to block2 to derive the correct storage_root. 
+ fn blocks() -> ((Vec, Hash), (Vec, Hash)) { + let mut t = new_test_ext(COMPACT_CODE, false); + let block1 = construct_block( + &mut t, + 1, + GENESIS_HASH.into(), + vec![ + CheckedExtrinsic { + signed: None, + function: Call::Timestamp(timestamp::Call::set(42)), + }, + CheckedExtrinsic { + signed: Some((alice(), 0)), + function: Call::Balances(balances::Call::transfer(bob().into(), 69)), + }, + ], + ); + let block2 = construct_block( + &mut t, + 2, + block1.1.clone(), + vec![ + CheckedExtrinsic { + signed: None, + function: Call::Timestamp(timestamp::Call::set(52)), + }, + CheckedExtrinsic { + signed: Some((bob(), 0)), + function: Call::Balances(balances::Call::transfer(alice().into(), 5)), + }, + CheckedExtrinsic { + signed: Some((alice(), 1)), + function: Call::Balances(balances::Call::transfer(bob().into(), 15)), + }, + ], + ); + + let digest = generic::Digest::::default(); + assert_eq!(Header::decode(&mut &block2.0[..]).unwrap().digest, digest); + + (block1, block2) + } + + fn big_block() -> (Vec, Hash) { + construct_block( + &mut new_test_ext(COMPACT_CODE, false), + 1, + GENESIS_HASH.into(), + vec![ + CheckedExtrinsic { + signed: None, + function: Call::Timestamp(timestamp::Call::set(42)), + }, + CheckedExtrinsic { + signed: Some((alice(), 0)), + function: Call::Consensus(consensus::Call::remark(vec![0; 120000])), + }, + ], + ) + } + + #[test] + fn full_native_block_import_works() { + let mut t = new_test_ext(COMPACT_CODE, false); + + let (block1, block2) = blocks(); + + executor() + .call::<_, NeverNativeValue, fn() -> _>( + &mut t, + "Core_execute_block", + &block1.0, + true, + None, + ) + .0 + .unwrap(); + + runtime_io::with_externalities(&mut t, || { + // block1 transfers from alice 69 to bob. + // -1 is the default fee + assert_eq!(Balances::total_balance(&alice()), 111 - 69 - 1); + assert_eq!(Balances::total_balance(&bob()), 100 + 69); + assert_eq!( + System::events(), + vec![ + EventRecord { + phase: Phase::ApplyExtrinsic(0), + event: Event::system(system::Event::ExtrinsicSuccess) + }, + EventRecord { + phase: Phase::ApplyExtrinsic(1), + event: Event::balances(balances::RawEvent::Transfer( + alice().into(), + bob().into(), + 69, + 0 + )) + }, + EventRecord { + phase: Phase::ApplyExtrinsic(1), + event: Event::system(system::Event::ExtrinsicSuccess) + }, + EventRecord { + phase: Phase::Finalization, + event: Event::treasury(treasury::RawEvent::Spending(0)) + }, + EventRecord { + phase: Phase::Finalization, + event: Event::treasury(treasury::RawEvent::Burnt(0)) + }, + EventRecord { + phase: Phase::Finalization, + event: Event::treasury(treasury::RawEvent::Rollover(0)) + }, + ] + ); + }); + + executor() + .call::<_, NeverNativeValue, fn() -> _>( + &mut t, + "Core_execute_block", + &block2.0, + true, + None, + ) + .0 + .unwrap(); + + runtime_io::with_externalities(&mut t, || { + // bob sends 5, alice sends 15 | bob += 10, alice -= 10 + // 111 - 69 - 1 - 10 - 1 = 30 + assert_eq!(Balances::total_balance(&alice()), 111 - 69 - 1 - 10 - 1); + // 100 + 69 + 10 - 1 = 178 + assert_eq!(Balances::total_balance(&bob()), 100 + 69 + 10 - 1); + assert_eq!( + System::events(), + vec![ + EventRecord { + phase: Phase::ApplyExtrinsic(0), + event: Event::system(system::Event::ExtrinsicSuccess) + }, + EventRecord { + phase: Phase::ApplyExtrinsic(1), + event: Event::balances(balances::RawEvent::Transfer( + bob().into(), + alice().into(), + 5, + 0 + )) + }, + EventRecord { + phase: Phase::ApplyExtrinsic(1), + event: Event::system(system::Event::ExtrinsicSuccess) + }, + EventRecord { + phase: 
Phase::ApplyExtrinsic(2), + event: Event::balances(balances::RawEvent::Transfer( + alice().into(), + bob().into(), + 15, + 0 + )) + }, + EventRecord { + phase: Phase::ApplyExtrinsic(2), + event: Event::system(system::Event::ExtrinsicSuccess) + }, + EventRecord { + phase: Phase::Finalization, + event: Event::session(session::RawEvent::NewSession(1)) + }, + EventRecord { + phase: Phase::Finalization, + event: Event::treasury(treasury::RawEvent::Spending(0)) + }, + EventRecord { + phase: Phase::Finalization, + event: Event::treasury(treasury::RawEvent::Burnt(0)) + }, + EventRecord { + phase: Phase::Finalization, + event: Event::treasury(treasury::RawEvent::Rollover(0)) + }, + ] + ); + }); + } + + #[test] + fn full_wasm_block_import_works() { + let mut t = new_test_ext(COMPACT_CODE, false); + + let (block1, block2) = blocks(); + + WasmExecutor::new() + .call(&mut t, 8, COMPACT_CODE, "Core_execute_block", &block1.0) + .unwrap(); + + runtime_io::with_externalities(&mut t, || { + // block1 transfers from alice 69 to bob. + // -1 is the default fee + assert_eq!(Balances::total_balance(&alice()), 111 - 69 - 1); + assert_eq!(Balances::total_balance(&bob()), 100 + 69); + }); + + WasmExecutor::new() + .call(&mut t, 8, COMPACT_CODE, "Core_execute_block", &block2.0) + .unwrap(); + + runtime_io::with_externalities(&mut t, || { + // bob sends 5, alice sends 15 | bob += 10, alice -= 10 + // 111 - 69 - 1 - 10 - 1 = 30 + assert_eq!(Balances::total_balance(&alice()), 111 - 69 - 1 - 10 - 1); + // 100 + 69 + 10 - 1 = 178 + assert_eq!(Balances::total_balance(&bob()), 100 + 69 + 10 - 1); + }); + } + + const CODE_TRANSFER: &str = r#" (module ;; ext_call( ;; callee_ptr: u32, @@ -697,192 +786,254 @@ mod tests { ) "#; - #[test] - fn deploying_wasm_contract_should_work() { - - let transfer_code = wabt::wat2wasm(CODE_TRANSFER).unwrap(); - let transfer_ch = ::Hashing::hash(&transfer_code); - - let addr = ::DetermineContractAddress::contract_address_for( - &transfer_ch, - &[], - &charlie(), - ); - - let b = construct_block( - &mut new_test_ext(COMPACT_CODE, false), - 1, - GENESIS_HASH.into(), - vec![ - CheckedExtrinsic { - signed: None, - function: Call::Timestamp(timestamp::Call::set(42)), - }, - CheckedExtrinsic { - signed: Some((charlie(), 0)), - function: Call::Contract( - contract::Call::put_code::(10_000, transfer_code) - ), - }, - CheckedExtrinsic { - signed: Some((charlie(), 1)), - function: Call::Contract( - contract::Call::create::(10, 10_000, transfer_ch, Vec::new()) - ), - }, - CheckedExtrinsic { - signed: Some((charlie(), 2)), - function: Call::Contract( - contract::Call::call::(indices::address::Address::Id(addr.clone()), 10, 10_000, vec![0x00, 0x01, 0x02, 0x03]) - ), - }, - ] - ); - - let mut t = new_test_ext(COMPACT_CODE, false); - - WasmExecutor::new().call(&mut t, 8, COMPACT_CODE,"Core_execute_block", &b.0).unwrap(); - - runtime_io::with_externalities(&mut t, || { - // Verify that the contract constructor worked well and code of TRANSFER contract is actually deployed. 
- assert_eq!(&contract::CodeHashOf::::get(addr).unwrap(), &transfer_ch); - }); - } - - #[test] - fn wasm_big_block_import_fails() { - let mut t = new_test_ext(COMPACT_CODE, false); - - assert!( - WasmExecutor::new().call( - &mut t, - 8, - COMPACT_CODE, - "Core_execute_block", - &big_block().0 - ).is_err() - ); - } - - #[test] - fn native_big_block_import_succeeds() { - let mut t = new_test_ext(COMPACT_CODE, false); - - Executor::new(None).call::<_, NeverNativeValue, fn() -> _>( - &mut t, - "Core_execute_block", - &big_block().0, - true, - None, - ).0.unwrap(); - } - - #[test] - fn native_big_block_import_fails_on_fallback() { - let mut t = new_test_ext(COMPACT_CODE, false); - - assert!( - Executor::new(None).call::<_, NeverNativeValue, fn() -> _>( - &mut t, - "Core_execute_block", - &big_block().0, - false, - None, - ).0.is_err() - ); - } - - #[test] - fn panic_execution_gives_error() { - let foreign_code = include_bytes!("../../runtime/wasm/target/wasm32-unknown-unknown/release/node_runtime.wasm"); - let mut t = TestExternalities::::new_with_code(foreign_code, map![ - twox_128(&>::key_for(alice())).to_vec() => vec![69u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - twox_128(>::key()).to_vec() => vec![69u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - twox_128(>::key()).to_vec() => vec![0u8; 16], - twox_128(>::key()).to_vec() => vec![0u8; 16], - twox_128(>::key()).to_vec() => vec![0u8; 16], - twox_128(>::key()).to_vec() => vec![0u8; 16], - twox_128(&>::key_for(0)).to_vec() => vec![0u8; 32], - twox_128(>::key()).to_vec() => vec![70u8; 16], - twox_128(>::key()).to_vec() => vec![0u8; 16] - ]); - - let r = WasmExecutor::new().call(&mut t, 8, COMPACT_CODE, "Core_initialize_block", &vec![].and(&from_block_number(1u64))); - assert!(r.is_ok()); - let r = WasmExecutor::new().call(&mut t, 8, COMPACT_CODE, "BlockBuilder_apply_extrinsic", &vec![].and(&xt())).unwrap(); - let r = ApplyResult::decode(&mut &r[..]).unwrap(); - assert_eq!(r, Err(ApplyError::CantPay)); - } - - #[test] - fn successful_execution_gives_ok() { - let foreign_code = include_bytes!("../../runtime/wasm/target/wasm32-unknown-unknown/release/node_runtime.compact.wasm"); - let mut t = TestExternalities::::new_with_code(foreign_code, map![ - twox_128(&>::key_for(alice())).to_vec() => vec![111u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - twox_128(>::key()).to_vec() => vec![111u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - twox_128(>::key()).to_vec() => vec![0u8; 16], - twox_128(>::key()).to_vec() => vec![0u8; 16], - twox_128(>::key()).to_vec() => vec![0u8; 16], - twox_128(>::key()).to_vec() => vec![0u8; 16], - twox_128(&>::key_for(0)).to_vec() => vec![0u8; 32], - twox_128(>::key()).to_vec() => vec![0u8; 16], - twox_128(>::key()).to_vec() => vec![0u8; 16] - ]); - - let r = WasmExecutor::new().call(&mut t, 8, COMPACT_CODE, "Core_initialize_block", &vec![].and(&from_block_number(1u64))); - assert!(r.is_ok()); - let r = WasmExecutor::new().call(&mut t, 8, COMPACT_CODE, "BlockBuilder_apply_extrinsic", &vec![].and(&xt())).unwrap(); - let r = ApplyResult::decode(&mut &r[..]).unwrap(); - assert_eq!(r, Ok(ApplyOutcome::Success)); - - runtime_io::with_externalities(&mut t, || { - assert_eq!(Balances::total_balance(&alice()), 42); - assert_eq!(Balances::total_balance(&bob()), 69); - }); - } - - #[test] - fn full_native_block_import_works_with_changes_trie() { - let block1 = changes_trie_block(); - let block_data = block1.0; - let block = Block::decode(&mut &block_data[..]).unwrap(); - - let mut t = new_test_ext(COMPACT_CODE, true); 
- Executor::new(None).call::<_, NeverNativeValue, fn() -> _>( - &mut t, - "Core_execute_block", - &block.encode(), - true, - None, - ).0.unwrap(); - - assert!(t.storage_changes_root(Default::default(), 0).is_some()); - } - - #[test] - fn full_wasm_block_import_works_with_changes_trie() { - let block1 = changes_trie_block(); - - let mut t = new_test_ext(COMPACT_CODE, true); - WasmExecutor::new().call(&mut t, 8, COMPACT_CODE, "Core_execute_block", &block1.0).unwrap(); - - assert!(t.storage_changes_root(Default::default(), 0).is_some()); - } - - #[cfg(feature = "benchmarks")] - mod benches { - use super::*; - use test::Bencher; - - #[bench] - fn wasm_execute_block(b: &mut Bencher) { - let (block1, block2) = blocks(); - - b.iter(|| { - let mut t = new_test_ext(COMPACT_CODE, false); - WasmExecutor::new().call(&mut t, "Core_execute_block", &block1.0).unwrap(); - WasmExecutor::new().call(&mut t, "Core_execute_block", &block2.0).unwrap(); - }); - } - } + #[test] + fn deploying_wasm_contract_should_work() { + let transfer_code = wabt::wat2wasm(CODE_TRANSFER).unwrap(); + let transfer_ch = ::Hashing::hash(&transfer_code); + + let addr = ::DetermineContractAddress::contract_address_for( + &transfer_ch, + &[], + &charlie(), + ); + + let b = construct_block( + &mut new_test_ext(COMPACT_CODE, false), + 1, + GENESIS_HASH.into(), + vec![ + CheckedExtrinsic { + signed: None, + function: Call::Timestamp(timestamp::Call::set(42)), + }, + CheckedExtrinsic { + signed: Some((charlie(), 0)), + function: Call::Contract(contract::Call::put_code::( + 10_000, + transfer_code, + )), + }, + CheckedExtrinsic { + signed: Some((charlie(), 1)), + function: Call::Contract(contract::Call::create::( + 10, + 10_000, + transfer_ch, + Vec::new(), + )), + }, + CheckedExtrinsic { + signed: Some((charlie(), 2)), + function: Call::Contract(contract::Call::call::( + indices::address::Address::Id(addr.clone()), + 10, + 10_000, + vec![0x00, 0x01, 0x02, 0x03], + )), + }, + ], + ); + + let mut t = new_test_ext(COMPACT_CODE, false); + + WasmExecutor::new() + .call(&mut t, 8, COMPACT_CODE, "Core_execute_block", &b.0) + .unwrap(); + + runtime_io::with_externalities(&mut t, || { + // Verify that the contract constructor worked well and code of TRANSFER contract is actually deployed. 
+            assert_eq!(
+                &contract::CodeHashOf::<Runtime>::get(addr).unwrap(),
+                &transfer_ch
+            );
+        });
+    }
+
+    #[test]
+    fn wasm_big_block_import_fails() {
+        let mut t = new_test_ext(COMPACT_CODE, false);
+
+        assert!(WasmExecutor::new()
+            .call(
+                &mut t,
+                8,
+                COMPACT_CODE,
+                "Core_execute_block",
+                &big_block().0
+            )
+            .is_err());
+    }
+
+    #[test]
+    fn native_big_block_import_succeeds() {
+        let mut t = new_test_ext(COMPACT_CODE, false);
+
+        Executor::new(None)
+            .call::<_, NeverNativeValue, fn() -> _>(
+                &mut t,
+                "Core_execute_block",
+                &big_block().0,
+                true,
+                None,
+            )
+            .0
+            .unwrap();
+    }
+
+    #[test]
+    fn native_big_block_import_fails_on_fallback() {
+        let mut t = new_test_ext(COMPACT_CODE, false);
+
+        assert!(Executor::new(None)
+            .call::<_, NeverNativeValue, fn() -> _>(
+                &mut t,
+                "Core_execute_block",
+                &big_block().0,
+                false,
+                None,
+            )
+            .0
+            .is_err());
+    }
+
+    #[test]
+    fn panic_execution_gives_error() {
+        let foreign_code = include_bytes!(
+            "../../runtime/wasm/target/wasm32-unknown-unknown/release/node_runtime.wasm"
+        );
+        let mut t = TestExternalities::<Blake2Hasher>::new_with_code(
+            foreign_code,
+            map![
+                twox_128(&>::key_for(alice())).to_vec() => vec![69u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+                twox_128(>::key()).to_vec() => vec![69u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+                twox_128(>::key()).to_vec() => vec![0u8; 16],
+                twox_128(>::key()).to_vec() => vec![0u8; 16],
+                twox_128(>::key()).to_vec() => vec![0u8; 16],
+                twox_128(>::key()).to_vec() => vec![0u8; 16],
+                twox_128(&>::key_for(0)).to_vec() => vec![0u8; 32],
+                twox_128(>::key()).to_vec() => vec![70u8; 16],
+                twox_128(>::key()).to_vec() => vec![0u8; 16]
+            ],
+        );
+
+        let r = WasmExecutor::new().call(
+            &mut t,
+            8,
+            COMPACT_CODE,
+            "Core_initialize_block",
+            &vec![].and(&from_block_number(1u64)),
+        );
+        assert!(r.is_ok());
+        let r = WasmExecutor::new()
+            .call(
+                &mut t,
+                8,
+                COMPACT_CODE,
+                "BlockBuilder_apply_extrinsic",
+                &vec![].and(&xt()),
+            )
+            .unwrap();
+        let r = ApplyResult::decode(&mut &r[..]).unwrap();
+        assert_eq!(r, Err(ApplyError::CantPay));
+    }
+
+    #[test]
+    fn successful_execution_gives_ok() {
+        let foreign_code = include_bytes!(
+            "../../runtime/wasm/target/wasm32-unknown-unknown/release/node_runtime.compact.wasm"
+        );
+        let mut t = TestExternalities::<Blake2Hasher>::new_with_code(
+            foreign_code,
+            map![
+                twox_128(&>::key_for(alice())).to_vec() => vec![111u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+                twox_128(>::key()).to_vec() => vec![111u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+                twox_128(>::key()).to_vec() => vec![0u8; 16],
+                twox_128(>::key()).to_vec() => vec![0u8; 16],
+                twox_128(>::key()).to_vec() => vec![0u8; 16],
+                twox_128(>::key()).to_vec() => vec![0u8; 16],
+                twox_128(&>::key_for(0)).to_vec() => vec![0u8; 32],
+                twox_128(>::key()).to_vec() => vec![0u8; 16],
+                twox_128(>::key()).to_vec() => vec![0u8; 16]
+            ],
+        );
+
+        let r = WasmExecutor::new().call(
+            &mut t,
+            8,
+            COMPACT_CODE,
+            "Core_initialize_block",
+            &vec![].and(&from_block_number(1u64)),
+        );
+        assert!(r.is_ok());
+        let r = WasmExecutor::new()
+            .call(
+                &mut t,
+                8,
+                COMPACT_CODE,
+                "BlockBuilder_apply_extrinsic",
+                &vec![].and(&xt()),
+            )
+            .unwrap();
+        let r = ApplyResult::decode(&mut &r[..]).unwrap();
+        assert_eq!(r, Ok(ApplyOutcome::Success));
+
+        runtime_io::with_externalities(&mut t, || {
+            assert_eq!(Balances::total_balance(&alice()), 42);
+            assert_eq!(Balances::total_balance(&bob()), 69);
+        });
+    }
+
+    #[test]
+    fn full_native_block_import_works_with_changes_trie() {
+        let block1 = changes_trie_block();
+        let block_data = block1.0;
+        let block = Block::decode(&mut &block_data[..]).unwrap();
+
+        let mut t = new_test_ext(COMPACT_CODE, true);
+        Executor::new(None)
+            .call::<_, NeverNativeValue, fn() -> _>(
+                &mut t,
+                "Core_execute_block",
+                &block.encode(),
+                true,
+                None,
+            )
+            .0
+            .unwrap();
+
+        assert!(t.storage_changes_root(Default::default(), 0).is_some());
+    }
+
+    #[test]
+    fn full_wasm_block_import_works_with_changes_trie() {
+        let block1 = changes_trie_block();
+
+        let mut t = new_test_ext(COMPACT_CODE, true);
+        WasmExecutor::new()
+            .call(&mut t, 8, COMPACT_CODE, "Core_execute_block", &block1.0)
+            .unwrap();
+
+        assert!(t.storage_changes_root(Default::default(), 0).is_some());
+    }
+
+    #[cfg(feature = "benchmarks")]
+    mod benches {
+        use super::*;
+        use test::Bencher;
+
+        #[bench]
+        fn wasm_execute_block(b: &mut Bencher) {
+            let (block1, block2) = blocks();
+
+            b.iter(|| {
+                let mut t = new_test_ext(COMPACT_CODE, false);
+                WasmExecutor::new()
+                    .call(&mut t, "Core_execute_block", &block1.0)
+                    .unwrap();
+                WasmExecutor::new()
+                    .call(&mut t, "Core_execute_block", &block2.0)
+                    .unwrap();
+            });
+        }
+    }
 }
diff --git a/node/primitives/src/lib.rs b/node/primitives/src/lib.rs
index 0d8906c47d..c5a86eb227 100644
--- a/node/primitives/src/lib.rs
+++ b/node/primitives/src/lib.rs
@@ -17,12 +17,13 @@
 //! Low-level types used throughout the Substrate code.
 
 #![warn(missing_docs)]
-
 #![cfg_attr(not(feature = "std"), no_std)]
 #![cfg_attr(not(feature = "std"), feature(alloc))]
 
 use runtime_primitives::{
-	generic, traits::{Verify, BlakeTwo256}, OpaqueExtrinsic, AnySignature
+    generic,
+    traits::{BlakeTwo256, Verify},
+    AnySignature, OpaqueExtrinsic,
 };
 
 /// An index to a block.
@@ -59,8 +60,12 @@ pub type Hash = primitives::H256;
 pub type Timestamp = u64;
 
 /// Header type.
-///
-pub type Header = generic::Header<BlockNumber, BlakeTwo256, generic::DigestItem<Hash, AuthorityId, AuthoritySignature>>;
+///
+pub type Header = generic::Header<
+    BlockNumber,
+    BlakeTwo256,
+    generic::DigestItem<Hash, AuthorityId, AuthoritySignature>,
+>;
 /// Block type.
 pub type Block = generic::Block<Header, OpaqueExtrinsic>;
 /// Block ID.
 pub type BlockId = generic::BlockId<Block>;
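[Editor's note: for readers unfamiliar with the rustfmt style applied throughout this patch, when a type alias such as `Header` above exceeds the line width, generic arguments are broken one per line with a trailing comma. A toy sketch of the same mechanical transformation (names are illustrative, not from this codebase):

    // Toy stand-ins for the real runtime parameters.
    struct BlakeTwo256;
    struct DigestItem<H, Id, Sig>(core::marker::PhantomData<(H, Id, Sig)>);
    struct Header<N, Hash, D>(core::marker::PhantomData<(N, Hash, D)>);

    // Before: one long line that rustfmt rejects at the default width.
    // type MyHeader = Header<u64, BlakeTwo256, DigestItem<[u8; 32], u64, ()>>;

    // After: one generic argument per line, trailing comma included.
    type MyHeader = Header<
        u64,
        BlakeTwo256,
        DigestItem<[u8; 32], u64, ()>,
    >;

    fn main() {
        let _h: MyHeader = Header(core::marker::PhantomData);
    }

The same wrapping rule explains the reshaped `UncheckedExtrinsic` and `Executive` aliases in the runtime diff that follows.]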
diff --git a/node/runtime/src/lib.rs b/node/runtime/src/lib.rs
index 8be70a6bcf..94f83146d3 100644
--- a/node/runtime/src/lib.rs
+++ b/node/runtime/src/lib.rs
@@ -18,180 +18,181 @@
 
 #![cfg_attr(not(feature = "std"), no_std)]
 // `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256.
-#![recursion_limit="256"]
+#![recursion_limit = "256"]
 
-use rstd::prelude::*;
-use support::construct_runtime;
-use substrate_primitives::u32_trait::{_2, _4};
-use node_primitives::{
-	AccountId, AccountIndex, Balance, BlockNumber, Hash, Index, AuthorityId, Signature, AuthoritySignature
+use client::{
+    block_builder::api::{self as block_builder_api, CheckInherentsResult, InherentData},
+    impl_runtime_apis, runtime_api as client_api,
 };
+#[cfg(feature = "std")]
+use council::seats as council_seats;
+use council::{motions as council_motions, voting as council_voting};
 use grandpa::fg_primitives::{self, ScheduledChange};
-use client::{
-	block_builder::api::{self as block_builder_api, InherentData, CheckInherentsResult},
-	runtime_api as client_api, impl_runtime_apis
+use node_primitives::{
+    AccountId, AccountIndex, AuthorityId, AuthoritySignature, Balance, BlockNumber, Hash, Index,
+    Signature,
 };
-use runtime_primitives::{ApplyResult, generic, create_runtime_str};
-use runtime_primitives::transaction_validity::TransactionValidity;
+use rstd::prelude::*;
 use runtime_primitives::traits::{
-	BlakeTwo256, Block as BlockT, DigestFor, NumberFor, StaticLookup, CurrencyToVoteHandler,
-	AuthorityIdFor,
+    AuthorityIdFor, BlakeTwo256, Block as BlockT, CurrencyToVoteHandler, DigestFor, NumberFor,
+    StaticLookup,
 };
-use version::RuntimeVersion;
-use council::{motions as council_motions, voting as council_voting};
-#[cfg(feature = "std")]
-use council::seats as council_seats;
+use runtime_primitives::transaction_validity::TransactionValidity;
+use runtime_primitives::{create_runtime_str, generic, ApplyResult};
+use substrate_primitives::u32_trait::{_2, _4};
+use substrate_primitives::OpaqueMetadata;
+use support::construct_runtime;
 #[cfg(any(feature = "std", test))]
 use version::NativeVersion;
-use substrate_primitives::OpaqueMetadata;
+use version::RuntimeVersion;
 
+pub use balances::Call as BalancesCall;
+pub use consensus::Call as ConsensusCall;
 #[cfg(any(feature = "std", test))]
 pub use runtime_primitives::BuildStorage;
-pub use consensus::Call as ConsensusCall;
-pub use timestamp::Call as TimestampCall;
-pub use balances::Call as BalancesCall;
-pub use runtime_primitives::{Permill, Perbill};
-pub use support::StorageValue;
+pub use runtime_primitives::{Perbill, Permill};
 pub use staking::StakerStatus;
+pub use support::StorageValue;
+pub use timestamp::Call as TimestampCall;
 
 /// Runtime version.
 pub const VERSION: RuntimeVersion = RuntimeVersion {
-	spec_name: create_runtime_str!("node"),
-	impl_name: create_runtime_str!("substrate-node"),
-	authoring_version: 10,
-	spec_version: 53,
-	impl_version: 53,
-	apis: RUNTIME_API_VERSIONS,
+    spec_name: create_runtime_str!("node"),
+    impl_name: create_runtime_str!("substrate-node"),
+    authoring_version: 10,
+    spec_version: 53,
+    impl_version: 53,
+    apis: RUNTIME_API_VERSIONS,
 };
 
 /// Native version.
#[cfg(any(feature = "std", test))] pub fn native_version() -> NativeVersion { - NativeVersion { - runtime_version: VERSION, - can_author_with: Default::default(), - } + NativeVersion { + runtime_version: VERSION, + can_author_with: Default::default(), + } } impl system::Trait for Runtime { - type Origin = Origin; - type Index = Index; - type BlockNumber = BlockNumber; - type Hash = Hash; - type Hashing = BlakeTwo256; - type Digest = generic::Digest; - type AccountId = AccountId; - type Lookup = Indices; - type Header = generic::Header; - type Event = Event; - type Log = Log; + type Origin = Origin; + type Index = Index; + type BlockNumber = BlockNumber; + type Hash = Hash; + type Hashing = BlakeTwo256; + type Digest = generic::Digest; + type AccountId = AccountId; + type Lookup = Indices; + type Header = generic::Header; + type Event = Event; + type Log = Log; } impl aura::Trait for Runtime { - type HandleReport = aura::StakingSlasher; + type HandleReport = aura::StakingSlasher; } impl indices::Trait for Runtime { - type AccountIndex = AccountIndex; - type IsDeadAccount = Balances; - type ResolveHint = indices::SimpleResolveHint; - type Event = Event; + type AccountIndex = AccountIndex; + type IsDeadAccount = Balances; + type ResolveHint = indices::SimpleResolveHint; + type Event = Event; } impl balances::Trait for Runtime { - type Balance = Balance; - type OnFreeBalanceZero = ((Staking, Contract), Session); - type OnNewAccount = Indices; - type Event = Event; - type TransactionPayment = (); - type DustRemoval = (); - type TransferPayment = (); + type Balance = Balance; + type OnFreeBalanceZero = ((Staking, Contract), Session); + type OnNewAccount = Indices; + type Event = Event; + type TransactionPayment = (); + type DustRemoval = (); + type TransferPayment = (); } impl consensus::Trait for Runtime { - type Log = Log; - type SessionKey = AuthorityId; + type Log = Log; + type SessionKey = AuthorityId; - // The Aura module handles offline-reports internally - // rather than using an explicit report system. - type InherentOfflineReport = (); + // The Aura module handles offline-reports internally + // rather than using an explicit report system. 
+    type InherentOfflineReport = ();
 }
 
 impl timestamp::Trait for Runtime {
-	type Moment = u64;
-	type OnTimestampSet = Aura;
+    type Moment = u64;
+    type OnTimestampSet = Aura;
 }
 
 impl session::Trait for Runtime {
-	type ConvertAccountIdToSessionKey = ();
-	type OnSessionChange = (Staking, grandpa::SyncedAuthorities<Runtime>);
-	type Event = Event;
+    type ConvertAccountIdToSessionKey = ();
+    type OnSessionChange = (Staking, grandpa::SyncedAuthorities<Runtime>);
+    type Event = Event;
 }
 
 impl staking::Trait for Runtime {
-	type Currency = balances::Module<Self>;
-	type CurrencyToVote = CurrencyToVoteHandler;
-	type OnRewardMinted = Treasury;
-	type Event = Event;
-	type Slash = ();
-	type Reward = ();
+    type Currency = balances::Module<Self>;
+    type CurrencyToVote = CurrencyToVoteHandler;
+    type OnRewardMinted = Treasury;
+    type Event = Event;
+    type Slash = ();
+    type Reward = ();
 }
 
 impl democracy::Trait for Runtime {
-	type Currency = balances::Module<Self>;
-	type Proposal = Call;
-	type Event = Event;
+    type Currency = balances::Module<Self>;
+    type Proposal = Call;
+    type Event = Event;
 }
 
 impl council::Trait for Runtime {
-	type Event = Event;
-	type BadPresentation = ();
-	type BadReaper = ();
+    type Event = Event;
+    type BadPresentation = ();
+    type BadReaper = ();
 }
 
 impl council::voting::Trait for Runtime {
-	type Event = Event;
+    type Event = Event;
 }
 
 impl council::motions::Trait for Runtime {
-	type Origin = Origin;
-	type Proposal = Call;
-	type Event = Event;
+    type Origin = Origin;
+    type Proposal = Call;
+    type Event = Event;
 }
 
 impl treasury::Trait for Runtime {
-	type Currency = balances::Module<Self>;
-	type ApproveOrigin = council_motions::EnsureMembers<_4>;
-	type RejectOrigin = council_motions::EnsureMembers<_2>;
-	type Event = Event;
-	type MintedForSpending = ();
-	type ProposalRejection = ();
+    type Currency = balances::Module<Self>;
+    type ApproveOrigin = council_motions::EnsureMembers<_4>;
+    type RejectOrigin = council_motions::EnsureMembers<_2>;
+    type Event = Event;
+    type MintedForSpending = ();
+    type ProposalRejection = ();
 }
 
 impl contract::Trait for Runtime {
-	type Currency = balances::Module<Self>;
-	type Call = Call;
-	type Event = Event;
-	type Gas = u64;
-	type DetermineContractAddress = contract::SimpleAddressDeterminator<Runtime>;
-	type ComputeDispatchFee = contract::DefaultDispatchFeeComputor<Runtime>;
-	type TrieIdGenerator = contract::TrieIdFromParentCounter<Runtime>;
-	type GasPayment = ();
+    type Currency = balances::Module<Self>;
+    type Call = Call;
+    type Event = Event;
+    type Gas = u64;
+    type DetermineContractAddress = contract::SimpleAddressDeterminator<Runtime>;
+    type ComputeDispatchFee = contract::DefaultDispatchFeeComputor<Runtime>;
+    type TrieIdGenerator = contract::TrieIdFromParentCounter<Runtime>;
+    type GasPayment = ();
 }
 
 impl sudo::Trait for Runtime {
-	type Event = Event;
-	type Proposal = Call;
+    type Event = Event;
+    type Proposal = Call;
 }
 
 impl grandpa::Trait for Runtime {
-	type SessionKey = AuthorityId;
-	type Log = Log;
-	type Event = Event;
+    type SessionKey = AuthorityId;
+    type Log = Log;
+    type Event = Event;
 }
 
 impl finality_tracker::Trait for Runtime {
-	type OnFinalizationStalled = grandpa::SyncedAuthorities<Runtime>;
+    type OnFinalizationStalled = grandpa::SyncedAuthorities<Runtime>;
 }
 
 construct_runtime!(
@@ -232,110 +233,112 @@ pub type SignedBlock = generic::SignedBlock<Block>;
 /// BlockId type as expected by this runtime.
 pub type BlockId = generic::BlockId<Block>;
 /// Unchecked extrinsic type as expected by this runtime.
-pub type UncheckedExtrinsic = generic::UncheckedMortalCompactExtrinsic<Address, Index, Call, Signature>;
+pub type UncheckedExtrinsic =
+    generic::UncheckedMortalCompactExtrinsic<Address, Index, Call, Signature>;
 /// Extrinsic type that has already been checked.
 pub type CheckedExtrinsic = generic::CheckedExtrinsic<AccountId, Index, Call>;
 /// Executive: handles dispatch to the various modules.
-pub type Executive = executive::Executive<Runtime, Block, system::ChainContext<Runtime>, Balances, AllModules>;
+pub type Executive =
+    executive::Executive<Runtime, Block, system::ChainContext<Runtime>, Balances, AllModules>;
 
 impl_runtime_apis! {
-	impl client_api::Core<Block> for Runtime {
-		fn version() -> RuntimeVersion {
-			VERSION
-		}
-
-		fn execute_block(block: Block) {
-			Executive::execute_block(block)
-		}
-
-		fn initialize_block(header: &<Block as BlockT>::Header) {
-			Executive::initialize_block(header)
-		}
-	}
-
-	impl client_api::Metadata<Block> for Runtime {
-		fn metadata() -> OpaqueMetadata {
-			Runtime::metadata().into()
-		}
-	}
-
-	impl block_builder_api::BlockBuilder<Block> for Runtime {
-		fn apply_extrinsic(extrinsic: <Block as BlockT>::Extrinsic) -> ApplyResult {
-			Executive::apply_extrinsic(extrinsic)
-		}
-
-		fn finalize_block() -> <Block as BlockT>::Header {
-			Executive::finalize_block()
-		}
-
-		fn inherent_extrinsics(data: InherentData) -> Vec<<Block as BlockT>::Extrinsic> {
-			data.create_extrinsics()
-		}
-
-		fn check_inherents(block: Block, data: InherentData) -> CheckInherentsResult {
-			data.check_extrinsics(&block)
-		}
-
-		fn random_seed() -> <Block as BlockT>::Hash {
-			System::random_seed()
-		}
-	}
-
-	impl client_api::TaggedTransactionQueue<Block> for Runtime {
-		fn validate_transaction(tx: <Block as BlockT>::Extrinsic) -> TransactionValidity {
-			Executive::validate_transaction(tx)
-		}
-	}
-
-	impl offchain_primitives::OffchainWorkerApi<Block> for Runtime {
-		fn offchain_worker(number: NumberFor<Block>) {
-			Executive::offchain_worker(number)
-		}
-	}
-
-	impl fg_primitives::GrandpaApi<Block> for Runtime {
-		fn grandpa_pending_change(digest: &DigestFor<Block>)
-			-> Option<ScheduledChange<NumberFor<Block>>>
-		{
-			for log in digest.logs.iter().filter_map(|l| match l {
-				Log(InternalLog::grandpa(grandpa_signal)) => Some(grandpa_signal),
-				_ => None
-			}) {
-				if let Some(change) = Grandpa::scrape_digest_change(log) {
-					return Some(change);
-				}
-			}
-			None
-		}
-
-		fn grandpa_forced_change(digest: &DigestFor<Block>)
-			-> Option<(NumberFor<Block>, ScheduledChange<NumberFor<Block>>)>
-		{
-			for log in digest.logs.iter().filter_map(|l| match l {
-				Log(InternalLog::grandpa(grandpa_signal)) => Some(grandpa_signal),
-				_ => None
-			}) {
-				if let Some(change) = Grandpa::scrape_digest_forced_change(log) {
-					return Some(change);
-				}
-			}
-			None
-		}
-
-		fn grandpa_authorities() -> Vec<(AuthorityId, u64)> {
-			Grandpa::grandpa_authorities()
-		}
-	}
-
-	impl consensus_aura::AuraApi<Block> for Runtime {
-		fn slot_duration() -> u64 {
-			Aura::slot_duration()
-		}
-	}
-
-	impl consensus_authorities::AuthoritiesApi<Block> for Runtime {
-		fn authorities() -> Vec<AuthorityIdFor<Block>> {
-			Consensus::authorities()
-		}
-	}
+    impl client_api::Core<Block> for Runtime {
+        fn version() -> RuntimeVersion {
+            VERSION
+        }
+
+        fn execute_block(block: Block) {
+            Executive::execute_block(block)
+        }
+
+        fn initialize_block(header: &<Block as BlockT>::Header) {
+            Executive::initialize_block(header)
+        }
+    }
+
+    impl client_api::Metadata<Block> for Runtime {
+        fn metadata() -> OpaqueMetadata {
+            Runtime::metadata().into()
+        }
+    }
+
+    impl block_builder_api::BlockBuilder<Block> for Runtime {
+        fn apply_extrinsic(extrinsic: <Block as BlockT>::Extrinsic) -> ApplyResult {
+            Executive::apply_extrinsic(extrinsic)
+        }
+
+        fn finalize_block() -> <Block as BlockT>::Header {
+            Executive::finalize_block()
+        }
+
+        fn inherent_extrinsics(data: InherentData) -> Vec<<Block as BlockT>::Extrinsic> {
+            data.create_extrinsics()
+        }
+
+        fn check_inherents(block: Block, data: InherentData) -> CheckInherentsResult {
+            data.check_extrinsics(&block)
+        }
+
+        fn random_seed() -> <Block as BlockT>::Hash {
+            System::random_seed()
+        }
+    }
+
+    impl client_api::TaggedTransactionQueue<Block> for Runtime {
+        fn validate_transaction(tx: <Block as BlockT>::Extrinsic) -> TransactionValidity {
+            Executive::validate_transaction(tx)
+        }
+    }
+
+    impl offchain_primitives::OffchainWorkerApi<Block> for Runtime {
+        fn offchain_worker(number: NumberFor<Block>) {
+            Executive::offchain_worker(number)
+        }
+    }
+
+    impl fg_primitives::GrandpaApi<Block> for Runtime {
+        fn grandpa_pending_change(digest: &DigestFor<Block>)
+            -> Option<ScheduledChange<NumberFor<Block>>>
+        {
+            for log in digest.logs.iter().filter_map(|l| match l {
+                Log(InternalLog::grandpa(grandpa_signal)) => Some(grandpa_signal),
+                _ => None
+            }) {
+                if let Some(change) = Grandpa::scrape_digest_change(log) {
+                    return Some(change);
+                }
+            }
+            None
+        }
+
+        fn grandpa_forced_change(digest: &DigestFor<Block>)
+            -> Option<(NumberFor<Block>, ScheduledChange<NumberFor<Block>>)>
+        {
+            for log in digest.logs.iter().filter_map(|l| match l {
+                Log(InternalLog::grandpa(grandpa_signal)) => Some(grandpa_signal),
+                _ => None
+            }) {
+                if let Some(change) = Grandpa::scrape_digest_forced_change(log) {
+                    return Some(change);
+                }
+            }
+            None
+        }
+
+        fn grandpa_authorities() -> Vec<(AuthorityId, u64)> {
+            Grandpa::grandpa_authorities()
+        }
+    }
+
+    impl consensus_aura::AuraApi<Block> for Runtime {
+        fn slot_duration() -> u64 {
+            Aura::slot_duration()
+        }
+    }
+
+    impl consensus_authorities::AuthoritiesApi<Block> for Runtime {
+        fn authorities() -> Vec<AuthorityIdFor<Block>> {
+            Consensus::authorities()
+        }
+    }
 }
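[Editor's note: the two GrandpaApi methods above share one control-flow pattern: walk the digest logs, keep only grandpa signals, and return the first one that scrapes to a change. A minimal free-standing sketch of that pattern with toy types (not the real srml-grandpa API):

    // Toy stand-ins for the runtime's digest and signal types.
    enum InternalLog {
        Grandpa(u32), // payload stands in for a grandpa consensus signal
        Other,
    }

    struct Digest {
        logs: Vec<InternalLog>,
    }

    // Stands in for Grandpa::scrape_digest_change: not every signal yields a change.
    fn scrape(signal: &u32) -> Option<u32> {
        if *signal % 2 == 0 { Some(*signal) } else { None }
    }

    // Mirrors grandpa_pending_change: the first scrapeable signal wins, else None.
    fn pending_change(digest: &Digest) -> Option<u32> {
        for signal in digest.logs.iter().filter_map(|l| match l {
            InternalLog::Grandpa(s) => Some(s),
            _ => None,
        }) {
            if let Some(change) = scrape(signal) {
                return Some(change);
            }
        }
        None
    }

    fn main() {
        let digest = Digest {
            logs: vec![InternalLog::Other, InternalLog::Grandpa(3), InternalLog::Grandpa(4)],
        };
        assert_eq!(pending_change(&digest), Some(4)); // 3 does not scrape, 4 does
    }
]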
diff --git a/node/src/main.rs b/node/src/main.rs
index 5ff0d7ff3b..d894568689 100644
--- a/node/src/main.rs
+++ b/node/src/main.rs
@@ -27,33 +27,38 @@ use std::cell::RefCell;
 // handles ctrl-c
 struct Exit;
 impl cli::IntoExit for Exit {
-	type Exit = future::MapErr<oneshot::Receiver<()>, fn(oneshot::Canceled) -> ()>;
-	fn into_exit(self) -> Self::Exit {
-		// can't use signal directly here because CtrlC takes only `Fn`.
-		let (exit_send, exit) = oneshot::channel();
-
-		let exit_send_cell = RefCell::new(Some(exit_send));
-		ctrlc::set_handler(move || {
-			if let Some(exit_send) = exit_send_cell.try_borrow_mut().expect("signal handler not reentrant; qed").take() {
-				exit_send.send(()).expect("Error sending exit notification");
-			}
-		}).expect("Error setting Ctrl-C handler");
-
-		exit.map_err(drop)
-	}
+    type Exit = future::MapErr<oneshot::Receiver<()>, fn(oneshot::Canceled) -> ()>;
+    fn into_exit(self) -> Self::Exit {
+        // can't use signal directly here because CtrlC takes only `Fn`.
+        let (exit_send, exit) = oneshot::channel();
+
+        let exit_send_cell = RefCell::new(Some(exit_send));
+        ctrlc::set_handler(move || {
+            if let Some(exit_send) = exit_send_cell
+                .try_borrow_mut()
+                .expect("signal handler not reentrant; qed")
+                .take()
+            {
+                exit_send.send(()).expect("Error sending exit notification");
+            }
+        })
+        .expect("Error setting Ctrl-C handler");
+
+        exit.map_err(drop)
+    }
 }
 
 error_chain::quick_main!(run);
 
 fn run() -> cli::error::Result<()> {
-	let version = VersionInfo {
-		name: "Substrate Node",
-		commit: env!("VERGEN_SHA_SHORT"),
-		version: env!("CARGO_PKG_VERSION"),
-		executable_name: "substrate",
-		author: "Parity Technologies <admin@parity.io>",
-		description: "Generic substrate node",
-		support_url: "https://github.com/paritytech/substrate/issues/new",
-	};
-	cli::run(::std::env::args(), Exit, version)
+    let version = VersionInfo {
+        name: "Substrate Node",
+        commit: env!("VERGEN_SHA_SHORT"),
+        version: env!("CARGO_PKG_VERSION"),
+        executable_name: "substrate",
+        author: "Parity Technologies <admin@parity.io>",
+        description: "Generic substrate node",
+        support_url: "https://github.com/paritytech/substrate/issues/new",
+    };
+    cli::run(::std::env::args(), Exit, version)
 }
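[Editor's note: the `into_exit` body above hinges on one detail: the ctrlc handler is a `Fn` that may run more than once, while a oneshot sender can be consumed only once — hence the `Option` in a cell and the `take()`. A thread-safe sketch of the same idea using only std (the real code uses the futures oneshot channel and a `RefCell`):

    use std::sync::mpsc;
    use std::sync::Mutex;

    fn main() {
        let (exit_send, exit) = mpsc::channel::<()>();

        // The handler may be invoked repeatedly; take() consumes the sender the
        // first time and leaves None for every later invocation.
        let exit_send_cell = Mutex::new(Some(exit_send));
        let handler = move || {
            if let Some(exit_send) = exit_send_cell.lock().expect("not poisoned").take() {
                exit_send.send(()).expect("Error sending exit notification");
            }
        };

        handler(); // first "signal": sends the notification
        handler(); // second "signal": a no-op instead of a double-send panic

        exit.recv().expect("exactly one notification was sent");
    }
]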
diff --git a/srml/assets/src/lib.rs b/srml/assets/src/lib.rs
index 9caa3dc20d..893945a4c0 100644
--- a/srml/assets/src/lib.rs
+++ b/srml/assets/src/lib.rs
@@ -19,67 +19,69 @@
 // Ensure we're `no_std` when compiling for Wasm.
 #![cfg_attr(not(feature = "std"), no_std)]
 
-use srml_support::{StorageValue, StorageMap, Parameter, decl_module, decl_event, decl_storage, ensure};
-use primitives::traits::{Member, SimpleArithmetic, Zero, StaticLookup};
+use primitives::traits::{Member, SimpleArithmetic, StaticLookup, Zero};
+use srml_support::{
+    decl_event, decl_module, decl_storage, ensure, Parameter, StorageMap, StorageValue,
+};
 use system::ensure_signed;
 
 pub trait Trait: system::Trait {
-	/// The overarching event type.
-	type Event: From<Event<Self>> + Into<<Self as system::Trait>::Event>;
+    /// The overarching event type.
+    type Event: From<Event<Self>> + Into<<Self as system::Trait>::Event>;
 
-	/// The units in which we record balances.
-	type Balance: Member + Parameter + SimpleArithmetic + Default + Copy;
+    /// The units in which we record balances.
+    type Balance: Member + Parameter + SimpleArithmetic + Default + Copy;
 }
 
 type AssetId = u32;
 
 decl_module! {
-	// Simple declaration of the `Module` type. Lets the macro know what it's working on.
-	pub struct Module<T: Trait> for enum Call where origin: T::Origin {
-		fn deposit_event<T>() = default;
-		/// Issue a new class of fungible assets. There are, and will only ever be, `total`
-		/// such assets and they'll all belong to the `origin` initially. It will have an
-		/// identifier `AssetId` instance: this will be specified in the `Issued` event.
-		fn issue(origin, #[compact] total: T::Balance) {
-			let origin = ensure_signed(origin)?;
-
-			let id = Self::next_asset_id();
-			<NextAssetId<T>>::mutate(|id| *id += 1);
-
-			<Balances<T>>::insert((id, origin.clone()), total);
-			<TotalSupply<T>>::insert(id, total);
-
-			Self::deposit_event(RawEvent::Issued(id, origin, total));
-		}
-
-		/// Move some assets from one holder to another.
-		fn transfer(origin,
-			#[compact] id: AssetId,
-			target: <T::Lookup as StaticLookup>::Source,
-			#[compact] amount: T::Balance
-		) {
-			let origin = ensure_signed(origin)?;
-			let origin_account = (id, origin.clone());
-			let origin_balance = <Balances<T>>::get(&origin_account);
-			let target = T::Lookup::lookup(target)?;
-			ensure!(!amount.is_zero(), "transfer amount should be non-zero");
-			ensure!(origin_balance >= amount, "origin account balance must be greater than or equal to the transfer amount");
-
-			Self::deposit_event(RawEvent::Transferred(id, origin, target.clone(), amount));
-			<Balances<T>>::insert(origin_account, origin_balance - amount);
-			<Balances<T>>::mutate((id, target), |balance| *balance += amount);
-		}
-
-		/// Destroy any assets of `id` owned by `origin`.
-		fn destroy(origin, #[compact] id: AssetId) {
-			let origin = ensure_signed(origin)?;
-			let balance = <Balances<T>>::take((id, origin.clone()));
-			ensure!(!balance.is_zero(), "origin balance should be non-zero");
-
-			<TotalSupply<T>>::mutate(id, |total_supply| *total_supply -= balance);
-			Self::deposit_event(RawEvent::Destroyed(id, origin, balance));
-		}
-	}
+    // Simple declaration of the `Module` type. Lets the macro know what it's working on.
+    pub struct Module<T: Trait> for enum Call where origin: T::Origin {
+        fn deposit_event<T>() = default;
+        /// Issue a new class of fungible assets. There are, and will only ever be, `total`
+        /// such assets and they'll all belong to the `origin` initially. It will have an
+        /// identifier `AssetId` instance: this will be specified in the `Issued` event.
+        fn issue(origin, #[compact] total: T::Balance) {
+            let origin = ensure_signed(origin)?;
+
+            let id = Self::next_asset_id();
+            <NextAssetId<T>>::mutate(|id| *id += 1);
+
+            <Balances<T>>::insert((id, origin.clone()), total);
+            <TotalSupply<T>>::insert(id, total);
+
+            Self::deposit_event(RawEvent::Issued(id, origin, total));
+        }
+
+        /// Move some assets from one holder to another.
+        fn transfer(origin,
+            #[compact] id: AssetId,
+            target: <T::Lookup as StaticLookup>::Source,
+            #[compact] amount: T::Balance
+        ) {
+            let origin = ensure_signed(origin)?;
+            let origin_account = (id, origin.clone());
+            let origin_balance = <Balances<T>>::get(&origin_account);
+            let target = T::Lookup::lookup(target)?;
+            ensure!(!amount.is_zero(), "transfer amount should be non-zero");
+            ensure!(origin_balance >= amount, "origin account balance must be greater than or equal to the transfer amount");
+
+            Self::deposit_event(RawEvent::Transferred(id, origin, target.clone(), amount));
+            <Balances<T>>::insert(origin_account, origin_balance - amount);
+            <Balances<T>>::mutate((id, target), |balance| *balance += amount);
+        }
+
+        /// Destroy any assets of `id` owned by `origin`.
+        fn destroy(origin, #[compact] id: AssetId) {
+            let origin = ensure_signed(origin)?;
+            let balance = <Balances<T>>::take((id, origin.clone()));
+            ensure!(!balance.is_zero(), "origin balance should be non-zero");
+
+            <TotalSupply<T>>::mutate(id, |total_supply| *total_supply -= balance);
+            Self::deposit_event(RawEvent::Destroyed(id, origin, balance));
+        }
+    }
 }
 
 decl_event!(
@@ -94,163 +96,179 @@ decl_event!(
 );
 
 decl_storage! {
-	trait Store for Module<T: Trait> as Assets {
-		/// The number of units of assets held by any given account.
-		Balances: map (AssetId, T::AccountId) => T::Balance;
-		/// The next asset identifier up for grabs.
-		NextAssetId get(next_asset_id): AssetId;
-		/// The total unit supply of an asset
-		TotalSupply: map AssetId => T::Balance;
-	}
+    trait Store for Module<T: Trait> as Assets {
+        /// The number of units of assets held by any given account.
+        Balances: map (AssetId, T::AccountId) => T::Balance;
+        /// The next asset identifier up for grabs.
+        NextAssetId get(next_asset_id): AssetId;
+        /// The total unit supply of an asset
+        TotalSupply: map AssetId => T::Balance;
+    }
 }
 
 // The main implementation block for the module.
 impl<T: Trait> Module<T> {
-	// Public immutables
-
-	/// Get the asset `id` balance of `who`.
-	pub fn balance(id: AssetId, who: T::AccountId) -> T::Balance {
-		<Balances<T>>::get((id, who))
-	}
-
-	// Get the total supply of an asset `id`
-	pub fn total_supply(id: AssetId) -> T::Balance {
-		<TotalSupply<T>>::get(id)
-	}
+    // Public immutables
+
+    /// Get the asset `id` balance of `who`.
+    pub fn balance(id: AssetId, who: T::AccountId) -> T::Balance {
+        <Balances<T>>::get((id, who))
+    }
+
+    // Get the total supply of an asset `id`
+    pub fn total_supply(id: AssetId) -> T::Balance {
+        <TotalSupply<T>>::get(id)
+    }
 }
 
 #[cfg(test)]
 mod tests {
-	use super::*;
-
-	use runtime_io::with_externalities;
-	use srml_support::{impl_outer_origin, assert_ok, assert_noop};
-	use substrate_primitives::{H256, Blake2Hasher};
-	// The testing primitives are very useful for avoiding having to work with signatures
-	// or public keys. `u64` is used as the `AccountId` and no `Signature`s are required.
-	use primitives::{
-		BuildStorage,
-		traits::{BlakeTwo256, IdentityLookup},
-		testing::{Digest, DigestItem, Header}
-	};
-
-	impl_outer_origin! {
-		pub enum Origin for Test {}
-	}
-
-	// For testing the module, we construct most of a mock runtime. This means
-	// first constructing a configuration type (`Test`) which `impl`s each of the
-	// configuration traits of modules we want to use.
-	#[derive(Clone, Eq, PartialEq)]
-	pub struct Test;
-	impl system::Trait for Test {
-		type Origin = Origin;
-		type Index = u64;
-		type BlockNumber = u64;
-		type Hash = H256;
-		type Hashing = BlakeTwo256;
-		type Digest = Digest;
-		type AccountId = u64;
-		type Lookup = IdentityLookup<u64>;
-		type Header = Header;
-		type Event = ();
-		type Log = DigestItem;
-	}
-	impl Trait for Test {
-		type Event = ();
-		type Balance = u64;
-	}
-	type Assets = Module<Test>;
-
-	// This function basically just builds a genesis storage key/value store according to
-	// our desired mockup.
-	fn new_test_ext() -> runtime_io::TestExternalities<Blake2Hasher> {
-		system::GenesisConfig::<Test>::default().build_storage().unwrap().0.into()
-	}
-
-	#[test]
-	fn issuing_asset_units_to_issuer_should_work() {
-		with_externalities(&mut new_test_ext(), || {
-			assert_ok!(Assets::issue(Origin::signed(1), 100));
-			assert_eq!(Assets::balance(0, 1), 100);
-		});
-	}
-
-	#[test]
-	fn querying_total_supply_should_work() {
-		with_externalities(&mut new_test_ext(), || {
-			assert_ok!(Assets::issue(Origin::signed(1), 100));
-			assert_eq!(Assets::balance(0, 1), 100);
-			assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50));
-			assert_eq!(Assets::balance(0, 1), 50);
-			assert_eq!(Assets::balance(0, 2), 50);
-			assert_ok!(Assets::transfer(Origin::signed(2), 0, 3, 31));
-			assert_eq!(Assets::balance(0, 1), 50);
-			assert_eq!(Assets::balance(0, 2), 19);
-			assert_eq!(Assets::balance(0, 3), 31);
-			assert_ok!(Assets::destroy(Origin::signed(3), 0));
-			assert_eq!(Assets::total_supply(0), 69);
-		});
-	}
-
-	#[test]
-	fn transferring_amount_above_available_balance_should_work() {
-		with_externalities(&mut new_test_ext(), || {
-			assert_ok!(Assets::issue(Origin::signed(1), 100));
-			assert_eq!(Assets::balance(0, 1), 100);
-			assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50));
-			assert_eq!(Assets::balance(0, 1), 50);
-			assert_eq!(Assets::balance(0, 2), 50);
-		});
-	}
-
-	#[test]
-	fn transferring_amount_less_than_available_balance_should_not_work() {
-		with_externalities(&mut new_test_ext(), || {
-			assert_ok!(Assets::issue(Origin::signed(1), 100));
-			assert_eq!(Assets::balance(0, 1), 100);
-			assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50));
-			assert_eq!(Assets::balance(0, 1), 50);
-			assert_eq!(Assets::balance(0, 2), 50);
-			assert_ok!(Assets::destroy(Origin::signed(1), 0));
-			assert_eq!(Assets::balance(0, 1), 0);
-			assert_noop!(Assets::transfer(Origin::signed(1), 0, 1, 50), "origin account balance must be greater than or equal to the transfer amount");
-		});
-	}
-
-	#[test]
-	fn transferring_less_than_one_unit_should_not_work() {
-		with_externalities(&mut new_test_ext(), || {
-			assert_ok!(Assets::issue(Origin::signed(1), 100));
-			assert_eq!(Assets::balance(0, 1), 100);
-			assert_noop!(Assets::transfer(Origin::signed(1), 0, 2, 0), "transfer amount should be non-zero");
-		});
-	}
-
-	#[test]
-	fn transferring_more_units_than_total_supply_should_not_work() {
-		with_externalities(&mut new_test_ext(), || {
-			assert_ok!(Assets::issue(Origin::signed(1), 100));
-			assert_eq!(Assets::balance(0, 1), 100);
-			assert_noop!(Assets::transfer(Origin::signed(1), 0, 2, 101), "origin account balance must be greater than or equal to the transfer amount");
-		});
-	}
-
-	#[test]
-	fn destroying_asset_balance_with_positive_balance_should_work() {
-		with_externalities(&mut new_test_ext(), || {
-			assert_ok!(Assets::issue(Origin::signed(1), 100));
-			assert_eq!(Assets::balance(0, 1), 100);
-			assert_ok!(Assets::destroy(Origin::signed(1), 0));
-		});
-	}
-
-	#[test]
-	fn destroying_asset_balance_with_zero_balance_should_not_work() {
-		with_externalities(&mut new_test_ext(), || {
-			assert_ok!(Assets::issue(Origin::signed(1), 100));
-			assert_eq!(Assets::balance(0, 2), 0);
-			assert_noop!(Assets::destroy(Origin::signed(2), 0), "origin balance should be non-zero");
-		});
-	}
+    use super::*;
+
+    use runtime_io::with_externalities;
+    use srml_support::{assert_noop, assert_ok, impl_outer_origin};
+    use substrate_primitives::{Blake2Hasher, H256};
+    // The testing primitives are very useful for avoiding having to work with signatures
+    // or public keys. `u64` is used as the `AccountId` and no `Signature`s are required.
+    use primitives::{
+        testing::{Digest, DigestItem, Header},
+        traits::{BlakeTwo256, IdentityLookup},
+        BuildStorage,
+    };
+
+    impl_outer_origin! {
+        pub enum Origin for Test {}
+    }
+
+    // For testing the module, we construct most of a mock runtime. This means
+    // first constructing a configuration type (`Test`) which `impl`s each of the
+    // configuration traits of modules we want to use.
+    #[derive(Clone, Eq, PartialEq)]
+    pub struct Test;
+    impl system::Trait for Test {
+        type Origin = Origin;
+        type Index = u64;
+        type BlockNumber = u64;
+        type Hash = H256;
+        type Hashing = BlakeTwo256;
+        type Digest = Digest;
+        type AccountId = u64;
+        type Lookup = IdentityLookup<u64>;
+        type Header = Header;
+        type Event = ();
+        type Log = DigestItem;
+    }
+    impl Trait for Test {
+        type Event = ();
+        type Balance = u64;
+    }
+    type Assets = Module<Test>;
+
+    // This function basically just builds a genesis storage key/value store according to
+    // our desired mockup.
+    fn new_test_ext() -> runtime_io::TestExternalities<Blake2Hasher> {
+        system::GenesisConfig::<Test>::default()
+            .build_storage()
+            .unwrap()
+            .0
+            .into()
+    }
+
+    #[test]
+    fn issuing_asset_units_to_issuer_should_work() {
+        with_externalities(&mut new_test_ext(), || {
+            assert_ok!(Assets::issue(Origin::signed(1), 100));
+            assert_eq!(Assets::balance(0, 1), 100);
+        });
+    }
+
+    #[test]
+    fn querying_total_supply_should_work() {
+        with_externalities(&mut new_test_ext(), || {
+            assert_ok!(Assets::issue(Origin::signed(1), 100));
+            assert_eq!(Assets::balance(0, 1), 100);
+            assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50));
+            assert_eq!(Assets::balance(0, 1), 50);
+            assert_eq!(Assets::balance(0, 2), 50);
+            assert_ok!(Assets::transfer(Origin::signed(2), 0, 3, 31));
+            assert_eq!(Assets::balance(0, 1), 50);
+            assert_eq!(Assets::balance(0, 2), 19);
+            assert_eq!(Assets::balance(0, 3), 31);
+            assert_ok!(Assets::destroy(Origin::signed(3), 0));
+            assert_eq!(Assets::total_supply(0), 69);
+        });
+    }
+
+    #[test]
+    fn transferring_amount_above_available_balance_should_work() {
+        with_externalities(&mut new_test_ext(), || {
+            assert_ok!(Assets::issue(Origin::signed(1), 100));
+            assert_eq!(Assets::balance(0, 1), 100);
+            assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50));
+            assert_eq!(Assets::balance(0, 1), 50);
+            assert_eq!(Assets::balance(0, 2), 50);
+        });
+    }
+
+    #[test]
+    fn transferring_amount_less_than_available_balance_should_not_work() {
+        with_externalities(&mut new_test_ext(), || {
+            assert_ok!(Assets::issue(Origin::signed(1), 100));
+            assert_eq!(Assets::balance(0, 1), 100);
+            assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50));
+            assert_eq!(Assets::balance(0, 1), 50);
+            assert_eq!(Assets::balance(0, 2), 50);
+            assert_ok!(Assets::destroy(Origin::signed(1), 0));
+            assert_eq!(Assets::balance(0, 1), 0);
+            assert_noop!(
+                Assets::transfer(Origin::signed(1), 0, 1, 50),
+                "origin account balance must be greater than or equal to the transfer amount"
+            );
+        });
+    }
+
+    #[test]
+    fn transferring_less_than_one_unit_should_not_work() {
+        with_externalities(&mut new_test_ext(), || {
+            assert_ok!(Assets::issue(Origin::signed(1), 100));
+            assert_eq!(Assets::balance(0, 1), 100);
+            assert_noop!(
+                Assets::transfer(Origin::signed(1), 0, 2, 0),
+                "transfer amount should be non-zero"
+            );
+        });
+    }
+
+    #[test]
+    fn transferring_more_units_than_total_supply_should_not_work() {
+        with_externalities(&mut new_test_ext(), || {
+            assert_ok!(Assets::issue(Origin::signed(1), 100));
+            assert_eq!(Assets::balance(0, 1), 100);
+            assert_noop!(
+                Assets::transfer(Origin::signed(1), 0, 2, 101),
+                "origin account balance must be greater than or equal to the transfer amount"
+            );
+        });
+    }
+
+    #[test]
+    fn destroying_asset_balance_with_positive_balance_should_work() {
+        with_externalities(&mut new_test_ext(), || {
+            assert_ok!(Assets::issue(Origin::signed(1), 100));
+            assert_eq!(Assets::balance(0, 1), 100);
+            assert_ok!(Assets::destroy(Origin::signed(1), 0));
+        });
+    }
+
+    #[test]
+    fn destroying_asset_balance_with_zero_balance_should_not_work() {
+        with_externalities(&mut new_test_ext(), || {
+            assert_ok!(Assets::issue(Origin::signed(1), 100));
+            assert_eq!(Assets::balance(0, 2), 0);
+            assert_noop!(
+                Assets::destroy(Origin::signed(2), 0),
+                "origin balance should be non-zero"
+            );
+        });
+    }
 }
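[Editor's note: abstracting away the srml storage macros, the three dispatchables in the assets module above maintain two maps and one counter with a simple invariant: for every id, TotalSupply[id] equals the sum of Balances[(id, who)] over all holders. A plain-HashMap sketch of those semantics, with the module's own error strings (illustrative only, not the srml API):

    use std::collections::HashMap;

    type AssetId = u32;
    type AccountId = u64;
    type Balance = u64;

    #[derive(Default)]
    struct Assets {
        balances: HashMap<(AssetId, AccountId), Balance>,
        total_supply: HashMap<AssetId, Balance>,
        next_asset_id: AssetId,
    }

    impl Assets {
        // issue: the new class belongs entirely to `origin`.
        fn issue(&mut self, origin: AccountId, total: Balance) -> AssetId {
            let id = self.next_asset_id;
            self.next_asset_id += 1;
            self.balances.insert((id, origin), total);
            self.total_supply.insert(id, total);
            id
        }

        // transfer: moves balance between holders; supply is untouched.
        fn transfer(
            &mut self,
            origin: AccountId,
            id: AssetId,
            target: AccountId,
            amount: Balance,
        ) -> Result<(), &'static str> {
            let from = self.balances.entry((id, origin)).or_default();
            if amount == 0 {
                return Err("transfer amount should be non-zero");
            }
            if *from < amount {
                return Err("origin account balance must be greater than or equal to the transfer amount");
            }
            *from -= amount;
            *self.balances.entry((id, target)).or_default() += amount;
            Ok(())
        }

        // destroy: burns origin's whole holding and shrinks the supply.
        fn destroy(&mut self, origin: AccountId, id: AssetId) -> Result<(), &'static str> {
            let balance = self.balances.remove(&(id, origin)).unwrap_or(0);
            if balance == 0 {
                return Err("origin balance should be non-zero");
            }
            *self.total_supply.entry(id).or_default() -= balance;
            Ok(())
        }
    }

    fn main() {
        let mut assets = Assets::default();
        let id = assets.issue(1, 100);
        assets.transfer(1, id, 2, 50).unwrap();
        assets.destroy(2, id).unwrap();
        assert_eq!(assets.total_supply[&id], 50); // supply shrank only on destroy
    }
]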
#[cfg(feature = "std")] pub struct InherentDataProvider { - slot_duration: u64, + slot_duration: u64, } #[cfg(feature = "std")] impl InherentDataProvider { - pub fn new(slot_duration: u64) -> Self { - Self { - slot_duration - } - } + pub fn new(slot_duration: u64) -> Self { + Self { slot_duration } + } } #[cfg(feature = "std")] impl ProvideInherentData for InherentDataProvider { - fn on_register( - &self, - providers: &InherentDataProviders, - ) -> result::Result<(), RuntimeString> { - if !providers.has_provider(×tamp::INHERENT_IDENTIFIER) { - // Add the timestamp inherent data provider, as we require it. - providers.register_provider(timestamp::InherentDataProvider) - } else { - Ok(()) - } - } - - fn inherent_identifier(&self) -> &'static inherents::InherentIdentifier { - &INHERENT_IDENTIFIER - } - - fn provide_inherent_data( - &self, - inherent_data: &mut InherentData, - ) -> result::Result<(), RuntimeString> { - let timestamp = inherent_data.timestamp_inherent_data()?; - let slot_num = timestamp / self.slot_duration; - inherent_data.put_data(INHERENT_IDENTIFIER, &slot_num) - } - - fn error_to_string(&self, error: &[u8]) -> Option { - RuntimeString::decode(&mut &error[..]).map(Into::into) - } + fn on_register(&self, providers: &InherentDataProviders) -> result::Result<(), RuntimeString> { + if !providers.has_provider(×tamp::INHERENT_IDENTIFIER) { + // Add the timestamp inherent data provider, as we require it. + providers.register_provider(timestamp::InherentDataProvider) + } else { + Ok(()) + } + } + + fn inherent_identifier(&self) -> &'static inherents::InherentIdentifier { + &INHERENT_IDENTIFIER + } + + fn provide_inherent_data( + &self, + inherent_data: &mut InherentData, + ) -> result::Result<(), RuntimeString> { + let timestamp = inherent_data.timestamp_inherent_data()?; + let slot_num = timestamp / self.slot_duration; + inherent_data.put_data(INHERENT_IDENTIFIER, &slot_num) + } + + fn error_to_string(&self, error: &[u8]) -> Option { + RuntimeString::decode(&mut &error[..]).map(Into::into) + } } /// Something which can handle Aura consensus reports. pub trait HandleReport { - fn handle_report(report: AuraReport); + fn handle_report(report: AuraReport); } impl HandleReport for () { - fn handle_report(_report: AuraReport) { } + fn handle_report(_report: AuraReport) {} } pub trait Trait: timestamp::Trait { - /// The logic for handling reports. - type HandleReport: HandleReport; + /// The logic for handling reports. + type HandleReport: HandleReport; } decl_storage! { - trait Store for Module as Aura { - // The last timestamp. - LastTimestamp get(last) build(|_| T::Moment::sa(0)): T::Moment; - } + trait Store for Module as Aura { + // The last timestamp. + LastTimestamp get(last) build(|_| T::Moment::sa(0)): T::Moment; + } } decl_module! { - pub struct Module for enum Call where origin: T::Origin { } + pub struct Module for enum Call where origin: T::Origin { } } /// A report of skipped authorities in aura. #[derive(Clone, Encode, Decode, PartialEq, Eq)] #[cfg_attr(feature = "std", derive(Debug))] pub struct AuraReport { - // The first skipped slot. - start_slot: usize, - // The number of times authorities were skipped. - skipped: usize, + // The first skipped slot. + start_slot: usize, + // The number of times authorities were skipped. + skipped: usize, } impl AuraReport { - /// Call the closure with (validator_indices, punishment_count) for each - /// validator to punish. 
-	pub fn punish<F>(&self, validator_count: usize, mut punish_with: F)
-		where F: FnMut(usize, usize)
-	{
-		// If all validators have been skipped, then it implies some sort of
-		// systematic problem common to all rather than a minority of validators
-		// unfulfilling their specific duties. In this case, it doesn't make
-		// sense to punish anyone, so we guard against it.
-		if self.skipped < validator_count {
-			for index in 0..self.skipped {
-				punish_with((self.start_slot + index) % validator_count, 1);
-			}
-		}
-	}
+    /// Call the closure with (validator_indices, punishment_count) for each
+    /// validator to punish.
+    pub fn punish<F>(&self, validator_count: usize, mut punish_with: F)
+    where
+        F: FnMut(usize, usize),
+    {
+        // If all validators have been skipped, then it implies some sort of
+        // systematic problem common to all rather than a minority of validators
+        // unfulfilling their specific duties. In this case, it doesn't make
+        // sense to punish anyone, so we guard against it.
+        if self.skipped < validator_count {
+            for index in 0..self.skipped {
+                punish_with((self.start_slot + index) % validator_count, 1);
+            }
+        }
+    }
 }
 
 impl<T: Trait> Module<T> {
-	/// Determine the Aura slot-duration based on the timestamp module configuration.
-	pub fn slot_duration() -> u64 {
-		// we double the minimum block-period so each author can always propose within
-		// the majority of their slot.
-		<timestamp::Module<T>>::minimum_period().as_().saturating_mul(2)
-	}
-
-	fn on_timestamp_set<H: HandleReport>(now: T::Moment, slot_duration: T::Moment) {
-		let last = Self::last();
-		<Self as Store>::LastTimestamp::put(now.clone());
-
-		if last == T::Moment::zero() {
-			return;
-		}
-
-		assert!(slot_duration > T::Moment::zero(), "Aura slot duration cannot be zero.");
-
-		let last_slot = last / slot_duration.clone();
-		let first_skipped = last_slot.clone() + T::Moment::sa(1);
-		let cur_slot = now / slot_duration;
-
-		assert!(last_slot < cur_slot, "Only one block may be authored per slot.");
-		if cur_slot == first_skipped { return }
-
-		let slot_to_usize = |slot: T::Moment| { slot.as_() as usize };
-
-		let skipped_slots = cur_slot - last_slot - T::Moment::sa(1);
-
-		H::handle_report(AuraReport {
-			start_slot: slot_to_usize(first_skipped),
-			skipped: slot_to_usize(skipped_slots),
-		})
-	}
+    /// Determine the Aura slot-duration based on the timestamp module configuration.
+    pub fn slot_duration() -> u64 {
+        // we double the minimum block-period so each author can always propose within
+        // the majority of their slot.
+        <timestamp::Module<T>>::minimum_period()
+            .as_()
+            .saturating_mul(2)
+    }
+
+    fn on_timestamp_set<H: HandleReport>(now: T::Moment, slot_duration: T::Moment) {
+        let last = Self::last();
+        <Self as Store>::LastTimestamp::put(now.clone());
+
+        if last == T::Moment::zero() {
+            return;
+        }
+
+        assert!(
+            slot_duration > T::Moment::zero(),
+            "Aura slot duration cannot be zero."
+        );
+
+        let last_slot = last / slot_duration.clone();
+        let first_skipped = last_slot.clone() + T::Moment::sa(1);
+        let cur_slot = now / slot_duration;
+
+        assert!(
+            last_slot < cur_slot,
+            "Only one block may be authored per slot."
+        );
+        if cur_slot == first_skipped {
+            return;
+        }
+
+        let slot_to_usize = |slot: T::Moment| slot.as_() as usize;
+
+        let skipped_slots = cur_slot - last_slot - T::Moment::sa(1);
+
+        H::handle_report(AuraReport {
+            start_slot: slot_to_usize(first_skipped),
+            skipped: slot_to_usize(skipped_slots),
+        })
+    }
 }
 
 impl<T: Trait> OnTimestampSet<T::Moment> for Module<T> {
-	fn on_timestamp_set(moment: T::Moment) {
-		Self::on_timestamp_set::<T::HandleReport>(moment, T::Moment::sa(Self::slot_duration()))
-	}
+    fn on_timestamp_set(moment: T::Moment) {
+        Self::on_timestamp_set::<T::HandleReport>(moment, T::Moment::sa(Self::slot_duration()))
+    }
}
 
 /// A type for performing slashing based on aura reports.
 pub struct StakingSlasher<T>(::rstd::marker::PhantomData<T>);
 
 impl<T: staking::Trait> HandleReport for StakingSlasher<T> {
-	fn handle_report(report: AuraReport) {
-		let validators = session::Module::<T>::validators();
+    fn handle_report(report: AuraReport) {
+        let validators = session::Module::<T>::validators();
 
-		report.punish(
-			validators.len(),
-			|idx, slash_count| {
-				let v = validators[idx].clone();
-				staking::Module::<T>::on_offline_validator(v, slash_count);
-			}
-		);
-	}
+        report.punish(validators.len(), |idx, slash_count| {
+            let v = validators[idx].clone();
+            staking::Module::<T>::on_offline_validator(v, slash_count);
+        });
+    }
 }
 
 impl<T: Trait> ProvideInherent for Module<T> {
-	type Call = timestamp::Call<T>;
-	type Error = MakeFatalError<RuntimeString>;
-	const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER;
-
-	fn create_inherent(_: &InherentData) -> Option<Self::Call> {
-		None
-	}
-
-	fn check_inherent(call: &Self::Call, data: &InherentData) -> result::Result<(), Self::Error> {
-		let timestamp = match call {
-			timestamp::Call::set(ref timestamp) => timestamp.clone(),
-			_ => return Ok(()),
-		};
-
-		let timestamp_based_slot = timestamp.as_() / Self::slot_duration();
-
-		let seal_slot = data.aura_inherent_data()?;
-
-		if timestamp_based_slot == seal_slot {
-			Ok(())
-		} else {
-			Err(RuntimeString::from("timestamp set in block doesn't match slot in seal").into())
-		}
-	}
+    type Call = timestamp::Call<T>;
+    type Error = MakeFatalError<RuntimeString>;
+    const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER;
+
+    fn create_inherent(_: &InherentData) -> Option<Self::Call> {
+        None
+    }
+
+    fn check_inherent(call: &Self::Call, data: &InherentData) -> result::Result<(), Self::Error> {
+        let timestamp = match call {
+            timestamp::Call::set(ref timestamp) => timestamp.clone(),
+            _ => return Ok(()),
+        };
+
+        let timestamp_based_slot = timestamp.as_() / Self::slot_duration();
+
+        let seal_slot = data.aura_inherent_data()?;
+
+        if timestamp_based_slot == seal_slot {
+            Ok(())
+        } else {
+            Err(RuntimeString::from("timestamp set in block doesn't match slot in seal").into())
+        }
+    }
 }
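[Editor's note: to make the wrap-around in AuraReport::punish concrete: with validator_count = 4, start_slot = 2 and skipped = 3, the punished indices are 2 % 4, 3 % 4 and 4 % 4, i.e. validators 2, 3 and 0, while skipped >= validator_count punishes nobody. A free-standing sketch of just that arithmetic, mirroring the guard and loop in the diff above:

    fn punish<F: FnMut(usize, usize)>(
        start_slot: usize,
        skipped: usize,
        validator_count: usize,
        mut punish_with: F,
    ) {
        // Skipping every validator suggests a systemic fault, so nobody is punished.
        if skipped < validator_count {
            for index in 0..skipped {
                punish_with((start_slot + index) % validator_count, 1);
            }
        }
    }

    fn main() {
        let mut counts = vec![0usize; 4];
        punish(2, 3, 4, |idx, n| counts[idx] += n);
        assert_eq!(counts, vec![1, 0, 1, 1]); // wraps from index 3 back to 0

        let mut counts = vec![0usize; 4];
        punish(0, 4, 4, |idx, n| counts[idx] += n);
        assert_eq!(counts, vec![0, 0, 0, 0]); // all skipped => systemic, no slashing
    }
]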
diff --git a/srml/aura/src/mock.rs b/srml/aura/src/mock.rs
index e72e25ef94..6e5f46df81 100644
--- a/srml/aura/src/mock.rs
+++ b/srml/aura/src/mock.rs
@@ -18,14 +18,18 @@
 
 #![cfg(test)]
 
-use primitives::{BuildStorage, traits::IdentityLookup, testing::{Digest, DigestItem, Header, UintAuthorityId}};
-use srml_support::impl_outer_origin;
+use crate::{Module, Trait};
+use primitives::{
+    testing::{Digest, DigestItem, Header, UintAuthorityId},
+    traits::IdentityLookup,
+    BuildStorage,
+};
 use runtime_io;
-use substrate_primitives::{H256, Blake2Hasher};
-use crate::{Trait, Module};
+use srml_support::impl_outer_origin;
+use substrate_primitives::{Blake2Hasher, H256};
 
-impl_outer_origin!{
-	pub enum Origin for Test {}
+impl_outer_origin! {
+    pub enum Origin for Test {}
 }
 
 // Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted.
@@ -33,44 +37,58 @@ impl_outer_origin!{
 pub struct Test;
 
 impl consensus::Trait for Test {
-	type Log = DigestItem;
-	type SessionKey = UintAuthorityId;
-	type InherentOfflineReport = ();
+    type Log = DigestItem;
+    type SessionKey = UintAuthorityId;
+    type InherentOfflineReport = ();
 }
 
 impl system::Trait for Test {
-	type Origin = Origin;
-	type Index = u64;
-	type BlockNumber = u64;
-	type Hash = H256;
-	type Hashing = ::primitives::traits::BlakeTwo256;
-	type Digest = Digest;
-	type AccountId = u64;
-	type Lookup = IdentityLookup<u64>;
-	type Header = Header;
-	type Event = ();
-	type Log = DigestItem;
+    type Origin = Origin;
+    type Index = u64;
+    type BlockNumber = u64;
+    type Hash = H256;
+    type Hashing = ::primitives::traits::BlakeTwo256;
+    type Digest = Digest;
+    type AccountId = u64;
+    type Lookup = IdentityLookup<u64>;
+    type Header = Header;
+    type Event = ();
+    type Log = DigestItem;
 }
 
 impl timestamp::Trait for Test {
-	type Moment = u64;
-	type OnTimestampSet = Aura;
+    type Moment = u64;
+    type OnTimestampSet = Aura;
 }
 
 impl Trait for Test {
-	type HandleReport = ();
+    type HandleReport = ();
 }
 
 pub fn new_test_ext(authorities: Vec<u64>) -> runtime_io::TestExternalities<Blake2Hasher> {
-	let mut t = system::GenesisConfig::<Test>::default().build_storage().unwrap().0;
-	t.extend(consensus::GenesisConfig::<Test>{
-		code: vec![],
-		authorities: authorities.into_iter().map(|a| UintAuthorityId(a)).collect(),
-	}.build_storage().unwrap().0);
-	t.extend(timestamp::GenesisConfig::<Test>{
-		minimum_period: 1,
-	}.build_storage().unwrap().0);
-	t.into()
+    let mut t = system::GenesisConfig::<Test>::default()
+        .build_storage()
+        .unwrap()
+        .0;
+    t.extend(
+        consensus::GenesisConfig::<Test> {
+            code: vec![],
+            authorities: authorities
+                .into_iter()
+                .map(|a| UintAuthorityId(a))
+                .collect(),
+        }
+        .build_storage()
+        .unwrap()
+        .0,
+    );
+    t.extend(
+        timestamp::GenesisConfig::<Test> { minimum_period: 1 }
+            .build_storage()
+            .unwrap()
+            .0,
+    );
+    t.into()
 }
 
 pub type System = system::Module<Test>;
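[Editor's note: the mock above sets minimum_period = 1, and Aura::slot_duration() doubles the minimum period, so the tests that follow run with a slot duration of 2: a block authored at timestamp 5 * slot_duration lands in slot 5, and the next block at 8 * slot_duration lands in slot 8, leaving slots 6 and 7 skipped. The arithmetic, spelled out as a sketch:

    fn main() {
        let minimum_period: u64 = 1; // from the mock's timestamp GenesisConfig
        let slot_duration = minimum_period.saturating_mul(2); // Aura::slot_duration()

        let last_slot = (5 * slot_duration) / slot_duration; // block 1 authored in slot 5
        let cur_slot = (8 * slot_duration) / slot_duration; // block 2 authored in slot 8

        let first_skipped = last_slot + 1;
        let skipped_slots = cur_slot - last_slot - 1;
        assert_eq!((first_skipped, skipped_slots), (6, 2)); // slots 6 and 7 were missed
    }
]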
diff --git a/srml/aura/src/tests.rs b/srml/aura/src/tests.rs
index e74c7dace2..0d54d85409 100644
--- a/srml/aura/src/tests.rs
+++ b/srml/aura/src/tests.rs
@@ -18,75 +18,75 @@
 
 #![cfg(test)]
 
+use crate::mock::{new_test_ext, Aura, System};
+use crate::{AuraReport, HandleReport};
 use lazy_static::lazy_static;
-use crate::mock::{System, Aura, new_test_ext};
+use parking_lot::Mutex;
 use primitives::traits::Header;
 use runtime_io::with_externalities;
-use parking_lot::Mutex;
-use crate::{AuraReport, HandleReport};
 
 #[test]
 fn aura_report_gets_skipped_correctly() {
-	let mut report = AuraReport {
-		start_slot: 3,
-		skipped: 15,
-	};
-
-	let mut validators = vec![0; 10];
-	report.punish(10, |idx, count| validators[idx] += count);
-	assert_eq!(validators, vec![0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
-
-	let mut validators = vec![0; 10];
-	report.skipped = 5;
-	report.punish(10, |idx, count| validators[idx] += count);
-	assert_eq!(validators, vec![0, 0, 0, 1, 1, 1, 1, 1, 0, 0]);
-
-	let mut validators = vec![0; 10];
-	report.start_slot = 8;
-	report.punish(10, |idx, count| validators[idx] += count);
-	assert_eq!(validators, vec![1, 1, 1, 0, 0, 0, 0, 0, 1, 1]);
-
-	let mut validators = vec![0; 4];
-	report.start_slot = 1;
-	report.skipped = 3;
-	report.punish(4, |idx, count| validators[idx] += count);
-	assert_eq!(validators, vec![0, 1, 1, 1]);
-
-	let mut validators = vec![0; 4];
-	report.start_slot = 2;
-	report.punish(4, |idx, count| validators[idx] += count);
-	assert_eq!(validators, vec![1, 0, 1, 1]);
+    let mut report = AuraReport {
+        start_slot: 3,
+        skipped: 15,
+    };
+
+    let mut validators = vec![0; 10];
+    report.punish(10, |idx, count| validators[idx] += count);
+    assert_eq!(validators, vec![0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
+
+    let mut validators = vec![0; 10];
+    report.skipped = 5;
+    report.punish(10, |idx, count| validators[idx] += count);
+    assert_eq!(validators, vec![0, 0, 0, 1, 1, 1, 1, 1, 0, 0]);
+
+    let mut validators = vec![0; 10];
+    report.start_slot = 8;
+    report.punish(10, |idx, count| validators[idx] += count);
+    assert_eq!(validators, vec![1, 1, 1, 0, 0, 0, 0, 0, 1, 1]);
+
+    let mut validators = vec![0; 4];
+    report.start_slot = 1;
+    report.skipped = 3;
+    report.punish(4, |idx, count| validators[idx] += count);
+    assert_eq!(validators, vec![0, 1, 1, 1]);
+
+    let mut validators = vec![0; 4];
+    report.start_slot = 2;
+    report.punish(4, |idx, count| validators[idx] += count);
+    assert_eq!(validators, vec![1, 0, 1, 1]);
 }
 
 #[test]
 fn aura_reports_offline() {
-	lazy_static! {
-		static ref SLASH_COUNTS: Mutex<Vec<usize>> = Mutex::new(vec![0; 4]);
-	}
-
-	struct HandleTestReport;
-	impl HandleReport for HandleTestReport {
-		fn handle_report(report: AuraReport) {
-			let mut counts = SLASH_COUNTS.lock();
-			report.punish(counts.len(), |idx, count| counts[idx] += count);
-		}
-	}
-
-	with_externalities(&mut new_test_ext(vec![0, 1, 2, 3]), || {
-		System::initialize(&1, &Default::default(), &Default::default());
-		let slot_duration = Aura::slot_duration();
-
-		Aura::on_timestamp_set::<HandleTestReport>(5 * slot_duration, slot_duration);
-		let header = System::finalize();
-
-		// no slashing when last step was 0.
-		assert_eq!(SLASH_COUNTS.lock().as_slice(), &[0, 0, 0, 0]);
-
-		System::initialize(&2, &header.hash(), &Default::default());
-		Aura::on_timestamp_set::<HandleTestReport>(8 * slot_duration, slot_duration);
-		let _header = System::finalize();
-
-		// Steps 6 and 7 were skipped.
-		assert_eq!(SLASH_COUNTS.lock().as_slice(), &[0, 0, 1, 1]);
-	});
+    lazy_static! {
+        static ref SLASH_COUNTS: Mutex<Vec<usize>> = Mutex::new(vec![0; 4]);
+    }
+
+    struct HandleTestReport;
+    impl HandleReport for HandleTestReport {
+        fn handle_report(report: AuraReport) {
+            let mut counts = SLASH_COUNTS.lock();
+            report.punish(counts.len(), |idx, count| counts[idx] += count);
+        }
+    }
+
+    with_externalities(&mut new_test_ext(vec![0, 1, 2, 3]), || {
+        System::initialize(&1, &Default::default(), &Default::default());
+        let slot_duration = Aura::slot_duration();
+
+        Aura::on_timestamp_set::<HandleTestReport>(5 * slot_duration, slot_duration);
+        let header = System::finalize();
+
+        // no slashing when last step was 0.
+        assert_eq!(SLASH_COUNTS.lock().as_slice(), &[0, 0, 0, 0]);
+
+        System::initialize(&2, &header.hash(), &Default::default());
+        Aura::on_timestamp_set::<HandleTestReport>(8 * slot_duration, slot_duration);
+        let _header = System::finalize();
+
+        // Steps 6 and 7 were skipped.
+        assert_eq!(SLASH_COUNTS.lock().as_slice(), &[0, 0, 1, 1]);
+    });
 }
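[Editor's note: the balances diff below carries the VestingSchedule::locked_at logic: the amount still locked at block n is offset.max(per_block * n) - per_block * n, a linear unlock that bottoms out at zero, with a checked multiply guarding overflow. A u64 sketch of the same formula ahead of the diff:

    struct VestingSchedule {
        offset: u64,    // locked amount at genesis
        per_block: u64, // amount unlocked every block
    }

    impl VestingSchedule {
        // Mirrors locked_at from the diff below: saturates at zero once
        // per_block * n overtakes offset, and treats overflow as fully unlocked.
        fn locked_at(&self, n: u64) -> u64 {
            match n.checked_mul(self.per_block) {
                Some(x) => self.offset.max(x) - x,
                None => 0,
            }
        }
    }

    fn main() {
        let schedule = VestingSchedule { offset: 100, per_block: 10 };
        assert_eq!(schedule.locked_at(0), 100); // everything locked at genesis
        assert_eq!(schedule.locked_at(4), 60);  // 100 - 40
        assert_eq!(schedule.locked_at(15), 0);  // fully unlocked past block 10
    }
]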
- type DustRemoval: OnUnbalanced>; - - /// The overarching event type. - type Event: From> + Into<::Event>; + /// The balance of an account. + type Balance: Parameter + + Member + + SimpleArithmetic + + Codec + + Default + + Copy + + As + + As + + MaybeSerializeDebug; + + /// A function that is invoked when the free-balance has fallen below the existential deposit and + /// has been reduced to zero. + /// + /// Gives a chance to clean up resources associated with the given account. + type OnFreeBalanceZero: OnFreeBalanceZero; + + /// Handler for when a new account is created. + type OnNewAccount: OnNewAccount; + + /// Handler for the unbalanced reduction when taking transaction fees. + type TransactionPayment: OnUnbalanced>; + + /// Handler for the unbalanced reduction when taking fees associated with balance + /// transfer (which may also include account creation). + type TransferPayment: OnUnbalanced>; + + /// Handler for the unbalanced reduction when removing a dust account. + type DustRemoval: OnUnbalanced>; + + /// The overarching event type. + type Event: From> + Into<::Event>; } impl, I: Instance> Subtrait for T { - type Balance = T::Balance; - type OnFreeBalanceZero = T::OnFreeBalanceZero; - type OnNewAccount = T::OnNewAccount; + type Balance = T::Balance; + type OnFreeBalanceZero = T::OnFreeBalanceZero; + type OnNewAccount = T::OnNewAccount; } decl_event!( @@ -255,339 +271,338 @@ decl_event!( #[derive(Encode, Decode, Copy, Clone, PartialEq, Eq)] #[cfg_attr(feature = "std", derive(Debug))] pub struct VestingSchedule { - /// Locked amount at genesis. - pub offset: Balance, - /// Amount that gets unlocked every block from genesis. - pub per_block: Balance, + /// Locked amount at genesis. + pub offset: Balance, + /// Amount that gets unlocked every block from genesis. + pub per_block: Balance, } impl> VestingSchedule { - /// Amount locked at block `n`. - pub fn locked_at>(&self, n: BlockNumber) -> Balance { - if let Some(x) = Balance::sa(n.as_()).checked_mul(&self.per_block) { - self.offset.max(x) - x - } else { - Zero::zero() - } - } + /// Amount locked at block `n`. + pub fn locked_at>(&self, n: BlockNumber) -> Balance { + if let Some(x) = Balance::sa(n.as_()).checked_mul(&self.per_block) { + self.offset.max(x) - x + } else { + Zero::zero() + } + } } #[derive(Encode, Decode, Clone, PartialEq, Eq)] #[cfg_attr(feature = "std", derive(Debug))] pub struct BalanceLock { - pub id: LockIdentifier, - pub amount: Balance, - pub until: BlockNumber, - pub reasons: WithdrawReasons, + pub id: LockIdentifier, + pub amount: Balance, + pub until: BlockNumber, + pub reasons: WithdrawReasons, } decl_storage! { - trait Store for Module, I: Instance=DefaultInstance> as Balances { - /// The total units issued in the system. - pub TotalIssuance get(total_issuance) build(|config: &GenesisConfig| { - config.balances.iter().fold(Zero::zero(), |acc: T::Balance, &(_, n)| acc + n) - }): T::Balance; - /// The minimum amount required to keep an account open. - pub ExistentialDeposit get(existential_deposit) config(): T::Balance; - /// The fee required to make a transfer. - pub TransferFee get(transfer_fee) config(): T::Balance; - /// The fee required to create an account. - pub CreationFee get(creation_fee) config(): T::Balance; - /// The fee to be paid for making a transaction; the base. - pub TransactionBaseFee get(transaction_base_fee) config(): T::Balance; - /// The fee to be paid for making a transaction; the per-byte portion. 
- pub TransactionByteFee get(transaction_byte_fee) config(): T::Balance; - - /// Information regarding the vesting of a given account. - pub Vesting get(vesting) build(|config: &GenesisConfig| { - config.vesting.iter().filter_map(|&(ref who, begin, length)| { - let begin: u64 = begin.as_(); - let length: u64 = length.as_(); - let begin: T::Balance = As::sa(begin); - let length: T::Balance = As::sa(length); - - config.balances.iter() - .find(|&&(ref w, _)| w == who) - .map(|&(_, balance)| { - // <= begin it should be >= balance - // >= begin+length it should be <= 0 - - let per_block = balance / length.max(primitives::traits::One::one()); - let offset = begin * per_block + balance; - - (who.clone(), VestingSchedule { offset, per_block }) - }) - }).collect::>() - }): map T::AccountId => Option>; - - /// The 'free' balance of a given account. - /// - /// This is the only balance that matters in terms of most operations on tokens. It - /// alone is used to determine the balance when in the contract execution environment. When this - /// balance falls below the value of `ExistentialDeposit`, then the 'current account' is - /// deleted: specifically `FreeBalance`. Further, the `OnFreeBalanceZero` callback - /// is invoked, giving a chance to external modules to clean up data associated with - /// the deleted account. - /// - /// `system::AccountNonce` is also deleted if `ReservedBalance` is also zero (it also gets - /// collapsed to zero if it ever becomes less than `ExistentialDeposit`. - pub FreeBalance get(free_balance) build(|config: &GenesisConfig| config.balances.clone()): map T::AccountId => T::Balance; - - /// The amount of the balance of a given account that is externally reserved; this can still get - /// slashed, but gets slashed last of all. - /// - /// This balance is a 'reserve' balance that other subsystems use in order to set aside tokens - /// that are still 'owned' by the account holder, but which are suspendable. - /// - /// When this balance falls below the value of `ExistentialDeposit`, then this 'reserve account' - /// is deleted: specifically, `ReservedBalance`. - /// - /// `system::AccountNonce` is also deleted if `FreeBalance` is also zero (it also gets - /// collapsed to zero if it ever becomes less than `ExistentialDeposit`.) - pub ReservedBalance get(reserved_balance): map T::AccountId => T::Balance; - - /// Any liquidity locks on some account balances. - pub Locks get(locks): map T::AccountId => Vec>; - } - add_extra_genesis { - config(balances): Vec<(T::AccountId, T::Balance)>; - config(vesting): Vec<(T::AccountId, T::BlockNumber, T::BlockNumber)>; // begin, length - } - extra_genesis_skip_phantom_data_field; + trait Store for Module, I: Instance=DefaultInstance> as Balances { + /// The total units issued in the system. + pub TotalIssuance get(total_issuance) build(|config: &GenesisConfig| { + config.balances.iter().fold(Zero::zero(), |acc: T::Balance, &(_, n)| acc + n) + }): T::Balance; + /// The minimum amount required to keep an account open. + pub ExistentialDeposit get(existential_deposit) config(): T::Balance; + /// The fee required to make a transfer. + pub TransferFee get(transfer_fee) config(): T::Balance; + /// The fee required to create an account. + pub CreationFee get(creation_fee) config(): T::Balance; + /// The fee to be paid for making a transaction; the base. + pub TransactionBaseFee get(transaction_base_fee) config(): T::Balance; + /// The fee to be paid for making a transaction; the per-byte portion. 
+        pub TransactionByteFee get(transaction_byte_fee) config(): T::Balance;
+
+        /// Information regarding the vesting of a given account.
+        pub Vesting get(vesting) build(|config: &GenesisConfig<T, I>| {
+            config.vesting.iter().filter_map(|&(ref who, begin, length)| {
+                let begin: u64 = begin.as_();
+                let length: u64 = length.as_();
+                let begin: T::Balance = As::sa(begin);
+                let length: T::Balance = As::sa(length);
+
+                config.balances.iter()
+                    .find(|&&(ref w, _)| w == who)
+                    .map(|&(_, balance)| {
+                        // <= begin it should be >= balance
+                        // >= begin+length it should be <= 0
+
+                        let per_block = balance / length.max(primitives::traits::One::one());
+                        let offset = begin * per_block + balance;
+
+                        (who.clone(), VestingSchedule { offset, per_block })
+                    })
+            }).collect::<Vec<_>>()
+        }): map T::AccountId => Option<VestingSchedule<T::Balance>>;
+
+        /// The 'free' balance of a given account.
+        ///
+        /// This is the only balance that matters in terms of most operations on tokens. It
+        /// alone is used to determine the balance when in the contract execution environment. When this
+        /// balance falls below the value of `ExistentialDeposit`, then the 'current account' is
+        /// deleted: specifically `FreeBalance`. Further, the `OnFreeBalanceZero` callback
+        /// is invoked, giving a chance to external modules to clean up data associated with
+        /// the deleted account.
+        ///
+        /// `system::AccountNonce` is also deleted if `ReservedBalance` is also zero (it also gets
+        /// collapsed to zero if it ever becomes less than `ExistentialDeposit`.)
+        pub FreeBalance get(free_balance) build(|config: &GenesisConfig<T, I>| config.balances.clone()): map T::AccountId => T::Balance;
+
+        /// The amount of the balance of a given account that is externally reserved; this can still get
+        /// slashed, but gets slashed last of all.
+        ///
+        /// This balance is a 'reserve' balance that other subsystems use in order to set aside tokens
+        /// that are still 'owned' by the account holder, but which are suspendable.
+        ///
+        /// When this balance falls below the value of `ExistentialDeposit`, then this 'reserve account'
+        /// is deleted: specifically, `ReservedBalance`.
+        ///
+        /// `system::AccountNonce` is also deleted if `FreeBalance` is also zero (it also gets
+        /// collapsed to zero if it ever becomes less than `ExistentialDeposit`.)
+        pub ReservedBalance get(reserved_balance): map T::AccountId => T::Balance;
+
+        /// Any liquidity locks on some account balances.
+        pub Locks get(locks): map T::AccountId => Vec<BalanceLock<T::Balance, T::BlockNumber>>;
+    }
+    add_extra_genesis {
+        config(balances): Vec<(T::AccountId, T::Balance)>;
+        config(vesting): Vec<(T::AccountId, T::BlockNumber, T::BlockNumber)>; // begin, length
+    }
+    extra_genesis_skip_phantom_data_field;
}

decl_module! {
-    pub struct Module<T: Trait<I>, I: Instance = DefaultInstance> for enum Call where origin: T::Origin {
-        fn deposit_event() = default;
-
-        /// Transfer some liquid free balance to another account.
-        ///
-        /// `transfer` will set the `FreeBalance` of the sender and receiver.
-        /// It will decrease the total issuance of the system by the `TransferFee`.
-        /// If the sender's account is below the existential deposit as a result
-        /// of the transfer, the account will be reaped.
-        ///
-        /// The dispatch origin for this call must be `Signed` by the transactor.
-        pub fn transfer(
-            origin,
-            dest: <T::Lookup as StaticLookup>::Source,
-            #[compact] value: T::Balance
-        ) {
-            let transactor = ensure_signed(origin)?;
-            let dest = T::Lookup::lookup(dest)?;
-            <Self as Currency<_>>::transfer(&transactor, &dest, value)?;
-        }
-
-        /// Set the balances of a given account.
- /// - /// This will alter `FreeBalance` and `ReservedBalance` in storage. - /// If the new free or reserved balance is below the existential deposit, - /// it will also decrease the total issuance of the system (`TotalIssuance`) - /// and reset the account nonce (`system::AccountNonce`). - /// - /// The dispatch origin for this call is `root`. - fn set_balance( - who: ::Source, - #[compact] free: T::Balance, - #[compact] reserved: T::Balance - ) { - let who = T::Lookup::lookup(who)?; - Self::set_free_balance(&who, free); - Self::set_reserved_balance(&who, reserved); - } - } + pub struct Module, I: Instance = DefaultInstance> for enum Call where origin: T::Origin { + fn deposit_event() = default; + + /// Transfer some liquid free balance to another account. + /// + /// `transfer` will set the `FreeBalance` of the sender and receiver. + /// It will decrease the total issuance of the system by the `TransferFee`. + /// If the sender's account is below the existential deposit as a result + /// of the transfer, the account will be reaped. + /// + /// The dispatch origin for this call must be `Signed` by the transactor. + pub fn transfer( + origin, + dest: ::Source, + #[compact] value: T::Balance + ) { + let transactor = ensure_signed(origin)?; + let dest = T::Lookup::lookup(dest)?; + >::transfer(&transactor, &dest, value)?; + } + + /// Set the balances of a given account. + /// + /// This will alter `FreeBalance` and `ReservedBalance` in storage. + /// If the new free or reserved balance is below the existential deposit, + /// it will also decrease the total issuance of the system (`TotalIssuance`) + /// and reset the account nonce (`system::AccountNonce`). + /// + /// The dispatch origin for this call is `root`. + fn set_balance( + who: ::Source, + #[compact] free: T::Balance, + #[compact] reserved: T::Balance + ) { + let who = T::Lookup::lookup(who)?; + Self::set_free_balance(&who, free); + Self::set_reserved_balance(&who, reserved); + } + } } impl, I: Instance> Module { - - // PUBLIC IMMUTABLES - - /// Get the amount that is currently being vested and cannot be transferred out of this account. - pub fn vesting_balance(who: &T::AccountId) -> T::Balance { - if let Some(v) = Self::vesting(who) { - Self::free_balance(who).min(v.locked_at(>::block_number())) - } else { - Zero::zero() - } - } - - // PRIVATE MUTABLES - - /// Set the reserved balance of an account to some new value. Will enforce `ExistentialDeposit` - /// law, annulling the account as needed. - /// - /// Doesn't do any preparatory work for creating a new account, so should only be used when it - /// is known that the account already exists. - /// - /// NOTE: LOW-LEVEL: This will not attempt to maintain total issuance. It is expected that - /// the caller will do this. - fn set_reserved_balance(who: &T::AccountId, balance: T::Balance) -> UpdateBalanceOutcome { - if balance < Self::existential_deposit() { - >::insert(who, balance); - Self::on_reserved_too_low(who); - UpdateBalanceOutcome::AccountKilled - } else { - >::insert(who, balance); - UpdateBalanceOutcome::Updated - } - } - - /// Set the free balance of an account to some new value. Will enforce `ExistentialDeposit` - /// law, annulling the account as needed. - /// - /// Doesn't do any preparatory work for creating a new account, so should only be used when it - /// is known that the account already exists. - /// - /// NOTE: LOW-LEVEL: This will not attempt to maintain total issuance. It is expected that - /// the caller will do this. 
- fn set_free_balance(who: &T::AccountId, balance: T::Balance) -> UpdateBalanceOutcome { - // Commented out for now - but consider it instructive. - // assert!(!Self::total_balance(who).is_zero()); - // assert!(Self::free_balance(who) > Self::existential_deposit()); - if balance < Self::existential_deposit() { - >::insert(who, balance); - Self::on_free_too_low(who); - UpdateBalanceOutcome::AccountKilled - } else { - >::insert(who, balance); - UpdateBalanceOutcome::Updated - } - } - - /// Register a new account (with existential balance). - /// - /// This just calls appropriate hooks. It doesn't (necessarily) make any state changes. - fn new_account(who: &T::AccountId, balance: T::Balance) { - T::OnNewAccount::on_new_account(&who); - Self::deposit_event(RawEvent::NewAccount(who.clone(), balance.clone())); - } - - /// Unregister an account. - /// - /// This just removes the nonce and leaves an event. - fn reap_account(who: &T::AccountId) { - >::remove(who); - Self::deposit_event(RawEvent::ReapedAccount(who.clone())); - } - - /// Account's free balance has dropped below existential deposit. Kill its - /// free side and the account completely if its reserved size is already dead. - /// - /// Will maintain total issuance. - fn on_free_too_low(who: &T::AccountId) { - let dust = >::take(who); - >::remove(who); - - // underflow should never happen, but if it does, there's not much we can do about it. - if !dust.is_zero() { - T::DustRemoval::on_unbalanced(NegativeImbalance(dust)); - } - - T::OnFreeBalanceZero::on_free_balance_zero(who); - - if Self::reserved_balance(who).is_zero() { - Self::reap_account(who); - } - } - - /// Account's reserved balance has dropped below existential deposit. Kill its - /// reserved side and the account completely if its free size is already dead. - /// - /// Will maintain total issuance. - fn on_reserved_too_low(who: &T::AccountId) { - let dust = >::take(who); - - // underflow should never happen, but it if does, there's nothing to be done here. - if !dust.is_zero() { - T::DustRemoval::on_unbalanced(NegativeImbalance(dust)); - } - - if Self::free_balance(who).is_zero() { - Self::reap_account(who); - } - } + // PUBLIC IMMUTABLES + + /// Get the amount that is currently being vested and cannot be transferred out of this account. + pub fn vesting_balance(who: &T::AccountId) -> T::Balance { + if let Some(v) = Self::vesting(who) { + Self::free_balance(who).min(v.locked_at(>::block_number())) + } else { + Zero::zero() + } + } + + // PRIVATE MUTABLES + + /// Set the reserved balance of an account to some new value. Will enforce `ExistentialDeposit` + /// law, annulling the account as needed. + /// + /// Doesn't do any preparatory work for creating a new account, so should only be used when it + /// is known that the account already exists. + /// + /// NOTE: LOW-LEVEL: This will not attempt to maintain total issuance. It is expected that + /// the caller will do this. + fn set_reserved_balance(who: &T::AccountId, balance: T::Balance) -> UpdateBalanceOutcome { + if balance < Self::existential_deposit() { + >::insert(who, balance); + Self::on_reserved_too_low(who); + UpdateBalanceOutcome::AccountKilled + } else { + >::insert(who, balance); + UpdateBalanceOutcome::Updated + } + } + + /// Set the free balance of an account to some new value. Will enforce `ExistentialDeposit` + /// law, annulling the account as needed. + /// + /// Doesn't do any preparatory work for creating a new account, so should only be used when it + /// is known that the account already exists. 
+    ///
+    /// NOTE: LOW-LEVEL: This will not attempt to maintain total issuance. It is expected that
+    /// the caller will do this.
+    fn set_free_balance(who: &T::AccountId, balance: T::Balance) -> UpdateBalanceOutcome {
+        // Commented out for now - but consider it instructive.
+        // assert!(!Self::total_balance(who).is_zero());
+        // assert!(Self::free_balance(who) > Self::existential_deposit());
+        if balance < Self::existential_deposit() {
+            <FreeBalance<T, I>>::insert(who, balance);
+            Self::on_free_too_low(who);
+            UpdateBalanceOutcome::AccountKilled
+        } else {
+            <FreeBalance<T, I>>::insert(who, balance);
+            UpdateBalanceOutcome::Updated
+        }
+    }
+
+    /// Register a new account (with existential balance).
+    ///
+    /// This just calls appropriate hooks. It doesn't (necessarily) make any state changes.
+    fn new_account(who: &T::AccountId, balance: T::Balance) {
+        T::OnNewAccount::on_new_account(&who);
+        Self::deposit_event(RawEvent::NewAccount(who.clone(), balance.clone()));
+    }
+
+    /// Unregister an account.
+    ///
+    /// This just removes the nonce and leaves an event.
+    fn reap_account(who: &T::AccountId) {
+        <system::AccountNonce<T>>::remove(who);
+        Self::deposit_event(RawEvent::ReapedAccount(who.clone()));
+    }
+
+    /// Account's free balance has dropped below existential deposit. Kill its
+    /// free side and the account completely if its reserved size is already dead.
+    ///
+    /// Will maintain total issuance.
+    fn on_free_too_low(who: &T::AccountId) {
+        let dust = <FreeBalance<T, I>>::take(who);
+        <Locks<T, I>>::remove(who);
+
+        // underflow should never happen, but if it does, there's not much we can do about it.
+        if !dust.is_zero() {
+            T::DustRemoval::on_unbalanced(NegativeImbalance(dust));
+        }
+
+        T::OnFreeBalanceZero::on_free_balance_zero(who);
+
+        if Self::reserved_balance(who).is_zero() {
+            Self::reap_account(who);
+        }
+    }
+
+    /// Account's reserved balance has dropped below existential deposit. Kill its
+    /// reserved side and the account completely if its free side is already dead.
+    ///
+    /// Will maintain total issuance.
+    fn on_reserved_too_low(who: &T::AccountId) {
+        let dust = <ReservedBalance<T, I>>::take(who);
+
+        // underflow should never happen, but if it does, there's nothing to be done here.
+        if !dust.is_zero() {
+            T::DustRemoval::on_unbalanced(NegativeImbalance(dust));
+        }
+
+        if Self::free_balance(who).is_zero() {
+            Self::reap_account(who);
+        }
+    }
}

/// Opaque, move-only struct with private fields that serves as a token denoting that
/// funds have been created without any equal and opposite accounting.
#[must_use]
-pub struct PositiveImbalance<T: Subtrait<I>, I: Instance=DefaultInstance>(T::Balance);
+pub struct PositiveImbalance<T: Subtrait<I>, I: Instance = DefaultInstance>(T::Balance);

/// Opaque, move-only struct with private fields that serves as a token denoting that
/// funds have been destroyed without any equal and opposite accounting.
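/// Dropping one of these squares `TotalIssuance` down by the contained amount (see the
/// `Drop` impl below).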
#[must_use] -pub struct NegativeImbalance, I: Instance=DefaultInstance>(T::Balance); +pub struct NegativeImbalance, I: Instance = DefaultInstance>(T::Balance); impl, I: Instance> Imbalance for PositiveImbalance { - type Opposite = NegativeImbalance; - - fn zero() -> Self { - Self(Zero::zero()) - } - fn drop_zero(self) -> result::Result<(), Self> { - if self.0.is_zero() { - Ok(()) - } else { - Err(self) - } - } - fn split(self, amount: T::Balance) -> (Self, Self) { - let first = self.0.min(amount); - let second = self.0 - first; - (Self(first), Self(second)) - } - fn merge(self, other: Self) -> Self { - Self(self.0.saturating_add(other.0)) - } - fn subsume(&mut self, other: Self) { - self.0 = self.0.saturating_add(other.0) - } - fn offset(self, other: Self::Opposite) -> result::Result { - if self.0 >= other.0 { - Ok(Self(self.0 - other.0)) - } else { - Err(NegativeImbalance(other.0 - self.0)) - } - } - fn peek(&self) -> T::Balance { - self.0.clone() - } + type Opposite = NegativeImbalance; + + fn zero() -> Self { + Self(Zero::zero()) + } + fn drop_zero(self) -> result::Result<(), Self> { + if self.0.is_zero() { + Ok(()) + } else { + Err(self) + } + } + fn split(self, amount: T::Balance) -> (Self, Self) { + let first = self.0.min(amount); + let second = self.0 - first; + (Self(first), Self(second)) + } + fn merge(self, other: Self) -> Self { + Self(self.0.saturating_add(other.0)) + } + fn subsume(&mut self, other: Self) { + self.0 = self.0.saturating_add(other.0) + } + fn offset(self, other: Self::Opposite) -> result::Result { + if self.0 >= other.0 { + Ok(Self(self.0 - other.0)) + } else { + Err(NegativeImbalance(other.0 - self.0)) + } + } + fn peek(&self) -> T::Balance { + self.0.clone() + } } impl, I: Instance> Imbalance for NegativeImbalance { - type Opposite = PositiveImbalance; - - fn zero() -> Self { - Self(Zero::zero()) - } - fn drop_zero(self) -> result::Result<(), Self> { - if self.0.is_zero() { - Ok(()) - } else { - Err(self) - } - } - fn split(self, amount: T::Balance) -> (Self, Self) { - let first = self.0.min(amount); - let second = self.0 - first; - (Self(first), Self(second)) - } - fn merge(self, other: Self) -> Self { - Self(self.0.saturating_add(other.0)) - } - fn subsume(&mut self, other: Self) { - self.0 = self.0.saturating_add(other.0) - } - fn offset(self, other: Self::Opposite) -> result::Result { - if self.0 >= other.0 { - Ok(Self(self.0 - other.0)) - } else { - Err(PositiveImbalance(other.0 - self.0)) - } - } - fn peek(&self) -> T::Balance { - self.0.clone() - } + type Opposite = PositiveImbalance; + + fn zero() -> Self { + Self(Zero::zero()) + } + fn drop_zero(self) -> result::Result<(), Self> { + if self.0.is_zero() { + Ok(()) + } else { + Err(self) + } + } + fn split(self, amount: T::Balance) -> (Self, Self) { + let first = self.0.min(amount); + let second = self.0 - first; + (Self(first), Self(second)) + } + fn merge(self, other: Self) -> Self { + Self(self.0.saturating_add(other.0)) + } + fn subsume(&mut self, other: Self) { + self.0 = self.0.saturating_add(other.0) + } + fn offset(self, other: Self::Opposite) -> result::Result { + if self.0 >= other.0 { + Ok(Self(self.0 - other.0)) + } else { + Err(PositiveImbalance(other.0 - self.0)) + } + } + fn peek(&self) -> T::Balance { + self.0.clone() + } } // TODO: #2052 @@ -604,408 +619,449 @@ impl, I: Instance> Imbalance for NegativeImbalance // are placed in their own SRML module. 
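// A worked example of the `offset` rule implemented above (values illustrative):
// offsetting `PositiveImbalance(5)` against `NegativeImbalance(3)` returns
// `Ok(PositiveImbalance(2))`, while offsetting `PositiveImbalance(2)` against
// `NegativeImbalance(7)` returns `Err(NegativeImbalance(5))`. Whichever value
// survives adjusts `TotalIssuance` when it is finally dropped.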
struct ElevatedTrait<T: Subtrait<I>, I: Instance>(T, I);
impl<T: Subtrait<I>, I: Instance> Clone for ElevatedTrait<T, I> {
-    fn clone(&self) -> Self { unimplemented!() }
+    fn clone(&self) -> Self {
+        unimplemented!()
+    }
}
impl<T: Subtrait<I>, I: Instance> PartialEq for ElevatedTrait<T, I> {
-    fn eq(&self, _: &Self) -> bool { unimplemented!() }
+    fn eq(&self, _: &Self) -> bool {
+        unimplemented!()
+    }
}
impl<T: Subtrait<I>, I: Instance> Eq for ElevatedTrait<T, I> {}
impl<T: Subtrait<I>, I: Instance> system::Trait for ElevatedTrait<T, I> {
-    type Origin = T::Origin;
-    type Index = T::Index;
-    type BlockNumber = T::BlockNumber;
-    type Hash = T::Hash;
-    type Hashing = T::Hashing;
-    type Digest = T::Digest;
-    type AccountId = T::AccountId;
-    type Lookup = T::Lookup;
-    type Header = T::Header;
-    type Event = ();
-    type Log = T::Log;
+    type Origin = T::Origin;
+    type Index = T::Index;
+    type BlockNumber = T::BlockNumber;
+    type Hash = T::Hash;
+    type Hashing = T::Hashing;
+    type Digest = T::Digest;
+    type AccountId = T::AccountId;
+    type Lookup = T::Lookup;
+    type Header = T::Header;
+    type Event = ();
+    type Log = T::Log;
}
impl<T: Subtrait<I>, I: Instance> Trait<I> for ElevatedTrait<T, I> {
-    type Balance = T::Balance;
-    type OnFreeBalanceZero = T::OnFreeBalanceZero;
-    type OnNewAccount = T::OnNewAccount;
-    type Event = ();
-    type TransactionPayment = ();
-    type TransferPayment = ();
-    type DustRemoval = ();
+    type Balance = T::Balance;
+    type OnFreeBalanceZero = T::OnFreeBalanceZero;
+    type OnNewAccount = T::OnNewAccount;
+    type Event = ();
+    type TransactionPayment = ();
+    type TransferPayment = ();
+    type DustRemoval = ();
}

impl<T: Subtrait<I>, I: Instance> Drop for PositiveImbalance<T, I> {
-    /// Basic drop handler will just square up the total issuance.
-    fn drop(&mut self) {
-        <TotalIssuance<ElevatedTrait<T, I>, I>>::mutate(|v| *v = v.saturating_add(self.0));
-    }
+    /// Basic drop handler will just square up the total issuance.
+    fn drop(&mut self) {
+        <TotalIssuance<ElevatedTrait<T, I>, I>>::mutate(|v| *v = v.saturating_add(self.0));
+    }
}

impl<T: Subtrait<I>, I: Instance> Drop for NegativeImbalance<T, I> {
-    /// Basic drop handler will just square up the total issuance.
-    fn drop(&mut self) {
-        <TotalIssuance<ElevatedTrait<T, I>, I>>::mutate(|v| *v = v.saturating_sub(self.0));
-    }
+    /// Basic drop handler will just square up the total issuance.
+ fn drop(&mut self) { + , I>>::mutate(|v| *v = v.saturating_sub(self.0)); + } } impl, I: Instance> Currency for Module where - T::Balance: MaybeSerializeDebug + T::Balance: MaybeSerializeDebug, { - type Balance = T::Balance; - type PositiveImbalance = PositiveImbalance; - type NegativeImbalance = NegativeImbalance; - - fn total_balance(who: &T::AccountId) -> Self::Balance { - Self::free_balance(who) + Self::reserved_balance(who) - } - - fn can_slash(who: &T::AccountId, value: Self::Balance) -> bool { - Self::free_balance(who) >= value - } - - fn total_issuance() -> Self::Balance { - >::get() - } - - fn minimum_balance() -> Self::Balance { - Self::existential_deposit() - } - - fn free_balance(who: &T::AccountId) -> Self::Balance { - >::get(who) - } - - fn ensure_can_withdraw( - who: &T::AccountId, - _amount: T::Balance, - reason: WithdrawReason, - new_balance: T::Balance, - ) -> Result { - match reason { - WithdrawReason::Reserve | WithdrawReason::Transfer if Self::vesting_balance(who) > new_balance => - return Err("vesting balance too high to send value"), - _ => {} - } - let locks = Self::locks(who); - if locks.is_empty() { - return Ok(()) - } - let now = >::block_number(); - if Self::locks(who).into_iter() - .all(|l| now >= l.until || new_balance >= l.amount || !l.reasons.contains(reason)) - { - Ok(()) - } else { - Err("account liquidity restrictions prevent withdrawal") - } - } - - fn transfer(transactor: &T::AccountId, dest: &T::AccountId, value: Self::Balance) -> Result { - let from_balance = Self::free_balance(transactor); - let to_balance = Self::free_balance(dest); - let would_create = to_balance.is_zero(); - let fee = if would_create { Self::creation_fee() } else { Self::transfer_fee() }; - let liability = match value.checked_add(&fee) { - Some(l) => l, - None => return Err("got overflow after adding a fee to value"), - }; - - let new_from_balance = match from_balance.checked_sub(&liability) { - None => return Err("balance too low to send value"), - Some(b) => b, - }; - if would_create && value < Self::existential_deposit() { - return Err("value too low to create account"); - } - Self::ensure_can_withdraw(transactor, value, WithdrawReason::Transfer, new_from_balance)?; - - // NOTE: total stake being stored in the same type means that this could never overflow - // but better to be safe than sorry. 
- let new_to_balance = match to_balance.checked_add(&value) { - Some(b) => b, - None => return Err("destination balance too high to receive value"), - }; - - if transactor != dest { - Self::set_free_balance(transactor, new_from_balance); - if !>::exists(dest) { - Self::new_account(dest, new_to_balance); - } - Self::set_free_balance(dest, new_to_balance); - T::TransferPayment::on_unbalanced(NegativeImbalance(fee)); - Self::deposit_event(RawEvent::Transfer(transactor.clone(), dest.clone(), value, fee)); - } - - Ok(()) - } - - fn withdraw( - who: &T::AccountId, - value: Self::Balance, - reason: WithdrawReason, - liveness: ExistenceRequirement, - ) -> result::Result { - if let Some(new_balance) = Self::free_balance(who).checked_sub(&value) { - if liveness == ExistenceRequirement::KeepAlive && new_balance < Self::existential_deposit() { - return Err("payment would kill account") - } - Self::ensure_can_withdraw(who, value, reason, new_balance)?; - Self::set_free_balance(who, new_balance); - Ok(NegativeImbalance(value)) - } else { - Err("too few free funds in account") - } - } - - fn slash( - who: &T::AccountId, - value: Self::Balance - ) -> (Self::NegativeImbalance, Self::Balance) { - let free_balance = Self::free_balance(who); - let free_slash = cmp::min(free_balance, value); - Self::set_free_balance(who, free_balance - free_slash); - let remaining_slash = value - free_slash; - // NOTE: `slash()` prefers free balance, but assumes that reserve balance can be drawn - // from in extreme circumstances. `can_slash()` should be used prior to `slash()` is avoid having - // to draw from reserved funds, however we err on the side of punishment if things are inconsistent - // or `can_slash` wasn't used appropriately. - if !remaining_slash.is_zero() { - let reserved_balance = Self::reserved_balance(who); - let reserved_slash = cmp::min(reserved_balance, remaining_slash); - Self::set_reserved_balance(who, reserved_balance - reserved_slash); - (NegativeImbalance(free_slash + reserved_slash), remaining_slash - reserved_slash) - } else { - (NegativeImbalance(value), Zero::zero()) - } - } - - fn deposit_into_existing( - who: &T::AccountId, - value: Self::Balance - ) -> result::Result { - if Self::total_balance(who).is_zero() { - return Err("beneficiary account must pre-exist"); - } - Self::set_free_balance(who, Self::free_balance(who) + value); - Ok(PositiveImbalance(value)) - } - - fn deposit_creating( - who: &T::AccountId, - value: Self::Balance, - ) -> Self::PositiveImbalance { - let (imbalance, _) = Self::make_free_balance_be(who, Self::free_balance(who) + value); - if let SignedImbalance::Positive(p) = imbalance { - p - } else { - // Impossible, but be defensive. - Self::PositiveImbalance::zero() - } - } - - fn make_free_balance_be(who: &T::AccountId, balance: T::Balance) -> ( - SignedImbalance, - UpdateBalanceOutcome - ) { - let original = Self::free_balance(who); - if balance < Self::existential_deposit() && original.is_zero() { - // If we're attempting to set an existing account to less than ED, then - // bypass the entire operation. It's a no-op if you follow it through, but - // since this is an instance where we might account for a negative imbalance - // (in the dust cleaner of set_free_balance) before we account for its actual - // equal and opposite cause (returned as an Imbalance), then in the - // instance that there's no other accounts on the system at all, we might - // underflow the issuance and our arithmetic will be off. 
- return ( - SignedImbalance::Positive(Self::PositiveImbalance::zero()), - UpdateBalanceOutcome::AccountKilled, - ) - } - let imbalance = if original <= balance { - SignedImbalance::Positive(PositiveImbalance(balance - original)) - } else { - SignedImbalance::Negative(NegativeImbalance(original - balance)) - }; - // If the balance is too low, then the account is reaped. - // NOTE: There are two balances for every account: `reserved_balance` and - // `free_balance`. This contract subsystem only cares about the latter: whenever - // the term "balance" is used *here* it should be assumed to mean "free balance" - // in the rest of the module. - // Free balance can never be less than ED. If that happens, it gets reduced to zero - // and the account information relevant to this subsystem is deleted (i.e. the - // account is reaped). - let outcome = if balance < >::existential_deposit() { - Self::set_free_balance(who, balance); - UpdateBalanceOutcome::AccountKilled - } else { - if !>::exists(who) { - Self::new_account(&who, balance); - } - Self::set_free_balance(who, balance); - UpdateBalanceOutcome::Updated - }; - (imbalance, outcome) - } + type Balance = T::Balance; + type PositiveImbalance = PositiveImbalance; + type NegativeImbalance = NegativeImbalance; + + fn total_balance(who: &T::AccountId) -> Self::Balance { + Self::free_balance(who) + Self::reserved_balance(who) + } + + fn can_slash(who: &T::AccountId, value: Self::Balance) -> bool { + Self::free_balance(who) >= value + } + + fn total_issuance() -> Self::Balance { + >::get() + } + + fn minimum_balance() -> Self::Balance { + Self::existential_deposit() + } + + fn free_balance(who: &T::AccountId) -> Self::Balance { + >::get(who) + } + + fn ensure_can_withdraw( + who: &T::AccountId, + _amount: T::Balance, + reason: WithdrawReason, + new_balance: T::Balance, + ) -> Result { + match reason { + WithdrawReason::Reserve | WithdrawReason::Transfer + if Self::vesting_balance(who) > new_balance => + { + return Err("vesting balance too high to send value"); + } + _ => {} + } + let locks = Self::locks(who); + if locks.is_empty() { + return Ok(()); + } + let now = >::block_number(); + if Self::locks(who) + .into_iter() + .all(|l| now >= l.until || new_balance >= l.amount || !l.reasons.contains(reason)) + { + Ok(()) + } else { + Err("account liquidity restrictions prevent withdrawal") + } + } + + fn transfer(transactor: &T::AccountId, dest: &T::AccountId, value: Self::Balance) -> Result { + let from_balance = Self::free_balance(transactor); + let to_balance = Self::free_balance(dest); + let would_create = to_balance.is_zero(); + let fee = if would_create { + Self::creation_fee() + } else { + Self::transfer_fee() + }; + let liability = match value.checked_add(&fee) { + Some(l) => l, + None => return Err("got overflow after adding a fee to value"), + }; + + let new_from_balance = match from_balance.checked_sub(&liability) { + None => return Err("balance too low to send value"), + Some(b) => b, + }; + if would_create && value < Self::existential_deposit() { + return Err("value too low to create account"); + } + Self::ensure_can_withdraw( + transactor, + value, + WithdrawReason::Transfer, + new_from_balance, + )?; + + // NOTE: total stake being stored in the same type means that this could never overflow + // but better to be safe than sorry. 
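+        // For instance, a transfer of value 100 with a fee of 2 has already checked the
+        // sender for a liability of 102; the recipient gains exactly 100 and the 2 is
+        // routed to `TransferPayment` below.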
+        let new_to_balance = match to_balance.checked_add(&value) {
+            Some(b) => b,
+            None => return Err("destination balance too high to receive value"),
+        };
+
+        if transactor != dest {
+            Self::set_free_balance(transactor, new_from_balance);
+            if !<FreeBalance<T, I>>::exists(dest) {
+                Self::new_account(dest, new_to_balance);
+            }
+            Self::set_free_balance(dest, new_to_balance);
+            T::TransferPayment::on_unbalanced(NegativeImbalance(fee));
+            Self::deposit_event(RawEvent::Transfer(
+                transactor.clone(),
+                dest.clone(),
+                value,
+                fee,
+            ));
+        }
+
+        Ok(())
+    }
+
+    fn withdraw(
+        who: &T::AccountId,
+        value: Self::Balance,
+        reason: WithdrawReason,
+        liveness: ExistenceRequirement,
+    ) -> result::Result<Self::NegativeImbalance, &'static str> {
+        if let Some(new_balance) = Self::free_balance(who).checked_sub(&value) {
+            if liveness == ExistenceRequirement::KeepAlive
+                && new_balance < Self::existential_deposit()
+            {
+                return Err("payment would kill account");
+            }
+            Self::ensure_can_withdraw(who, value, reason, new_balance)?;
+            Self::set_free_balance(who, new_balance);
+            Ok(NegativeImbalance(value))
+        } else {
+            Err("too few free funds in account")
+        }
+    }
+
+    fn slash(who: &T::AccountId, value: Self::Balance) -> (Self::NegativeImbalance, Self::Balance) {
+        let free_balance = Self::free_balance(who);
+        let free_slash = cmp::min(free_balance, value);
+        Self::set_free_balance(who, free_balance - free_slash);
+        let remaining_slash = value - free_slash;
+        // NOTE: `slash()` prefers free balance, but assumes that reserve balance can be drawn
+        // from in extreme circumstances. `can_slash()` should be used prior to `slash()` to
+        // avoid having to draw from reserved funds, however we err on the side of punishment
+        // if things are inconsistent or `can_slash` wasn't used appropriately.
+        if !remaining_slash.is_zero() {
+            let reserved_balance = Self::reserved_balance(who);
+            let reserved_slash = cmp::min(reserved_balance, remaining_slash);
+            Self::set_reserved_balance(who, reserved_balance - reserved_slash);
+            (
+                NegativeImbalance(free_slash + reserved_slash),
+                remaining_slash - reserved_slash,
+            )
+        } else {
+            (NegativeImbalance(value), Zero::zero())
+        }
+    }
+
+    fn deposit_into_existing(
+        who: &T::AccountId,
+        value: Self::Balance,
+    ) -> result::Result<Self::PositiveImbalance, &'static str> {
+        if Self::total_balance(who).is_zero() {
+            return Err("beneficiary account must pre-exist");
+        }
+        Self::set_free_balance(who, Self::free_balance(who) + value);
+        Ok(PositiveImbalance(value))
+    }
+
+    fn deposit_creating(who: &T::AccountId, value: Self::Balance) -> Self::PositiveImbalance {
+        let (imbalance, _) = Self::make_free_balance_be(who, Self::free_balance(who) + value);
+        if let SignedImbalance::Positive(p) = imbalance {
+            p
+        } else {
+            // Impossible, but be defensive.
+            Self::PositiveImbalance::zero()
+        }
+    }
+
+    fn make_free_balance_be(
+        who: &T::AccountId,
+        balance: T::Balance,
+    ) -> (
+        SignedImbalance<Self::Balance, Self::PositiveImbalance>,
+        UpdateBalanceOutcome,
+    ) {
+        let original = Self::free_balance(who);
+        if balance < Self::existential_deposit() && original.is_zero() {
+            // If we're attempting to set a non-existent account to less than ED, then
+            // bypass the entire operation. It's a no-op if you follow it through, but
+            // since this is an instance where we might account for a negative imbalance
+            // (in the dust cleaner of set_free_balance) before we account for its actual
+            // equal and opposite cause (returned as an Imbalance), then in the
+            // instance that there's no other accounts on the system at all, we might
+            // underflow the issuance and our arithmetic will be off.
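+            // Returning a zero imbalance below keeps `TotalIssuance` untouched when it
+            // is eventually dropped.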
+ return ( + SignedImbalance::Positive(Self::PositiveImbalance::zero()), + UpdateBalanceOutcome::AccountKilled, + ); + } + let imbalance = if original <= balance { + SignedImbalance::Positive(PositiveImbalance(balance - original)) + } else { + SignedImbalance::Negative(NegativeImbalance(original - balance)) + }; + // If the balance is too low, then the account is reaped. + // NOTE: There are two balances for every account: `reserved_balance` and + // `free_balance`. This contract subsystem only cares about the latter: whenever + // the term "balance" is used *here* it should be assumed to mean "free balance" + // in the rest of the module. + // Free balance can never be less than ED. If that happens, it gets reduced to zero + // and the account information relevant to this subsystem is deleted (i.e. the + // account is reaped). + let outcome = if balance < >::existential_deposit() { + Self::set_free_balance(who, balance); + UpdateBalanceOutcome::AccountKilled + } else { + if !>::exists(who) { + Self::new_account(&who, balance); + } + Self::set_free_balance(who, balance); + UpdateBalanceOutcome::Updated + }; + (imbalance, outcome) + } } impl, I: Instance> ReservableCurrency for Module where - T::Balance: MaybeSerializeDebug + T::Balance: MaybeSerializeDebug, { - fn can_reserve(who: &T::AccountId, value: Self::Balance) -> bool { - Self::free_balance(who) - .checked_sub(&value) - .map_or(false, |new_balance| - Self::ensure_can_withdraw(who, value, WithdrawReason::Reserve, new_balance).is_ok() - ) - } - - fn reserved_balance(who: &T::AccountId) -> Self::Balance { - >::get(who) - } - - fn reserve(who: &T::AccountId, value: Self::Balance) -> result::Result<(), &'static str> { - let b = Self::free_balance(who); - if b < value { - return Err("not enough free funds") - } - let new_balance = b - value; - Self::ensure_can_withdraw(who, value, WithdrawReason::Reserve, new_balance)?; - Self::set_reserved_balance(who, Self::reserved_balance(who) + value); - Self::set_free_balance(who, new_balance); - Ok(()) - } - - fn unreserve(who: &T::AccountId, value: Self::Balance) -> Self::Balance { - let b = Self::reserved_balance(who); - let actual = cmp::min(b, value); - Self::set_free_balance(who, Self::free_balance(who) + actual); - Self::set_reserved_balance(who, b - actual); - value - actual - } - - fn slash_reserved( - who: &T::AccountId, - value: Self::Balance - ) -> (Self::NegativeImbalance, Self::Balance) { - let b = Self::reserved_balance(who); - let slash = cmp::min(b, value); - // underflow should never happen, but it if does, there's nothing to be done here. 
-        Self::set_reserved_balance(who, b - slash);
-        (NegativeImbalance(slash), value - slash)
-    }
-
-    fn repatriate_reserved(
-        slashed: &T::AccountId,
-        beneficiary: &T::AccountId,
-        value: Self::Balance,
-    ) -> result::Result<Self::Balance, &'static str> {
-        if Self::total_balance(beneficiary).is_zero() {
-            return Err("beneficiary account must pre-exist");
-        }
-        let b = Self::reserved_balance(slashed);
-        let slash = cmp::min(b, value);
-        Self::set_free_balance(beneficiary, Self::free_balance(beneficiary) + slash);
-        Self::set_reserved_balance(slashed, b - slash);
-        Ok(value - slash)
-    }
+    fn can_reserve(who: &T::AccountId, value: Self::Balance) -> bool {
+        Self::free_balance(who)
+            .checked_sub(&value)
+            .map_or(false, |new_balance| {
+                Self::ensure_can_withdraw(who, value, WithdrawReason::Reserve, new_balance).is_ok()
+            })
+    }
+
+    fn reserved_balance(who: &T::AccountId) -> Self::Balance {
+        <ReservedBalance<T, I>>::get(who)
+    }
+
+    fn reserve(who: &T::AccountId, value: Self::Balance) -> result::Result<(), &'static str> {
+        let b = Self::free_balance(who);
+        if b < value {
+            return Err("not enough free funds");
+        }
+        let new_balance = b - value;
+        Self::ensure_can_withdraw(who, value, WithdrawReason::Reserve, new_balance)?;
+        Self::set_reserved_balance(who, Self::reserved_balance(who) + value);
+        Self::set_free_balance(who, new_balance);
+        Ok(())
+    }
+
+    fn unreserve(who: &T::AccountId, value: Self::Balance) -> Self::Balance {
+        let b = Self::reserved_balance(who);
+        let actual = cmp::min(b, value);
+        Self::set_free_balance(who, Self::free_balance(who) + actual);
+        Self::set_reserved_balance(who, b - actual);
+        value - actual
+    }
+
+    fn slash_reserved(
+        who: &T::AccountId,
+        value: Self::Balance,
+    ) -> (Self::NegativeImbalance, Self::Balance) {
+        let b = Self::reserved_balance(who);
+        let slash = cmp::min(b, value);
+        // underflow should never happen, but if it does, there's nothing to be done here.
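+        // `slash` is capped at the reserved balance, so `b - slash` cannot underflow;
+        // the unslashed remainder `value - slash` is handed back to the caller.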
+ Self::set_reserved_balance(who, b - slash); + (NegativeImbalance(slash), value - slash) + } + + fn repatriate_reserved( + slashed: &T::AccountId, + beneficiary: &T::AccountId, + value: Self::Balance, + ) -> result::Result { + if Self::total_balance(beneficiary).is_zero() { + return Err("beneficiary account must pre-exist"); + } + let b = Self::reserved_balance(slashed); + let slash = cmp::min(b, value); + Self::set_free_balance(beneficiary, Self::free_balance(beneficiary) + slash); + Self::set_reserved_balance(slashed, b - slash); + Ok(value - slash) + } } impl, I: Instance> LockableCurrency for Module where - T::Balance: MaybeSerializeDebug + T::Balance: MaybeSerializeDebug, { - type Moment = T::BlockNumber; - - fn set_lock( - id: LockIdentifier, - who: &T::AccountId, - amount: T::Balance, - until: T::BlockNumber, - reasons: WithdrawReasons, - ) { - let now = >::block_number(); - let mut new_lock = Some(BalanceLock { id, amount, until, reasons }); - let mut locks = Self::locks(who).into_iter().filter_map(|l| - if l.id == id { - new_lock.take() - } else if l.until > now { - Some(l) - } else { - None - }).collect::>(); - if let Some(lock) = new_lock { - locks.push(lock) - } - >::insert(who, locks); - } - - fn extend_lock( - id: LockIdentifier, - who: &T::AccountId, - amount: T::Balance, - until: T::BlockNumber, - reasons: WithdrawReasons, - ) { - let now = >::block_number(); - let mut new_lock = Some(BalanceLock { id, amount, until, reasons }); - let mut locks = Self::locks(who).into_iter().filter_map(|l| - if l.id == id { - new_lock.take().map(|nl| { - BalanceLock { - id: l.id, - amount: l.amount.max(nl.amount), - until: l.until.max(nl.until), - reasons: l.reasons | nl.reasons, - } - }) - } else if l.until > now { - Some(l) - } else { - None - }).collect::>(); - if let Some(lock) = new_lock { - locks.push(lock) - } - >::insert(who, locks); - } - - fn remove_lock( - id: LockIdentifier, - who: &T::AccountId, - ) { - let now = >::block_number(); - let locks = Self::locks(who).into_iter().filter_map(|l| - if l.until > now && l.id != id { - Some(l) - } else { - None - }).collect::>(); - >::insert(who, locks); - } + type Moment = T::BlockNumber; + + fn set_lock( + id: LockIdentifier, + who: &T::AccountId, + amount: T::Balance, + until: T::BlockNumber, + reasons: WithdrawReasons, + ) { + let now = >::block_number(); + let mut new_lock = Some(BalanceLock { + id, + amount, + until, + reasons, + }); + let mut locks = Self::locks(who) + .into_iter() + .filter_map(|l| { + if l.id == id { + new_lock.take() + } else if l.until > now { + Some(l) + } else { + None + } + }) + .collect::>(); + if let Some(lock) = new_lock { + locks.push(lock) + } + >::insert(who, locks); + } + + fn extend_lock( + id: LockIdentifier, + who: &T::AccountId, + amount: T::Balance, + until: T::BlockNumber, + reasons: WithdrawReasons, + ) { + let now = >::block_number(); + let mut new_lock = Some(BalanceLock { + id, + amount, + until, + reasons, + }); + let mut locks = Self::locks(who) + .into_iter() + .filter_map(|l| { + if l.id == id { + new_lock.take().map(|nl| BalanceLock { + id: l.id, + amount: l.amount.max(nl.amount), + until: l.until.max(nl.until), + reasons: l.reasons | nl.reasons, + }) + } else if l.until > now { + Some(l) + } else { + None + } + }) + .collect::>(); + if let Some(lock) = new_lock { + locks.push(lock) + } + >::insert(who, locks); + } + + fn remove_lock(id: LockIdentifier, who: &T::AccountId) { + let now = >::block_number(); + let locks = Self::locks(who) + .into_iter() + .filter_map(|l| { + if l.until > 
now && l.id != id { + Some(l) + } else { + None + } + }) + .collect::>(); + >::insert(who, locks); + } } impl, I: Instance> MakePayment for Module { - fn make_payment(transactor: &T::AccountId, encoded_len: usize) -> Result { - let encoded_len = >::sa(encoded_len as u64); - let transaction_fee = Self::transaction_base_fee() + Self::transaction_byte_fee() * encoded_len; - let imbalance = Self::withdraw( - transactor, - transaction_fee, - WithdrawReason::TransactionPayment, - ExistenceRequirement::KeepAlive - )?; - T::TransactionPayment::on_unbalanced(imbalance); - Ok(()) - } + fn make_payment(transactor: &T::AccountId, encoded_len: usize) -> Result { + let encoded_len = >::sa(encoded_len as u64); + let transaction_fee = + Self::transaction_base_fee() + Self::transaction_byte_fee() * encoded_len; + let imbalance = Self::withdraw( + transactor, + transaction_fee, + WithdrawReason::TransactionPayment, + ExistenceRequirement::KeepAlive, + )?; + T::TransactionPayment::on_unbalanced(imbalance); + Ok(()) + } } impl, I: Instance> IsDeadAccount for Module where - T::Balance: MaybeSerializeDebug + T::Balance: MaybeSerializeDebug, { - fn is_dead_account(who: &T::AccountId) -> bool { - Self::total_balance(who).is_zero() - } + fn is_dead_account(who: &T::AccountId) -> bool { + Self::total_balance(who).is_zero() + } } - diff --git a/srml/balances/src/mock.rs b/srml/balances/src/mock.rs index db20efc475..022db1fe74 100644 --- a/srml/balances/src/mock.rs +++ b/srml/balances/src/mock.rs @@ -18,116 +18,132 @@ #![cfg(test)] +use crate::{GenesisConfig, Module, Trait}; use primitives::BuildStorage; -use primitives::{traits::{IdentityLookup}, testing::{Digest, DigestItem, Header}}; -use substrate_primitives::{H256, Blake2Hasher}; +use primitives::{ + testing::{Digest, DigestItem, Header}, + traits::IdentityLookup, +}; use runtime_io; use srml_support::impl_outer_origin; -use crate::{GenesisConfig, Module, Trait}; +use substrate_primitives::{Blake2Hasher, H256}; -impl_outer_origin!{ - pub enum Origin for Runtime {} +impl_outer_origin! { + pub enum Origin for Runtime {} } // Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted. 
#[derive(Clone, PartialEq, Eq, Debug)] pub struct Runtime; impl system::Trait for Runtime { - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Hash = H256; - type Hashing = ::primitives::traits::BlakeTwo256; - type Digest = Digest; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type Log = DigestItem; + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Hashing = ::primitives::traits::BlakeTwo256; + type Digest = Digest; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = (); + type Log = DigestItem; } impl Trait for Runtime { - type Balance = u64; - type OnFreeBalanceZero = (); - type OnNewAccount = (); - type Event = (); - type TransactionPayment = (); - type DustRemoval = (); - type TransferPayment = (); + type Balance = u64; + type OnFreeBalanceZero = (); + type OnNewAccount = (); + type Event = (); + type TransactionPayment = (); + type DustRemoval = (); + type TransferPayment = (); } pub struct ExtBuilder { - transaction_base_fee: u64, - transaction_byte_fee: u64, - existential_deposit: u64, - transfer_fee: u64, - creation_fee: u64, - monied: bool, - vesting: bool, + transaction_base_fee: u64, + transaction_byte_fee: u64, + existential_deposit: u64, + transfer_fee: u64, + creation_fee: u64, + monied: bool, + vesting: bool, } impl Default for ExtBuilder { - fn default() -> Self { - Self { - transaction_base_fee: 0, - transaction_byte_fee: 0, - existential_deposit: 0, - transfer_fee: 0, - creation_fee: 0, - monied: false, - vesting: false, - } - } + fn default() -> Self { + Self { + transaction_base_fee: 0, + transaction_byte_fee: 0, + existential_deposit: 0, + transfer_fee: 0, + creation_fee: 0, + monied: false, + vesting: false, + } + } } impl ExtBuilder { - pub fn existential_deposit(mut self, existential_deposit: u64) -> Self { - self.existential_deposit = existential_deposit; - self - } - #[allow(dead_code)] - pub fn transfer_fee(mut self, transfer_fee: u64) -> Self { - self.transfer_fee = transfer_fee; - self - } - pub fn creation_fee(mut self, creation_fee: u64) -> Self { - self.creation_fee = creation_fee; - self - } - pub fn transaction_fees(mut self, base_fee: u64, byte_fee: u64) -> Self { - self.transaction_base_fee = base_fee; - self.transaction_byte_fee = byte_fee; - self - } - pub fn monied(mut self, monied: bool) -> Self { - self.monied = monied; - if self.existential_deposit == 0 { - self.existential_deposit = 1; - } - self - } - pub fn vesting(mut self, vesting: bool) -> Self { - self.vesting = vesting; - self - } - pub fn build(self) -> runtime_io::TestExternalities { - let mut t = system::GenesisConfig::::default().build_storage().unwrap().0; - t.extend(GenesisConfig:: { - transaction_base_fee: self.transaction_base_fee, - transaction_byte_fee: self.transaction_byte_fee, - balances: if self.monied { - vec![(1, 10 * self.existential_deposit), (2, 20 * self.existential_deposit), (3, 30 * self.existential_deposit), (4, 40 * self.existential_deposit)] - } else { - vec![] - }, - existential_deposit: self.existential_deposit, - transfer_fee: self.transfer_fee, - creation_fee: self.creation_fee, - vesting: if self.vesting && self.monied { - vec![(1, 0, 10), (2, 10, 20)] - } else { - vec![] - }, - }.build_storage().unwrap().0); - t.into() - } + pub fn existential_deposit(mut self, existential_deposit: u64) -> Self { + self.existential_deposit = existential_deposit; + self + } + #[allow(dead_code)] + pub fn 
transfer_fee(mut self, transfer_fee: u64) -> Self { + self.transfer_fee = transfer_fee; + self + } + pub fn creation_fee(mut self, creation_fee: u64) -> Self { + self.creation_fee = creation_fee; + self + } + pub fn transaction_fees(mut self, base_fee: u64, byte_fee: u64) -> Self { + self.transaction_base_fee = base_fee; + self.transaction_byte_fee = byte_fee; + self + } + pub fn monied(mut self, monied: bool) -> Self { + self.monied = monied; + if self.existential_deposit == 0 { + self.existential_deposit = 1; + } + self + } + pub fn vesting(mut self, vesting: bool) -> Self { + self.vesting = vesting; + self + } + pub fn build(self) -> runtime_io::TestExternalities { + let mut t = system::GenesisConfig::::default() + .build_storage() + .unwrap() + .0; + t.extend( + GenesisConfig:: { + transaction_base_fee: self.transaction_base_fee, + transaction_byte_fee: self.transaction_byte_fee, + balances: if self.monied { + vec![ + (1, 10 * self.existential_deposit), + (2, 20 * self.existential_deposit), + (3, 30 * self.existential_deposit), + (4, 40 * self.existential_deposit), + ] + } else { + vec![] + }, + existential_deposit: self.existential_deposit, + transfer_fee: self.transfer_fee, + creation_fee: self.creation_fee, + vesting: if self.vesting && self.monied { + vec![(1, 0, 10), (2, 10, 20)] + } else { + vec![] + }, + } + .build_storage() + .unwrap() + .0, + ); + t.into() + } } pub type System = system::Module; diff --git a/srml/balances/src/tests.rs b/srml/balances/src/tests.rs index 89491fe5f8..56c4c90c9a 100644 --- a/srml/balances/src/tests.rs +++ b/srml/balances/src/tests.rs @@ -22,9 +22,11 @@ use super::*; use mock::{Balances, ExtBuilder, Runtime, System}; use runtime_io::with_externalities; use srml_support::{ - assert_noop, assert_ok, assert_err, - traits::{LockableCurrency, LockIdentifier, WithdrawReason, WithdrawReasons, - Currency, MakePayment, ReservableCurrency} + assert_err, assert_noop, assert_ok, + traits::{ + Currency, LockIdentifier, LockableCurrency, MakePayment, ReservableCurrency, + WithdrawReason, WithdrawReasons, + }, }; const ID_1: LockIdentifier = *b"1 "; @@ -33,588 +35,727 @@ const ID_3: LockIdentifier = *b"3 "; #[test] fn basic_locking_should_work() { - with_externalities(&mut ExtBuilder::default().existential_deposit(1).monied(true).build(), || { - assert_eq!(Balances::free_balance(&1), 10); - Balances::set_lock(ID_1, &1, 9, u64::max_value(), WithdrawReasons::all()); - assert_noop!(>::transfer(&1, &2, 5), "account liquidity restrictions prevent withdrawal"); - }); + with_externalities( + &mut ExtBuilder::default() + .existential_deposit(1) + .monied(true) + .build(), + || { + assert_eq!(Balances::free_balance(&1), 10); + Balances::set_lock(ID_1, &1, 9, u64::max_value(), WithdrawReasons::all()); + assert_noop!( + >::transfer(&1, &2, 5), + "account liquidity restrictions prevent withdrawal" + ); + }, + ); } #[test] fn partial_locking_should_work() { - with_externalities(&mut ExtBuilder::default().existential_deposit(1).monied(true).build(), || { - Balances::set_lock(ID_1, &1, 5, u64::max_value(), WithdrawReasons::all()); - assert_ok!(>::transfer(&1, &2, 1)); - }); + with_externalities( + &mut ExtBuilder::default() + .existential_deposit(1) + .monied(true) + .build(), + || { + Balances::set_lock(ID_1, &1, 5, u64::max_value(), WithdrawReasons::all()); + assert_ok!(>::transfer(&1, &2, 1)); + }, + ); } #[test] fn lock_removal_should_work() { - with_externalities(&mut ExtBuilder::default().existential_deposit(1).monied(true).build(), || { - Balances::set_lock(ID_1, &1, 
u64::max_value(), u64::max_value(), WithdrawReasons::all()); - Balances::remove_lock(ID_1, &1); - assert_ok!(>::transfer(&1, &2, 1)); - }); + with_externalities( + &mut ExtBuilder::default() + .existential_deposit(1) + .monied(true) + .build(), + || { + Balances::set_lock( + ID_1, + &1, + u64::max_value(), + u64::max_value(), + WithdrawReasons::all(), + ); + Balances::remove_lock(ID_1, &1); + assert_ok!(>::transfer(&1, &2, 1)); + }, + ); } #[test] fn lock_replacement_should_work() { - with_externalities(&mut ExtBuilder::default().existential_deposit(1).monied(true).build(), || { - Balances::set_lock(ID_1, &1, u64::max_value(), u64::max_value(), WithdrawReasons::all()); - Balances::set_lock(ID_1, &1, 5, u64::max_value(), WithdrawReasons::all()); - assert_ok!(>::transfer(&1, &2, 1)); - }); + with_externalities( + &mut ExtBuilder::default() + .existential_deposit(1) + .monied(true) + .build(), + || { + Balances::set_lock( + ID_1, + &1, + u64::max_value(), + u64::max_value(), + WithdrawReasons::all(), + ); + Balances::set_lock(ID_1, &1, 5, u64::max_value(), WithdrawReasons::all()); + assert_ok!(>::transfer(&1, &2, 1)); + }, + ); } #[test] fn double_locking_should_work() { - with_externalities(&mut ExtBuilder::default().existential_deposit(1).monied(true).build(), || { - Balances::set_lock(ID_1, &1, 5, u64::max_value(), WithdrawReasons::all()); - Balances::set_lock(ID_2, &1, 5, u64::max_value(), WithdrawReasons::all()); - assert_ok!(>::transfer(&1, &2, 1)); - }); + with_externalities( + &mut ExtBuilder::default() + .existential_deposit(1) + .monied(true) + .build(), + || { + Balances::set_lock(ID_1, &1, 5, u64::max_value(), WithdrawReasons::all()); + Balances::set_lock(ID_2, &1, 5, u64::max_value(), WithdrawReasons::all()); + assert_ok!(>::transfer(&1, &2, 1)); + }, + ); } #[test] fn combination_locking_should_work() { - with_externalities(&mut ExtBuilder::default().existential_deposit(1).monied(true).build(), || { - Balances::set_lock(ID_1, &1, u64::max_value(), 0, WithdrawReasons::none()); - Balances::set_lock(ID_2, &1, 0, u64::max_value(), WithdrawReasons::none()); - Balances::set_lock(ID_3, &1, 0, 0, WithdrawReasons::all()); - assert_ok!(>::transfer(&1, &2, 1)); - }); + with_externalities( + &mut ExtBuilder::default() + .existential_deposit(1) + .monied(true) + .build(), + || { + Balances::set_lock(ID_1, &1, u64::max_value(), 0, WithdrawReasons::none()); + Balances::set_lock(ID_2, &1, 0, u64::max_value(), WithdrawReasons::none()); + Balances::set_lock(ID_3, &1, 0, 0, WithdrawReasons::all()); + assert_ok!(>::transfer(&1, &2, 1)); + }, + ); } #[test] fn lock_value_extension_should_work() { - with_externalities(&mut ExtBuilder::default().existential_deposit(1).monied(true).build(), || { - Balances::set_lock(ID_1, &1, 5, u64::max_value(), WithdrawReasons::all()); - assert_noop!(>::transfer(&1, &2, 6), "account liquidity restrictions prevent withdrawal"); - Balances::extend_lock(ID_1, &1, 2, u64::max_value(), WithdrawReasons::all()); - assert_noop!(>::transfer(&1, &2, 6), "account liquidity restrictions prevent withdrawal"); - Balances::extend_lock(ID_1, &1, 8, u64::max_value(), WithdrawReasons::all()); - assert_noop!(>::transfer(&1, &2, 3), "account liquidity restrictions prevent withdrawal"); - }); + with_externalities( + &mut ExtBuilder::default() + .existential_deposit(1) + .monied(true) + .build(), + || { + Balances::set_lock(ID_1, &1, 5, u64::max_value(), WithdrawReasons::all()); + assert_noop!( + >::transfer(&1, &2, 6), + "account liquidity restrictions prevent withdrawal" + ); + 
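+            // `extend_lock` keeps the larger of the old and new lock amounts, so
+            // extending with 2 leaves the lock at 5 and the transfer of 6 below still fails.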
Balances::extend_lock(ID_1, &1, 2, u64::max_value(), WithdrawReasons::all()); + assert_noop!( + >::transfer(&1, &2, 6), + "account liquidity restrictions prevent withdrawal" + ); + Balances::extend_lock(ID_1, &1, 8, u64::max_value(), WithdrawReasons::all()); + assert_noop!( + >::transfer(&1, &2, 3), + "account liquidity restrictions prevent withdrawal" + ); + }, + ); } #[test] fn lock_reasons_should_work() { - with_externalities(&mut ExtBuilder::default().existential_deposit(1).monied(true).transaction_fees(0, 1).build(), || { - Balances::set_lock(ID_1, &1, 10, u64::max_value(), WithdrawReason::Transfer.into()); - assert_noop!(>::transfer(&1, &2, 1), "account liquidity restrictions prevent withdrawal"); - assert_ok!(>::reserve(&1, 1)); - assert_ok!(>::make_payment(&1, 1)); - - Balances::set_lock(ID_1, &1, 10, u64::max_value(), WithdrawReason::Reserve.into()); - assert_ok!(>::transfer(&1, &2, 1)); - assert_noop!(>::reserve(&1, 1), "account liquidity restrictions prevent withdrawal"); - assert_ok!(>::make_payment(&1, 1)); - - Balances::set_lock(ID_1, &1, 10, u64::max_value(), WithdrawReason::TransactionPayment.into()); - assert_ok!(>::transfer(&1, &2, 1)); - assert_ok!(>::reserve(&1, 1)); - assert_noop!(>::make_payment(&1, 1), "account liquidity restrictions prevent withdrawal"); - }); + with_externalities( + &mut ExtBuilder::default() + .existential_deposit(1) + .monied(true) + .transaction_fees(0, 1) + .build(), + || { + Balances::set_lock( + ID_1, + &1, + 10, + u64::max_value(), + WithdrawReason::Transfer.into(), + ); + assert_noop!( + >::transfer(&1, &2, 1), + "account liquidity restrictions prevent withdrawal" + ); + assert_ok!(>::reserve(&1, 1)); + assert_ok!(>::make_payment(&1, 1)); + + Balances::set_lock( + ID_1, + &1, + 10, + u64::max_value(), + WithdrawReason::Reserve.into(), + ); + assert_ok!(>::transfer(&1, &2, 1)); + assert_noop!( + >::reserve(&1, 1), + "account liquidity restrictions prevent withdrawal" + ); + assert_ok!(>::make_payment(&1, 1)); + + Balances::set_lock( + ID_1, + &1, + 10, + u64::max_value(), + WithdrawReason::TransactionPayment.into(), + ); + assert_ok!(>::transfer(&1, &2, 1)); + assert_ok!(>::reserve(&1, 1)); + assert_noop!( + >::make_payment(&1, 1), + "account liquidity restrictions prevent withdrawal" + ); + }, + ); } #[test] fn lock_block_number_should_work() { - with_externalities(&mut ExtBuilder::default().existential_deposit(1).monied(true).build(), || { - Balances::set_lock(ID_1, &1, 10, 2, WithdrawReasons::all()); - assert_noop!(>::transfer(&1, &2, 1), "account liquidity restrictions prevent withdrawal"); + with_externalities( + &mut ExtBuilder::default() + .existential_deposit(1) + .monied(true) + .build(), + || { + Balances::set_lock(ID_1, &1, 10, 2, WithdrawReasons::all()); + assert_noop!( + >::transfer(&1, &2, 1), + "account liquidity restrictions prevent withdrawal" + ); - System::set_block_number(2); - assert_ok!(>::transfer(&1, &2, 1)); - }); + System::set_block_number(2); + assert_ok!(>::transfer(&1, &2, 1)); + }, + ); } #[test] fn lock_block_number_extension_should_work() { - with_externalities(&mut ExtBuilder::default().existential_deposit(1).monied(true).build(), || { - Balances::set_lock(ID_1, &1, 10, 2, WithdrawReasons::all()); - assert_noop!(>::transfer(&1, &2, 6), "account liquidity restrictions prevent withdrawal"); - Balances::extend_lock(ID_1, &1, 10, 1, WithdrawReasons::all()); - assert_noop!(>::transfer(&1, &2, 6), "account liquidity restrictions prevent withdrawal"); - System::set_block_number(2); - Balances::extend_lock(ID_1, &1, 
10, 8, WithdrawReasons::all()); - assert_noop!(>::transfer(&1, &2, 3), "account liquidity restrictions prevent withdrawal"); - }); + with_externalities( + &mut ExtBuilder::default() + .existential_deposit(1) + .monied(true) + .build(), + || { + Balances::set_lock(ID_1, &1, 10, 2, WithdrawReasons::all()); + assert_noop!( + >::transfer(&1, &2, 6), + "account liquidity restrictions prevent withdrawal" + ); + Balances::extend_lock(ID_1, &1, 10, 1, WithdrawReasons::all()); + assert_noop!( + >::transfer(&1, &2, 6), + "account liquidity restrictions prevent withdrawal" + ); + System::set_block_number(2); + Balances::extend_lock(ID_1, &1, 10, 8, WithdrawReasons::all()); + assert_noop!( + >::transfer(&1, &2, 3), + "account liquidity restrictions prevent withdrawal" + ); + }, + ); } #[test] fn lock_reasons_extension_should_work() { - with_externalities(&mut ExtBuilder::default().existential_deposit(1).monied(true).build(), || { - Balances::set_lock(ID_1, &1, 10, 10, WithdrawReason::Transfer.into()); - assert_noop!(>::transfer(&1, &2, 6), "account liquidity restrictions prevent withdrawal"); - Balances::extend_lock(ID_1, &1, 10, 10, WithdrawReasons::none()); - assert_noop!(>::transfer(&1, &2, 6), "account liquidity restrictions prevent withdrawal"); - Balances::extend_lock(ID_1, &1, 10, 10, WithdrawReason::Reserve.into()); - assert_noop!(>::transfer(&1, &2, 6), "account liquidity restrictions prevent withdrawal"); - }); + with_externalities( + &mut ExtBuilder::default() + .existential_deposit(1) + .monied(true) + .build(), + || { + Balances::set_lock(ID_1, &1, 10, 10, WithdrawReason::Transfer.into()); + assert_noop!( + >::transfer(&1, &2, 6), + "account liquidity restrictions prevent withdrawal" + ); + Balances::extend_lock(ID_1, &1, 10, 10, WithdrawReasons::none()); + assert_noop!( + >::transfer(&1, &2, 6), + "account liquidity restrictions prevent withdrawal" + ); + Balances::extend_lock(ID_1, &1, 10, 10, WithdrawReason::Reserve.into()); + assert_noop!( + >::transfer(&1, &2, 6), + "account liquidity restrictions prevent withdrawal" + ); + }, + ); } #[test] fn default_indexing_on_new_accounts_should_not_work2() { - with_externalities( - &mut ExtBuilder::default() - .existential_deposit(10) - .creation_fee(50) - .monied(true) - .build(), - || { - assert_eq!(Balances::is_dead_account(&5), true); // account 5 should not exist - // account 1 has 256 * 10 = 2560, account 5 is not exist, ext_deposit is 10, value is 9, not satisfies for ext_deposit - assert_noop!( - Balances::transfer(Some(1).into(), 5, 9), - "value too low to create account" - ); - assert_eq!(Balances::is_dead_account(&5), true); // account 5 should not exist - assert_eq!(Balances::free_balance(&1), 100); - }, - ); + with_externalities( + &mut ExtBuilder::default() + .existential_deposit(10) + .creation_fee(50) + .monied(true) + .build(), + || { + assert_eq!(Balances::is_dead_account(&5), true); // account 5 should not exist + // account 1 has 10 * 10 = 100; account 5 does not exist; a transfer of 9 is below the ext_deposit of 10, so it cannot create the account + assert_noop!( + Balances::transfer(Some(1).into(), 5, 9), + "value too low to create account" + ); + assert_eq!(Balances::is_dead_account(&5), true); // account 5 should not exist + assert_eq!(Balances::free_balance(&1), 100); + }, + ); } #[test] fn reserved_balance_should_prevent_reclaim_count() { - with_externalities( - &mut ExtBuilder::default() - .existential_deposit(256 * 1) - .monied(true) - .build(), - || { - System::inc_account_nonce(&2); - assert_eq!(Balances::is_dead_account(&2), false);
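The lock tests above all hinge on one rule: extending a lock can only tighten it, never relax it. A minimal standalone sketch of that rule, assuming (as the module behaved at the time) that `extend_lock` keeps the larger amount, the later deadline, and the union of withdraw reasons; the `Lock` type and bitmask reasons here are illustrative, not the module's real types.

#[derive(Clone, Copy, Debug, PartialEq)]
struct Lock {
    amount: u64,
    until: u64,
    reasons: u8, // simplified stand-in for WithdrawReasons
}

// Extending keeps the strictest combination of the two locks.
fn extend(existing: Lock, new: Lock) -> Lock {
    Lock {
        amount: existing.amount.max(new.amount),
        until: existing.until.max(new.until),
        reasons: existing.reasons | new.reasons,
    }
}

fn main() {
    let lock = Lock { amount: 5, until: 2, reasons: 0b01 };
    // A weaker extension cannot relax the lock...
    let lock = extend(lock, Lock { amount: 2, until: 1, reasons: 0b00 });
    assert_eq!(lock, Lock { amount: 5, until: 2, reasons: 0b01 });
    // ...while a stricter one tightens it.
    let lock = extend(lock, Lock { amount: 8, until: 8, reasons: 0b10 });
    assert_eq!(lock, Lock { amount: 8, until: 8, reasons: 0b11 });
}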
- assert_eq!(Balances::is_dead_account(&5), true); - assert_eq!(Balances::total_balance(&2), 256 * 20); - - assert_ok!(Balances::reserve(&2, 256 * 19 + 1)); // account 2 becomes mostly reserved - assert_eq!(Balances::free_balance(&2), 0); // "free" account deleted." - assert_eq!(Balances::total_balance(&2), 256 * 19 + 1); // reserve still exists. - assert_eq!(Balances::is_dead_account(&2), false); - assert_eq!(System::account_nonce(&2), 1); - - assert_ok!(Balances::transfer(Some(4).into(), 5, 256 * 1 + 0x69)); // account 4 tries to take index 1 for account 5. - assert_eq!(Balances::total_balance(&5), 256 * 1 + 0x69); - assert_eq!(Balances::is_dead_account(&5), false); - - assert!(Balances::slash(&2, 256 * 18 + 2).1.is_zero()); // account 2 gets slashed - assert_eq!(Balances::total_balance(&2), 0); // "reserve" account reduced to 255 (below ED) so account deleted - assert_eq!(System::account_nonce(&2), 0); // nonce zero - assert_eq!(Balances::is_dead_account(&2), true); - - assert_ok!(Balances::transfer(Some(4).into(), 6, 256 * 1 + 0x69)); // account 4 tries to take index 1 again for account 6. - assert_eq!(Balances::total_balance(&6), 256 * 1 + 0x69); - assert_eq!(Balances::is_dead_account(&6), false); - }, - ); + with_externalities( + &mut ExtBuilder::default() + .existential_deposit(256 * 1) + .monied(true) + .build(), + || { + System::inc_account_nonce(&2); + assert_eq!(Balances::is_dead_account(&2), false); + assert_eq!(Balances::is_dead_account(&5), true); + assert_eq!(Balances::total_balance(&2), 256 * 20); + + assert_ok!(Balances::reserve(&2, 256 * 19 + 1)); // account 2 becomes mostly reserved + assert_eq!(Balances::free_balance(&2), 0); // "free" account deleted. + assert_eq!(Balances::total_balance(&2), 256 * 19 + 1); // reserve still exists. + assert_eq!(Balances::is_dead_account(&2), false); + assert_eq!(System::account_nonce(&2), 1); + + assert_ok!(Balances::transfer(Some(4).into(), 5, 256 * 1 + 0x69)); // account 4 tries to take index 1 for account 5. + assert_eq!(Balances::total_balance(&5), 256 * 1 + 0x69); + assert_eq!(Balances::is_dead_account(&5), false); + + assert!(Balances::slash(&2, 256 * 18 + 2).1.is_zero()); // account 2 gets slashed + assert_eq!(Balances::total_balance(&2), 0); // "reserve" account reduced to 255 (below ED) so account deleted + assert_eq!(System::account_nonce(&2), 0); // nonce zero + assert_eq!(Balances::is_dead_account(&2), true); + + assert_ok!(Balances::transfer(Some(4).into(), 6, 256 * 1 + 0x69)); // account 4 tries to take index 1 again for account 6.
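For reference while reading `reserved_balance_should_prevent_reclaim_count`: `reserve` moves funds from the free balance into the reserved balance, and `slash` drains free funds first, then reserved funds, returning the shortfall (the `.1` these tests inspect). A standalone sketch under those assumptions; the reaping of accounts that drop below the existential deposit is deliberately left out, and all names are illustrative.

struct Account {
    free: u64,
    reserved: u64,
}

impl Account {
    // Move funds from the free bucket into the reserved bucket.
    fn reserve(&mut self, value: u64) -> Result<(), &'static str> {
        if self.free < value {
            return Err("not enough free funds");
        }
        self.free -= value;
        self.reserved += value;
        Ok(())
    }

    /// Slash up to `value`, free funds first; returns the amount that
    /// could not be slashed (the tests assert this is zero or not).
    fn slash(&mut self, value: u64) -> u64 {
        let from_free = self.free.min(value);
        self.free -= from_free;
        let from_reserved = self.reserved.min(value - from_free);
        self.reserved -= from_reserved;
        value - from_free - from_reserved
    }
}

fn main() {
    let mut a = Account { free: 111, reserved: 0 };
    a.reserve(69).unwrap();
    assert_eq!((a.free, a.reserved), (42, 69));
    assert_eq!(a.slash(100), 0); // 42 from free, then 58 from reserved
    assert_eq!((a.free, a.reserved), (0, 11));
    assert_eq!(a.slash(20), 9); // only 11 left anywhere: 9 is unslashable
}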
+ assert_eq!(Balances::total_balance(&6), 256 * 1 + 0x69); + assert_eq!(Balances::is_dead_account(&6), false); + }, + ); } - #[test] fn reward_should_work() { - with_externalities(&mut ExtBuilder::default().monied(true).build(), || { - assert_eq!(Balances::total_balance(&1), 10); - assert_ok!(Balances::deposit_into_existing(&1, 10).map(drop)); - assert_eq!(Balances::total_balance(&1), 20); - assert_eq!(>::get(), 110); - }); + with_externalities(&mut ExtBuilder::default().monied(true).build(), || { + assert_eq!(Balances::total_balance(&1), 10); + assert_ok!(Balances::deposit_into_existing(&1, 10).map(drop)); + assert_eq!(Balances::total_balance(&1), 20); + assert_eq!(>::get(), 110); + }); } #[test] fn dust_account_removal_should_work() { - with_externalities( - &mut ExtBuilder::default() - .existential_deposit(100) - .monied(true) - .build(), - || { - System::inc_account_nonce(&2); - assert_eq!(System::account_nonce(&2), 1); - assert_eq!(Balances::total_balance(&2), 2000); + with_externalities( + &mut ExtBuilder::default() + .existential_deposit(100) + .monied(true) + .build(), + || { + System::inc_account_nonce(&2); + assert_eq!(System::account_nonce(&2), 1); + assert_eq!(Balances::total_balance(&2), 2000); - assert_ok!(Balances::transfer(Some(2).into(), 5, 1901)); // index 1 (account 2) becomes zombie - assert_eq!(Balances::total_balance(&2), 0); - assert_eq!(Balances::total_balance(&5), 1901); - assert_eq!(System::account_nonce(&2), 0); - }, - ); + assert_ok!(Balances::transfer(Some(2).into(), 5, 1901)); // index 1 (account 2) becomes zombie + assert_eq!(Balances::total_balance(&2), 0); + assert_eq!(Balances::total_balance(&5), 1901); + assert_eq!(System::account_nonce(&2), 0); + }, + ); } #[test] fn dust_account_removal_should_work2() { - with_externalities( - &mut ExtBuilder::default() - .existential_deposit(100) - .creation_fee(50) - .monied(true) - .build(), - || { - System::inc_account_nonce(&2); - assert_eq!(System::account_nonce(&2), 1); - assert_eq!(Balances::total_balance(&2), 2000); - assert_ok!(Balances::transfer(Some(2).into(), 5, 1851)); // index 1 (account 2) becomes zombie for 256*10 + 50(fee) < 256 * 10 (ext_deposit) - assert_eq!(Balances::total_balance(&2), 0); - assert_eq!(Balances::total_balance(&5), 1851); - assert_eq!(System::account_nonce(&2), 0); - }, - ); + with_externalities( + &mut ExtBuilder::default() + .existential_deposit(100) + .creation_fee(50) + .monied(true) + .build(), + || { + System::inc_account_nonce(&2); + assert_eq!(System::account_nonce(&2), 1); + assert_eq!(Balances::total_balance(&2), 2000); + assert_ok!(Balances::transfer(Some(2).into(), 5, 1851)); // index 1 (account 2) becomes a zombie: 2000 - 1851 - 50 (fee) = 99 < 100 (ext_deposit) + assert_eq!(Balances::total_balance(&2), 0); + assert_eq!(Balances::total_balance(&5), 1851); + assert_eq!(System::account_nonce(&2), 0); + }, + ); } #[test] fn balance_works() { - with_externalities(&mut ExtBuilder::default().build(), || { - let _ = Balances::deposit_creating(&1, 42); - assert_eq!(Balances::free_balance(&1), 42); - assert_eq!(Balances::reserved_balance(&1), 0); - assert_eq!(Balances::total_balance(&1), 42); - assert_eq!(Balances::free_balance(&2), 0); - assert_eq!(Balances::reserved_balance(&2), 0); - assert_eq!(Balances::total_balance(&2), 0); - }); + with_externalities(&mut ExtBuilder::default().build(), || { + let _ = Balances::deposit_creating(&1, 42); + assert_eq!(Balances::free_balance(&1), 42); + assert_eq!(Balances::reserved_balance(&1), 0); + assert_eq!(Balances::total_balance(&1), 42); +
assert_eq!(Balances::free_balance(&2), 0); + assert_eq!(Balances::reserved_balance(&2), 0); + assert_eq!(Balances::total_balance(&2), 0); + }); } #[test] fn balance_transfer_works() { - with_externalities(&mut ExtBuilder::default().build(), || { - let _ = Balances::deposit_creating(&1, 111); - assert_ok!(Balances::transfer(Some(1).into(), 2, 69)); - assert_eq!(Balances::total_balance(&1), 42); - assert_eq!(Balances::total_balance(&2), 69); - }); + with_externalities(&mut ExtBuilder::default().build(), || { + let _ = Balances::deposit_creating(&1, 111); + assert_ok!(Balances::transfer(Some(1).into(), 2, 69)); + assert_eq!(Balances::total_balance(&1), 42); + assert_eq!(Balances::total_balance(&2), 69); + }); } #[test] fn reserving_balance_should_work() { - with_externalities(&mut ExtBuilder::default().build(), || { - let _ = Balances::deposit_creating(&1, 111); + with_externalities(&mut ExtBuilder::default().build(), || { + let _ = Balances::deposit_creating(&1, 111); - assert_eq!(Balances::total_balance(&1), 111); - assert_eq!(Balances::free_balance(&1), 111); - assert_eq!(Balances::reserved_balance(&1), 0); + assert_eq!(Balances::total_balance(&1), 111); + assert_eq!(Balances::free_balance(&1), 111); + assert_eq!(Balances::reserved_balance(&1), 0); - assert_ok!(Balances::reserve(&1, 69)); + assert_ok!(Balances::reserve(&1, 69)); - assert_eq!(Balances::total_balance(&1), 111); - assert_eq!(Balances::free_balance(&1), 42); - assert_eq!(Balances::reserved_balance(&1), 69); - }); + assert_eq!(Balances::total_balance(&1), 111); + assert_eq!(Balances::free_balance(&1), 42); + assert_eq!(Balances::reserved_balance(&1), 69); + }); } #[test] fn balance_transfer_when_reserved_should_not_work() { - with_externalities(&mut ExtBuilder::default().build(), || { - let _ = Balances::deposit_creating(&1, 111); - assert_ok!(Balances::reserve(&1, 69)); - assert_noop!(Balances::transfer(Some(1).into(), 2, 69), "balance too low to send value"); - }); + with_externalities(&mut ExtBuilder::default().build(), || { + let _ = Balances::deposit_creating(&1, 111); + assert_ok!(Balances::reserve(&1, 69)); + assert_noop!( + Balances::transfer(Some(1).into(), 2, 69), + "balance too low to send value" + ); + }); } #[test] fn deducting_balance_should_work() { - with_externalities(&mut ExtBuilder::default().build(), || { - let _ = Balances::deposit_creating(&1, 111); - assert_ok!(Balances::reserve(&1, 69)); - assert_eq!(Balances::free_balance(&1), 42); - }); + with_externalities(&mut ExtBuilder::default().build(), || { + let _ = Balances::deposit_creating(&1, 111); + assert_ok!(Balances::reserve(&1, 69)); + assert_eq!(Balances::free_balance(&1), 42); + }); } #[test] fn refunding_balance_should_work() { - with_externalities(&mut ExtBuilder::default().build(), || { - let _ = Balances::deposit_creating(&1, 42); - Balances::set_reserved_balance(&1, 69); - Balances::unreserve(&1, 69); - assert_eq!(Balances::free_balance(&1), 111); - assert_eq!(Balances::reserved_balance(&1), 0); - }); + with_externalities(&mut ExtBuilder::default().build(), || { + let _ = Balances::deposit_creating(&1, 42); + Balances::set_reserved_balance(&1, 69); + Balances::unreserve(&1, 69); + assert_eq!(Balances::free_balance(&1), 111); + assert_eq!(Balances::reserved_balance(&1), 0); + }); } #[test] fn slashing_balance_should_work() { - with_externalities(&mut ExtBuilder::default().build(), || { - let _ = Balances::deposit_creating(&1, 111); - assert_ok!(Balances::reserve(&1, 69)); - assert!(Balances::slash(&1, 69).1.is_zero()); - 
assert_eq!(Balances::free_balance(&1), 0); - assert_eq!(Balances::reserved_balance(&1), 42); - assert_eq!(>::get(), 42); - }); + with_externalities(&mut ExtBuilder::default().build(), || { + let _ = Balances::deposit_creating(&1, 111); + assert_ok!(Balances::reserve(&1, 69)); + assert!(Balances::slash(&1, 69).1.is_zero()); + assert_eq!(Balances::free_balance(&1), 0); + assert_eq!(Balances::reserved_balance(&1), 42); + assert_eq!(>::get(), 42); + }); } #[test] fn slashing_incomplete_balance_should_work() { - with_externalities(&mut ExtBuilder::default().build(), || { - let _ = Balances::deposit_creating(&1, 42); - assert_ok!(Balances::reserve(&1, 21)); - assert_eq!(Balances::slash(&1, 69).1, 27); - assert_eq!(Balances::free_balance(&1), 0); - assert_eq!(Balances::reserved_balance(&1), 0); - assert_eq!(>::get(), 0); - }); + with_externalities(&mut ExtBuilder::default().build(), || { + let _ = Balances::deposit_creating(&1, 42); + assert_ok!(Balances::reserve(&1, 21)); + assert_eq!(Balances::slash(&1, 69).1, 27); + assert_eq!(Balances::free_balance(&1), 0); + assert_eq!(Balances::reserved_balance(&1), 0); + assert_eq!(>::get(), 0); + }); } #[test] fn unreserving_balance_should_work() { - with_externalities(&mut ExtBuilder::default().build(), || { - let _ = Balances::deposit_creating(&1, 111); - assert_ok!(Balances::reserve(&1, 111)); - Balances::unreserve(&1, 42); - assert_eq!(Balances::reserved_balance(&1), 69); - assert_eq!(Balances::free_balance(&1), 42); - }); + with_externalities(&mut ExtBuilder::default().build(), || { + let _ = Balances::deposit_creating(&1, 111); + assert_ok!(Balances::reserve(&1, 111)); + Balances::unreserve(&1, 42); + assert_eq!(Balances::reserved_balance(&1), 69); + assert_eq!(Balances::free_balance(&1), 42); + }); } #[test] fn slashing_reserved_balance_should_work() { - with_externalities(&mut ExtBuilder::default().build(), || { - let _ = Balances::deposit_creating(&1, 111); - assert_ok!(Balances::reserve(&1, 111)); - assert_eq!(Balances::slash_reserved(&1, 42).1, 0); - assert_eq!(Balances::reserved_balance(&1), 69); - assert_eq!(Balances::free_balance(&1), 0); - assert_eq!(>::get(), 69); - }); + with_externalities(&mut ExtBuilder::default().build(), || { + let _ = Balances::deposit_creating(&1, 111); + assert_ok!(Balances::reserve(&1, 111)); + assert_eq!(Balances::slash_reserved(&1, 42).1, 0); + assert_eq!(Balances::reserved_balance(&1), 69); + assert_eq!(Balances::free_balance(&1), 0); + assert_eq!(>::get(), 69); + }); } #[test] fn slashing_incomplete_reserved_balance_should_work() { - with_externalities(&mut ExtBuilder::default().build(), || { - let _ = Balances::deposit_creating(&1, 111); - assert_ok!(Balances::reserve(&1, 42)); - assert_eq!(Balances::slash_reserved(&1, 69).1, 27); - assert_eq!(Balances::free_balance(&1), 69); - assert_eq!(Balances::reserved_balance(&1), 0); - assert_eq!(>::get(), 69); - }); + with_externalities(&mut ExtBuilder::default().build(), || { + let _ = Balances::deposit_creating(&1, 111); + assert_ok!(Balances::reserve(&1, 42)); + assert_eq!(Balances::slash_reserved(&1, 69).1, 27); + assert_eq!(Balances::free_balance(&1), 69); + assert_eq!(Balances::reserved_balance(&1), 0); + assert_eq!(>::get(), 69); + }); } #[test] fn transferring_reserved_balance_should_work() { - with_externalities(&mut ExtBuilder::default().build(), || { - let _ = Balances::deposit_creating(&1, 110); - let _ = Balances::deposit_creating(&2, 1); - assert_ok!(Balances::reserve(&1, 110)); - assert_ok!(Balances::repatriate_reserved(&1, &2, 41), 0); - 
assert_eq!(Balances::reserved_balance(&1), 69); - assert_eq!(Balances::free_balance(&1), 0); - assert_eq!(Balances::reserved_balance(&2), 0); - assert_eq!(Balances::free_balance(&2), 42); - }); + with_externalities(&mut ExtBuilder::default().build(), || { + let _ = Balances::deposit_creating(&1, 110); + let _ = Balances::deposit_creating(&2, 1); + assert_ok!(Balances::reserve(&1, 110)); + assert_ok!(Balances::repatriate_reserved(&1, &2, 41), 0); + assert_eq!(Balances::reserved_balance(&1), 69); + assert_eq!(Balances::free_balance(&1), 0); + assert_eq!(Balances::reserved_balance(&2), 0); + assert_eq!(Balances::free_balance(&2), 42); + }); } #[test] fn transferring_reserved_balance_to_nonexistent_should_fail() { - with_externalities(&mut ExtBuilder::default().build(), || { - let _ = Balances::deposit_creating(&1, 111); - assert_ok!(Balances::reserve(&1, 111)); - assert_noop!(Balances::repatriate_reserved(&1, &2, 42), "beneficiary account must pre-exist"); - }); + with_externalities(&mut ExtBuilder::default().build(), || { + let _ = Balances::deposit_creating(&1, 111); + assert_ok!(Balances::reserve(&1, 111)); + assert_noop!( + Balances::repatriate_reserved(&1, &2, 42), + "beneficiary account must pre-exist" + ); + }); } #[test] fn transferring_incomplete_reserved_balance_should_work() { - with_externalities(&mut ExtBuilder::default().build(), || { - let _ = Balances::deposit_creating(&1, 110); - let _ = Balances::deposit_creating(&2, 1); - assert_ok!(Balances::reserve(&1, 41)); - assert_ok!(Balances::repatriate_reserved(&1, &2, 69), 28); - assert_eq!(Balances::reserved_balance(&1), 0); - assert_eq!(Balances::free_balance(&1), 69); - assert_eq!(Balances::reserved_balance(&2), 0); - assert_eq!(Balances::free_balance(&2), 42); - }); + with_externalities(&mut ExtBuilder::default().build(), || { + let _ = Balances::deposit_creating(&1, 110); + let _ = Balances::deposit_creating(&2, 1); + assert_ok!(Balances::reserve(&1, 41)); + assert_ok!(Balances::repatriate_reserved(&1, &2, 69), 28); + assert_eq!(Balances::reserved_balance(&1), 0); + assert_eq!(Balances::free_balance(&1), 69); + assert_eq!(Balances::reserved_balance(&2), 0); + assert_eq!(Balances::free_balance(&2), 42); + }); } #[test] fn transferring_too_high_value_should_not_panic() { - with_externalities(&mut ExtBuilder::default().build(), || { - >::insert(1, u64::max_value()); - >::insert(2, 1); + with_externalities(&mut ExtBuilder::default().build(), || { + >::insert(1, u64::max_value()); + >::insert(2, 1); - assert_err!( - Balances::transfer(Some(1).into(), 2, u64::max_value()), - "destination balance too high to receive value" - ); + assert_err!( + Balances::transfer(Some(1).into(), 2, u64::max_value()), + "destination balance too high to receive value" + ); - assert_eq!(Balances::free_balance(&1), u64::max_value()); - assert_eq!(Balances::free_balance(&2), 1); - }); + assert_eq!(Balances::free_balance(&1), u64::max_value()); + assert_eq!(Balances::free_balance(&2), 1); + }); } #[test] fn account_create_on_free_too_low_with_other() { - with_externalities( - &mut ExtBuilder::default().existential_deposit(100).build(), - || { - let _ = Balances::deposit_creating(&1, 100); - assert_eq!(>::get(), 100); + with_externalities( + &mut ExtBuilder::default().existential_deposit(100).build(), + || { + let _ = Balances::deposit_creating(&1, 100); + assert_eq!(>::get(), 100); - // No-op. - let _ = Balances::deposit_creating(&2, 50); - assert_eq!(Balances::free_balance(&2), 0); - assert_eq!(>::get(), 100); - } - ) + // No-op. 
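The two `account_create_on_free_too_low*` tests here pin down the existential-deposit rule: a deposit into a non-existent account that is below the threshold is silently dropped, leaving both the balance and the total issuance untouched. A minimal sketch of that rule under simplified semantics (illustrative names, plain integers instead of the module's balance types).

// Deposit that may create an account; below-threshold creation is a no-op.
fn deposit_creating(free: &mut u64, issuance: &mut u64, value: u64, existential_deposit: u64) {
    if *free == 0 && value < existential_deposit {
        return; // too small to create the account: drop the deposit entirely
    }
    *free += value;
    *issuance += value;
}

fn main() {
    let (mut free, mut issuance) = (0u64, 0u64);
    deposit_creating(&mut free, &mut issuance, 50, 100); // below threshold: no-op
    assert_eq!((free, issuance), (0, 0));
    deposit_creating(&mut free, &mut issuance, 110, 100); // creates the account
    assert_eq!((free, issuance), (110, 110));
}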
+ let _ = Balances::deposit_creating(&2, 50); + assert_eq!(Balances::free_balance(&2), 0); + assert_eq!(>::get(), 100); + }, + ) } - #[test] fn account_create_on_free_too_low() { - with_externalities( - &mut ExtBuilder::default().existential_deposit(100).build(), - || { - // No-op. - let _ = Balances::deposit_creating(&2, 50); - assert_eq!(Balances::free_balance(&2), 0); - assert_eq!(>::get(), 0); - } - ) + with_externalities( + &mut ExtBuilder::default().existential_deposit(100).build(), + || { + // No-op. + let _ = Balances::deposit_creating(&2, 50); + assert_eq!(Balances::free_balance(&2), 0); + assert_eq!(>::get(), 0); + }, + ) } #[test] fn account_removal_on_free_too_low() { - with_externalities( - &mut ExtBuilder::default().existential_deposit(100).build(), - || { - assert_eq!(>::get(), 0); + with_externalities( + &mut ExtBuilder::default().existential_deposit(100).build(), + || { + assert_eq!(>::get(), 0); - // Setup two accounts with free balance above the existential threshold. - let _ = Balances::deposit_creating(&1, 110); - let _ = Balances::deposit_creating(&2, 110); + // Setup two accounts with free balance above the existential threshold. + let _ = Balances::deposit_creating(&1, 110); + let _ = Balances::deposit_creating(&2, 110); - assert_eq!(Balances::free_balance(&1), 110); - assert_eq!(Balances::free_balance(&2), 110); - assert_eq!(>::get(), 220); + assert_eq!(Balances::free_balance(&1), 110); + assert_eq!(Balances::free_balance(&2), 110); + assert_eq!(>::get(), 220); - // Transfer funds from account 1 of such amount that after this transfer - // the balance of account 1 will be below the existential threshold. - // This should lead to the removal of all balance of this account. - assert_ok!(Balances::transfer(Some(1).into(), 2, 20)); + // Transfer funds from account 1 of such amount that after this transfer + // the balance of account 1 will be below the existential threshold. + // This should lead to the removal of all balance of this account. + assert_ok!(Balances::transfer(Some(1).into(), 2, 20)); - // Verify free balance removal of account 1. - assert_eq!(Balances::free_balance(&1), 0); - assert_eq!(Balances::free_balance(&2), 130); + // Verify free balance removal of account 1. + assert_eq!(Balances::free_balance(&1), 0); + assert_eq!(Balances::free_balance(&2), 130); - // Verify that TotalIssuance tracks balance removal when free balance is too low. - assert_eq!(>::get(), 130); - }, - ); + // Verify that TotalIssuance tracks balance removal when free balance is too low. + assert_eq!(>::get(), 130); + }, + ); } #[test] fn transfer_overflow_isnt_exploitable() { - with_externalities( - &mut ExtBuilder::default().creation_fee(50).build(), - || { - // Craft a value that will overflow if summed with `creation_fee`. - let evil_value = u64::max_value() - 49; + with_externalities(&mut ExtBuilder::default().creation_fee(50).build(), || { + // Craft a value that will overflow if summed with `creation_fee`. 
+ let evil_value = u64::max_value() - 49; - assert_err!( - Balances::transfer(Some(1).into(), 5, evil_value), - "got overflow after adding a fee to value" - ); - } - ); + assert_err!( + Balances::transfer(Some(1).into(), 5, evil_value), + "got overflow after adding a fee to value" + ); + }); } #[test] fn check_vesting_status() { - with_externalities( - &mut ExtBuilder::default() - .existential_deposit(256) - .monied(true) - .vesting(true) - .build(), - || { - assert_eq!(System::block_number(), 1); - let user1_free_balance = Balances::free_balance(&1); - let user2_free_balance = Balances::free_balance(&2); - assert_eq!(user1_free_balance, 256 * 10); // Account 1 has free balance - assert_eq!(user2_free_balance, 256 * 20); // Account 2 has free balance - let user1_vesting_schedule = VestingSchedule { - offset: 256 * 10, - per_block: 256, - }; - let user2_vesting_schedule = VestingSchedule { - offset: 256 * 30, - per_block: 256, - }; - assert_eq!(Balances::vesting(&1), Some(user1_vesting_schedule)); // Account 1 has a vesting schedule - assert_eq!(Balances::vesting(&2), Some(user2_vesting_schedule)); // Account 2 has a vesting schedule - - assert_eq!(Balances::vesting_balance(&1), user1_free_balance - 256); // Account 1 has only 256 units vested at block 1 - - System::set_block_number(10); - assert_eq!(System::block_number(), 10); - - assert_eq!(Balances::vesting_balance(&1), 0); // Account 1 has fully vested by block 10 - assert_eq!(Balances::vesting_balance(&2), user2_free_balance); // Account 2 has started vesting by block 10 - - System::set_block_number(30); - assert_eq!(System::block_number(), 30); - - assert_eq!(Balances::vesting_balance(&1), 0); // Account 1 is still fully vested, and not negative - assert_eq!(Balances::vesting_balance(&2), 0); // Account 2 has fully vested by block 30 - - } - ); + with_externalities( + &mut ExtBuilder::default() + .existential_deposit(256) + .monied(true) + .vesting(true) + .build(), + || { + assert_eq!(System::block_number(), 1); + let user1_free_balance = Balances::free_balance(&1); + let user2_free_balance = Balances::free_balance(&2); + assert_eq!(user1_free_balance, 256 * 10); // Account 1 has free balance + assert_eq!(user2_free_balance, 256 * 20); // Account 2 has free balance + let user1_vesting_schedule = VestingSchedule { + offset: 256 * 10, + per_block: 256, + }; + let user2_vesting_schedule = VestingSchedule { + offset: 256 * 30, + per_block: 256, + }; + assert_eq!(Balances::vesting(&1), Some(user1_vesting_schedule)); // Account 1 has a vesting schedule + assert_eq!(Balances::vesting(&2), Some(user2_vesting_schedule)); // Account 2 has a vesting schedule + + assert_eq!(Balances::vesting_balance(&1), user1_free_balance - 256); // Account 1 has only 256 units vested at block 1 + + System::set_block_number(10); + assert_eq!(System::block_number(), 10); + + assert_eq!(Balances::vesting_balance(&1), 0); // Account 1 has fully vested by block 10 + assert_eq!(Balances::vesting_balance(&2), user2_free_balance); // Account 2 has started vesting by block 10 + + System::set_block_number(30); + assert_eq!(System::block_number(), 30); + + assert_eq!(Balances::vesting_balance(&1), 0); // Account 1 is still fully vested, and not negative + assert_eq!(Balances::vesting_balance(&2), 0); // Account 2 has fully vested by block 30 + }, + ); } #[test] fn unvested_balance_should_not_transfer() { - with_externalities( - &mut ExtBuilder::default() - .existential_deposit(10) - .monied(true) - .vesting(true) - .build(), - || { - assert_eq!(System::block_number(), 
1); - let user1_free_balance = Balances::free_balance(&1); - assert_eq!(user1_free_balance, 100); // Account 1 has free balance - assert_eq!(Balances::vesting_balance(&1), 90); // Account 1 has only 10 units vested at block 1 - assert_noop!( - Balances::transfer(Some(1).into(), 2, 11), - "vesting balance too high to send value" - ); // Account 1 cannot send more than vested amount - } - ); + with_externalities( + &mut ExtBuilder::default() + .existential_deposit(10) + .monied(true) + .vesting(true) + .build(), + || { + assert_eq!(System::block_number(), 1); + let user1_free_balance = Balances::free_balance(&1); + assert_eq!(user1_free_balance, 100); // Account 1 has free balance + assert_eq!(Balances::vesting_balance(&1), 90); // Account 1 has only 10 units vested at block 1 + assert_noop!( + Balances::transfer(Some(1).into(), 2, 11), + "vesting balance too high to send value" + ); // Account 1 cannot send more than vested amount + }, + ); } #[test] fn vested_balance_should_transfer() { - with_externalities( - &mut ExtBuilder::default() - .existential_deposit(10) - .monied(true) - .vesting(true) - .build(), - || { - assert_eq!(System::block_number(), 1); - let user1_free_balance = Balances::free_balance(&1); - assert_eq!(user1_free_balance, 100); // Account 1 has free balance - assert_eq!(Balances::vesting_balance(&1), 90); // Account 1 has only 10 units vested at block 1 - assert_ok!(Balances::transfer(Some(1).into(), 2, 10)); - } - ); + with_externalities( + &mut ExtBuilder::default() + .existential_deposit(10) + .monied(true) + .vesting(true) + .build(), + || { + assert_eq!(System::block_number(), 1); + let user1_free_balance = Balances::free_balance(&1); + assert_eq!(user1_free_balance, 100); // Account 1 has free balance + assert_eq!(Balances::vesting_balance(&1), 90); // Account 1 has only 10 units vested at block 1 + assert_ok!(Balances::transfer(Some(1).into(), 2, 10)); + }, + ); } #[test] fn extra_balance_should_transfer() { - with_externalities( - &mut ExtBuilder::default() - .existential_deposit(10) - .monied(true) - .vesting(true) - .build(), - || { - assert_eq!(System::block_number(), 1); - assert_ok!(Balances::transfer(Some(3).into(), 1, 100)); - let user1_free_balance = Balances::free_balance(&1); - assert_eq!(user1_free_balance, 200); // Account 1 has 100 more free balance than normal - - assert_eq!(Balances::vesting_balance(&1), 90); // Account 1 has 90 units vested at block 1 - assert_ok!(Balances::transfer(Some(1).into(), 2, 105)); // Account 1 can send extra units gained - } - ); + with_externalities( + &mut ExtBuilder::default() + .existential_deposit(10) + .monied(true) + .vesting(true) + .build(), + || { + assert_eq!(System::block_number(), 1); + assert_ok!(Balances::transfer(Some(3).into(), 1, 100)); + let user1_free_balance = Balances::free_balance(&1); + assert_eq!(user1_free_balance, 200); // Account 1 has 100 more free balance than normal + + assert_eq!(Balances::vesting_balance(&1), 90); // Account 1 still has 90 units locked at block 1 + assert_ok!(Balances::transfer(Some(1).into(), 2, 105)); // Account 1 can send extra units gained + }, + ); } diff --git a/srml/consensus/src/lib.rs b/srml/consensus/src/lib.rs index 4c2a57b83c..b28f6ba02c 100644 --- a/srml/consensus/src/lib.rs +++ b/srml/consensus/src/lib.rs @@ -116,20 +116,18 @@ #![cfg_attr(not(feature = "std"), no_std)] +use codec::{Decode, Encode}; +use inherents::{InherentData, InherentIdentifier, MakeFatalError, ProvideInherent, RuntimeString}; +use parity_codec as codec; +use
primitives::traits::{MaybeSerializeDebug, Member}; +use rstd::prelude::*; #[cfg(feature = "std")] use serde_derive::Serialize; -use rstd::prelude::*; -use parity_codec as codec; -use codec::{Encode, Decode}; -use srml_support::{storage, Parameter, decl_storage, decl_module}; -use srml_support::storage::StorageValue; use srml_support::storage::unhashed::StorageVec; -use primitives::traits::{MaybeSerializeDebug, Member}; +use srml_support::storage::StorageValue; +use srml_support::{decl_module, decl_storage, storage, Parameter}; use substrate_primitives::storage::well_known_keys; -use system::{ensure_signed, ensure_inherent}; -use inherents::{ - ProvideInherent, InherentData, InherentIdentifier, RuntimeString, MakeFatalError -}; +use system::{ensure_inherent, ensure_signed}; #[cfg(any(feature = "std", test))] use substrate_primitives::ed25519::Public as AuthorityId; @@ -145,8 +143,8 @@ pub type InherentError = RuntimeString; struct AuthorityStorageVec(rstd::marker::PhantomData); impl StorageVec for AuthorityStorageVec { - type Item = S; - const PREFIX: &'static [u8] = well_known_keys::AUTHORITY_PREFIX; + type Item = S; + const PREFIX: &'static [u8] = well_known_keys::AUTHORITY_PREFIX; } pub type Key = Vec; @@ -154,36 +152,41 @@ pub type KeyValue = (Vec, Vec); /// Handling offline validator reports in a generic way. pub trait OnOfflineReport { - fn handle_report(offline: Offline); + fn handle_report(offline: Offline); } impl OnOfflineReport for () { - fn handle_report(_: T) {} + fn handle_report(_: T) {} } /// Describes the offline-reporting extrinsic. pub trait InherentOfflineReport { - /// The report data type passed to the runtime during block authorship. - type Inherent: codec::Codec + Parameter; + /// The report data type passed to the runtime during block authorship. + type Inherent: codec::Codec + Parameter; - /// Whether an inherent is empty and doesn't need to be included. - fn is_empty(inherent: &Self::Inherent) -> bool; + /// Whether an inherent is empty and doesn't need to be included. + fn is_empty(inherent: &Self::Inherent) -> bool; - /// Handle the report. - fn handle_report(report: Self::Inherent); + /// Handle the report. + fn handle_report(report: Self::Inherent); - /// Whether two reports are compatible. - fn check_inherent(contained: &Self::Inherent, expected: &Self::Inherent) -> Result<(), &'static str>; + /// Whether two reports are compatible. + fn check_inherent( + contained: &Self::Inherent, + expected: &Self::Inherent, + ) -> Result<(), &'static str>; } impl InherentOfflineReport for () { - type Inherent = (); - - fn is_empty(_inherent: &()) -> bool { true } - fn handle_report(_: ()) { } - fn check_inherent(_: &(), _: &()) -> Result<(), &'static str> { - Err("Explicit reporting not allowed") - } + type Inherent = (); + + fn is_empty(_inherent: &()) -> bool { + true + } + fn handle_report(_: ()) {} + fn check_inherent(_: &(), _: &()) -> Result<(), &'static str> { + Err("Explicit reporting not allowed") + } } /// A variant of the `OfflineReport` that is useful for instant-finality blocks. 
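Before the next hunk, a standalone sketch of the opt-out pattern this trait enables: the `()` implementation reports nothing and rejects explicit reports, while the vector-based variant (reformatted in the hunk below) rejects any report naming a validator the block author believes was online. The trait and names here are simplified for illustration, not the module's exact signatures.

trait OfflineReport {
    type Inherent;
    fn is_empty(inherent: &Self::Inherent) -> bool;
    fn check_inherent(contained: &Self::Inherent, expected: &Self::Inherent)
        -> Result<(), &'static str>;
}

// Unit impl: "no reporting". `is_empty` is always true, so authors never
// include the inherent, and any explicitly submitted report fails the check.
impl OfflineReport for () {
    type Inherent = ();
    fn is_empty(_: &()) -> bool { true }
    fn check_inherent(_: &(), _: &()) -> Result<(), &'static str> {
        Err("Explicit reporting not allowed")
    }
}

// Vector variant for instant-finality chains: every reported index must be
// one the checking node also expected to be offline.
struct VecReport;
impl OfflineReport for VecReport {
    type Inherent = Vec<u32>;
    fn is_empty(i: &Self::Inherent) -> bool { i.is_empty() }
    fn check_inherent(contained: &Self::Inherent, expected: &Self::Inherent)
        -> Result<(), &'static str> {
        contained.iter().try_for_each(|n| {
            if !expected.contains(n) {
                Err("Node we believe online marked offline")
            } else {
                Ok(())
            }
        })
    }
}

fn main() {
    assert!(<() as OfflineReport>::check_inherent(&(), &()).is_err());
    assert!(VecReport::check_inherent(&vec![1], &vec![1, 2]).is_ok());
    assert!(VecReport::check_inherent(&vec![3], &vec![1, 2]).is_err());
}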
@@ -192,235 +195,246 @@ impl InherentOfflineReport for () { pub struct InstantFinalityReportVec(::rstd::marker::PhantomData); impl>> InherentOfflineReport for InstantFinalityReportVec { - type Inherent = Vec; - - fn is_empty(inherent: &Self::Inherent) -> bool { inherent.is_empty() } - - fn handle_report(report: Vec) { - T::handle_report(report) - } - - fn check_inherent(contained: &Self::Inherent, expected: &Self::Inherent) -> Result<(), &'static str> { - contained.iter().try_for_each(|n| - if !expected.contains(n) { - Err("Node we believe online marked offline") - } else { - Ok(()) - } - ) - } + type Inherent = Vec; + + fn is_empty(inherent: &Self::Inherent) -> bool { + inherent.is_empty() + } + + fn handle_report(report: Vec) { + T::handle_report(report) + } + + fn check_inherent( + contained: &Self::Inherent, + expected: &Self::Inherent, + ) -> Result<(), &'static str> { + contained.iter().try_for_each(|n| { + if !expected.contains(n) { + Err("Node we believe online marked offline") + } else { + Ok(()) + } + }) + } } -pub type Log = RawLog< - ::SessionKey, ->; +pub type Log = RawLog<::SessionKey>; /// Logs in this module. #[cfg_attr(feature = "std", derive(Serialize, Debug))] #[derive(Encode, Decode, PartialEq, Eq, Clone)] pub enum RawLog { - /// Authorities set has been changed. Contains the new set of authorities. - AuthoritiesChange(Vec), + /// Authorities set has been changed. Contains the new set of authorities. + AuthoritiesChange(Vec), } impl RawLog { - /// Try to cast the log entry as AuthoritiesChange log entry. - pub fn as_authorities_change(&self) -> Option<&[SessionKey]> { - match *self { - RawLog::AuthoritiesChange(ref item) => Some(item), - } - } + /// Try to cast the log entry as AuthoritiesChange log entry. + pub fn as_authorities_change(&self) -> Option<&[SessionKey]> { + match *self { + RawLog::AuthoritiesChange(ref item) => Some(item), + } + } } // Implementation for tests outside of this crate. #[cfg(any(feature = "std", test))] -impl From> for primitives::testing::DigestItem where N: Into { - fn from(log: RawLog) -> primitives::testing::DigestItem { - match log { - RawLog::AuthoritiesChange(authorities) => - primitives::generic::DigestItem::AuthoritiesChange( - authorities.into_iter() - .map(Into::into).collect()), - } - } +impl From> for primitives::testing::DigestItem +where + N: Into, +{ + fn from(log: RawLog) -> primitives::testing::DigestItem { + match log { + RawLog::AuthoritiesChange(authorities) => { + primitives::generic::DigestItem::AuthoritiesChange( + authorities.into_iter().map(Into::into).collect(), + ) + } + } + } } pub trait Trait: system::Trait { - /// Type for all log entries of this module. - type Log: From> + Into>; + /// Type for all log entries of this module. + type Log: From> + Into>; - type SessionKey: Parameter + Default + MaybeSerializeDebug; - /// Defines the offline-report type of the trait. - /// Set to `()` if offline-reports aren't needed for this runtime. - type InherentOfflineReport: InherentOfflineReport; + type SessionKey: Parameter + Default + MaybeSerializeDebug; + /// Defines the offline-report type of the trait. + /// Set to `()` if offline-reports aren't needed for this runtime. + type InherentOfflineReport: InherentOfflineReport; } decl_storage! { - trait Store for Module as Consensus { - // Actual authorities set at the block execution start. Is `Some` iff - // the set has been changed. 
- OriginalAuthorities: Option>; - } - add_extra_genesis { - config(authorities): Vec; - #[serde(with = "substrate_primitives::bytes")] - config(code): Vec; - - build(|storage: &mut primitives::StorageOverlay, _: &mut primitives::ChildrenStorageOverlay, config: &GenesisConfig| { - use codec::{Encode, KeyedVec}; - - let auth_count = config.authorities.len() as u32; - config.authorities.iter().enumerate().for_each(|(i, v)| { - storage.insert((i as u32).to_keyed_vec(well_known_keys::AUTHORITY_PREFIX), v.encode()); - }); - storage.insert(well_known_keys::AUTHORITY_COUNT.to_vec(), auth_count.encode()); - storage.insert(well_known_keys::CODE.to_vec(), config.code.clone()); - }); - } + trait Store for Module as Consensus { + // Actual authorities set at the block execution start. Is `Some` iff + // the set has been changed. + OriginalAuthorities: Option>; + } + add_extra_genesis { + config(authorities): Vec; + #[serde(with = "substrate_primitives::bytes")] + config(code): Vec; + + build(|storage: &mut primitives::StorageOverlay, _: &mut primitives::ChildrenStorageOverlay, config: &GenesisConfig| { + use codec::{Encode, KeyedVec}; + + let auth_count = config.authorities.len() as u32; + config.authorities.iter().enumerate().for_each(|(i, v)| { + storage.insert((i as u32).to_keyed_vec(well_known_keys::AUTHORITY_PREFIX), v.encode()); + }); + storage.insert(well_known_keys::AUTHORITY_COUNT.to_vec(), auth_count.encode()); + storage.insert(well_known_keys::CODE.to_vec(), config.code.clone()); + }); + } } decl_module! { - pub struct Module for enum Call where origin: T::Origin { - /// Report some misbehavior. - fn report_misbehavior(origin, _report: Vec) { - ensure_signed(origin)?; - } - - /// Note that the previous block's validator missed its opportunity to propose a block. - fn note_offline(origin, offline: ::Inherent) { - ensure_inherent(origin)?; - - T::InherentOfflineReport::handle_report(offline); - } - - /// Make some on-chain remark. - fn remark(origin, _remark: Vec) { - ensure_signed(origin)?; - } - - /// Set the number of pages in the WebAssembly environment's heap. - fn set_heap_pages(pages: u64) { - storage::unhashed::put_raw(well_known_keys::HEAP_PAGES, &pages.encode()); - } - - /// Set the new code. - pub fn set_code(new: Vec) { - storage::unhashed::put_raw(well_known_keys::CODE, &new); - } - - /// Set some items of storage. - fn set_storage(items: Vec) { - for i in &items { - storage::unhashed::put_raw(&i.0, &i.1); - } - } - - /// Kill some items from storage. - fn kill_storage(keys: Vec) { - for key in &keys { - storage::unhashed::kill(&key); - } - } - - fn on_finalize() { - if let Some(original_authorities) = >::take() { - let current_authorities = AuthorityStorageVec::::items(); - if current_authorities != original_authorities { - Self::deposit_log(RawLog::AuthoritiesChange(current_authorities)); - } - } - } - } + pub struct Module for enum Call where origin: T::Origin { + /// Report some misbehavior. + fn report_misbehavior(origin, _report: Vec) { + ensure_signed(origin)?; + } + + /// Note that the previous block's validator missed its opportunity to propose a block. + fn note_offline(origin, offline: ::Inherent) { + ensure_inherent(origin)?; + + T::InherentOfflineReport::handle_report(offline); + } + + /// Make some on-chain remark. + fn remark(origin, _remark: Vec) { + ensure_signed(origin)?; + } + + /// Set the number of pages in the WebAssembly environment's heap. 
+ fn set_heap_pages(pages: u64) { + storage::unhashed::put_raw(well_known_keys::HEAP_PAGES, &pages.encode()); + } + + /// Set the new code. + pub fn set_code(new: Vec) { + storage::unhashed::put_raw(well_known_keys::CODE, &new); + } + + /// Set some items of storage. + fn set_storage(items: Vec) { + for i in &items { + storage::unhashed::put_raw(&i.0, &i.1); + } + } + + /// Kill some items from storage. + fn kill_storage(keys: Vec) { + for key in &keys { + storage::unhashed::kill(&key); + } + } + + fn on_finalize() { + if let Some(original_authorities) = >::take() { + let current_authorities = AuthorityStorageVec::::items(); + if current_authorities != original_authorities { + Self::deposit_log(RawLog::AuthoritiesChange(current_authorities)); + } + } + } + } } impl Module { - /// Get the current set of authorities. These are the session keys. - pub fn authorities() -> Vec { - AuthorityStorageVec::::items() - } - - /// Set the current set of authorities' session keys. - /// - /// Called by `rotate_session` only. - pub fn set_authorities(authorities: &[T::SessionKey]) { - let current_authorities = AuthorityStorageVec::::items(); - if current_authorities != authorities { - Self::save_original_authorities(Some(current_authorities)); - AuthorityStorageVec::::set_items(authorities); - } - } - - /// Set a single authority by index. - pub fn set_authority_count(count: u32) { - Self::save_original_authorities(None); - AuthorityStorageVec::::set_count(count); - } - - /// Set a single authority by index. - pub fn set_authority(index: u32, key: &T::SessionKey) { - let current_authority = AuthorityStorageVec::::item(index); - if current_authority != *key { - Self::save_original_authorities(None); - AuthorityStorageVec::::set_item(index, key); - } - } - - /// Save original authorities set. - fn save_original_authorities(current_authorities: Option>) { - if OriginalAuthorities::::get().is_some() { - // if we have already saved original set before, do not overwrite - return; - } - - >::put(current_authorities.unwrap_or_else(|| - AuthorityStorageVec::::items())); - } - - /// Deposit one of this module's logs. - fn deposit_log(log: Log) { - >::deposit_log(::Log::from(log).into()); - } + /// Get the current set of authorities. These are the session keys. + pub fn authorities() -> Vec { + AuthorityStorageVec::::items() + } + + /// Set the current set of authorities' session keys. + /// + /// Called by `rotate_session` only. + pub fn set_authorities(authorities: &[T::SessionKey]) { + let current_authorities = AuthorityStorageVec::::items(); + if current_authorities != authorities { + Self::save_original_authorities(Some(current_authorities)); + AuthorityStorageVec::::set_items(authorities); + } + } + + /// Set a single authority by index. + pub fn set_authority_count(count: u32) { + Self::save_original_authorities(None); + AuthorityStorageVec::::set_count(count); + } + + /// Set a single authority by index. + pub fn set_authority(index: u32, key: &T::SessionKey) { + let current_authority = AuthorityStorageVec::::item(index); + if current_authority != *key { + Self::save_original_authorities(None); + AuthorityStorageVec::::set_item(index, key); + } + } + + /// Save original authorities set. 
+ fn save_original_authorities(current_authorities: Option>) { + if OriginalAuthorities::::get().is_some() { + // if we have already saved original set before, do not overwrite + return; + } + + >::put( + current_authorities.unwrap_or_else(|| AuthorityStorageVec::::items()), + ); + } + + /// Deposit one of this module's logs. + fn deposit_log(log: Log) { + >::deposit_log(::Log::from(log).into()); + } } /// Implementing `ProvideInherent` enables this module to create and check inherents. impl ProvideInherent for Module { - /// The call type of the module. - type Call = Call; - /// The error returned by `check_inherent`. - type Error = MakeFatalError; - /// The inherent identifier used by this inherent. - const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; - - /// Creates an inherent from the `InherentData`. - fn create_inherent(data: &InherentData) -> Option { - if let Ok(Some(data)) = - data.get_data::<::Inherent>( - &INHERENT_IDENTIFIER - ) - { - if ::is_empty(&data) { - None - } else { - Some(Call::note_offline(data)) - } - } else { - None - } - } - - /// Verify the validity of the given inherent. - fn check_inherent(call: &Self::Call, data: &InherentData) -> Result<(), Self::Error> { - let offline = match call { - Call::note_offline(ref offline) => offline, - _ => return Ok(()), - }; - - let expected = data - .get_data::<::Inherent>(&INHERENT_IDENTIFIER)? - .ok_or(RuntimeString::from("No `offline_report` found in the inherent data!"))?; - - ::check_inherent( - &offline, &expected - ).map_err(|e| RuntimeString::from(e).into()) - } + /// The call type of the module. + type Call = Call; + /// The error returned by `check_inherent`. + type Error = MakeFatalError; + /// The inherent identifier used by this inherent. + const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; + + /// Creates an inherent from the `InherentData`. + fn create_inherent(data: &InherentData) -> Option { + if let Ok(Some(data)) = data + .get_data::<::Inherent>( + &INHERENT_IDENTIFIER, + ) + { + if ::is_empty(&data) { + None + } else { + Some(Call::note_offline(data)) + } + } else { + None + } + } + + /// Verify the validity of the given inherent. + fn check_inherent(call: &Self::Call, data: &InherentData) -> Result<(), Self::Error> { + let offline = match call { + Call::note_offline(ref offline) => offline, + _ => return Ok(()), + }; + + let expected = data + .get_data::<::Inherent>( + &INHERENT_IDENTIFIER, + )? + .ok_or(RuntimeString::from( + "No `offline_report` found in the inherent data!", + ))?; + + ::check_inherent(&offline, &expected) + .map_err(|e| RuntimeString::from(e).into()) + } } diff --git a/srml/consensus/src/mock.rs b/srml/consensus/src/mock.rs index 85e6dc3654..4c314d8a18 100644 --- a/srml/consensus/src/mock.rs +++ b/srml/consensus/src/mock.rs @@ -18,45 +18,60 @@ #![cfg(test)] -use primitives::{BuildStorage, traits::IdentityLookup, testing::{Digest, DigestItem, Header, UintAuthorityId}}; -use srml_support::impl_outer_origin; +use crate::{GenesisConfig, Module, Trait}; +use primitives::{ + testing::{Digest, DigestItem, Header, UintAuthorityId}, + traits::IdentityLookup, + BuildStorage, +}; use runtime_io; -use substrate_primitives::{H256, Blake2Hasher}; -use crate::{GenesisConfig, Trait, Module}; +use srml_support::impl_outer_origin; +use substrate_primitives::{Blake2Hasher, H256}; -impl_outer_origin!{ - pub enum Origin for Test {} +impl_outer_origin! { + pub enum Origin for Test {} } // Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted. 
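A standalone sketch of the change-detection flow implemented by `set_authorities`, `save_original_authorities`, and `on_finalize` above (and exercised by the tests reformatted further below): the first mutation snapshots the original set, and finalization emits an `AuthoritiesChange`-style log only when the final set differs from that snapshot, so changing the set back to the original emits nothing. Storage becomes a plain struct here; names are illustrative.

struct Consensus {
    authorities: Vec<u64>,
    original: Option<Vec<u64>>, // `Some` iff the set changed this block
}

impl Consensus {
    fn set_authorities(&mut self, new: &[u64]) {
        if self.authorities != new {
            // Snapshot the pre-change set once; later calls keep the first snapshot.
            let snapshot = self.authorities.clone();
            self.original.get_or_insert(snapshot);
            self.authorities = new.to_vec();
        }
    }

    // Returns the "log" to deposit, if the set really changed this block.
    fn on_finalize(&mut self) -> Option<Vec<u64>> {
        match self.original.take() {
            Some(original) if original != self.authorities => Some(self.authorities.clone()),
            _ => None,
        }
    }
}

fn main() {
    let mut c = Consensus { authorities: vec![1, 2, 3], original: None };
    c.set_authorities(&[4, 5, 6]);
    assert_eq!(c.on_finalize(), Some(vec![4, 5, 6])); // changed: log deposited
    c.set_authorities(&[4, 5, 6]); // same set: nothing recorded
    assert_eq!(c.on_finalize(), None);
    c.set_authorities(&[1, 2, 3]);
    c.set_authorities(&[4, 5, 6]); // changed back to the original set
    assert_eq!(c.on_finalize(), None); // no net change: no log
}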
#[derive(Clone, PartialEq, Eq, Debug)] pub struct Test; impl Trait for Test { - type Log = DigestItem; - type SessionKey = UintAuthorityId; - type InherentOfflineReport = crate::InstantFinalityReportVec<()>; + type Log = DigestItem; + type SessionKey = UintAuthorityId; + type InherentOfflineReport = crate::InstantFinalityReportVec<()>; } impl system::Trait for Test { - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Hash = H256; - type Hashing = ::primitives::traits::BlakeTwo256; - type Digest = Digest; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type Log = DigestItem; + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Hashing = ::primitives::traits::BlakeTwo256; + type Digest = Digest; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = (); + type Log = DigestItem; } pub fn new_test_ext(authorities: Vec) -> runtime_io::TestExternalities { - let mut t = system::GenesisConfig::::default().build_storage().unwrap().0; - t.extend(GenesisConfig::{ - code: vec![], - authorities: authorities.into_iter().map(|a| UintAuthorityId(a)).collect(), - }.build_storage().unwrap().0); - t.into() + let mut t = system::GenesisConfig::::default() + .build_storage() + .unwrap() + .0; + t.extend( + GenesisConfig:: { + code: vec![], + authorities: authorities + .into_iter() + .map(|a| UintAuthorityId(a)) + .collect(), + } + .build_storage() + .unwrap() + .0, + ); + t.into() } pub type System = system::Module; diff --git a/srml/consensus/src/tests.rs b/srml/consensus/src/tests.rs index bf8b3a09f3..1dd57a6f75 100644 --- a/srml/consensus/src/tests.rs +++ b/srml/consensus/src/tests.rs @@ -18,114 +18,107 @@ #![cfg(test)] -use primitives::{generic, testing::{self, UintAuthorityId}, traits::OnFinalize}; -use runtime_io::with_externalities; -use crate::mock::{Consensus, System, new_test_ext}; +use crate::mock::{new_test_ext, Consensus, System}; use inherents::{InherentData, ProvideInherent}; +use primitives::{ + generic, + testing::{self, UintAuthorityId}, + traits::OnFinalize, +}; +use runtime_io::with_externalities; #[test] fn authorities_change_logged() { - with_externalities(&mut new_test_ext(vec![1, 2, 3]), || { - System::initialize(&1, &Default::default(), &Default::default()); - Consensus::set_authorities(&[UintAuthorityId(4), UintAuthorityId(5), UintAuthorityId(6)]); - Consensus::on_finalize(1); - let header = System::finalize(); - assert_eq!(header.digest, testing::Digest { - logs: vec![ - generic::DigestItem::AuthoritiesChange( - vec![ - UintAuthorityId(4).into(), - UintAuthorityId(5).into(), - UintAuthorityId(6).into() - ] - ), - ], - }); - }); + with_externalities(&mut new_test_ext(vec![1, 2, 3]), || { + System::initialize(&1, &Default::default(), &Default::default()); + Consensus::set_authorities(&[UintAuthorityId(4), UintAuthorityId(5), UintAuthorityId(6)]); + Consensus::on_finalize(1); + let header = System::finalize(); + assert_eq!( + header.digest, + testing::Digest { + logs: vec![generic::DigestItem::AuthoritiesChange(vec![ + UintAuthorityId(4).into(), + UintAuthorityId(5).into(), + UintAuthorityId(6).into() + ]),], + } + ); + }); } #[test] fn partial_authorities_change_logged() { - with_externalities(&mut new_test_ext(vec![1, 2, 3]), || { - System::initialize(&2, &Default::default(), &Default::default()); - Consensus::set_authorities(&[UintAuthorityId(2), UintAuthorityId(4), UintAuthorityId(5)]); - Consensus::on_finalize(2); - let 
header = System::finalize(); - assert_eq!(header.digest, testing::Digest { - logs: vec![ - generic::DigestItem::AuthoritiesChange( - vec![ - UintAuthorityId(2).into(), - UintAuthorityId(4).into(), - UintAuthorityId(5).into() - ] - ), - ], - }); - }); + with_externalities(&mut new_test_ext(vec![1, 2, 3]), || { + System::initialize(&2, &Default::default(), &Default::default()); + Consensus::set_authorities(&[UintAuthorityId(2), UintAuthorityId(4), UintAuthorityId(5)]); + Consensus::on_finalize(2); + let header = System::finalize(); + assert_eq!( + header.digest, + testing::Digest { + logs: vec![generic::DigestItem::AuthoritiesChange(vec![ + UintAuthorityId(2).into(), + UintAuthorityId(4).into(), + UintAuthorityId(5).into() + ]),], + } + ); + }); } #[test] fn authorities_change_is_not_logged_when_not_changed() { - with_externalities(&mut new_test_ext(vec![1, 2, 3]), || { - System::initialize(&1, &Default::default(), &Default::default()); - Consensus::on_finalize(1); - let header = System::finalize(); - assert_eq!(header.digest, testing::Digest { - logs: vec![], - }); - }); + with_externalities(&mut new_test_ext(vec![1, 2, 3]), || { + System::initialize(&1, &Default::default(), &Default::default()); + Consensus::on_finalize(1); + let header = System::finalize(); + assert_eq!(header.digest, testing::Digest { logs: vec![] }); + }); } #[test] fn authorities_change_is_not_logged_when_changed_back_to_original() { - with_externalities(&mut new_test_ext(vec![1, 2, 3]), || { - System::initialize(&1, &Default::default(), &Default::default()); - Consensus::set_authorities(&[UintAuthorityId(4), UintAuthorityId(5), UintAuthorityId(6)]); - Consensus::set_authorities(&[UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)]); - Consensus::on_finalize(1); - let header = System::finalize(); - assert_eq!(header.digest, testing::Digest { - logs: vec![], - }); - }); + with_externalities(&mut new_test_ext(vec![1, 2, 3]), || { + System::initialize(&1, &Default::default(), &Default::default()); + Consensus::set_authorities(&[UintAuthorityId(4), UintAuthorityId(5), UintAuthorityId(6)]); + Consensus::set_authorities(&[UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)]); + Consensus::on_finalize(1); + let header = System::finalize(); + assert_eq!(header.digest, testing::Digest { logs: vec![] }); + }); } #[test] fn offline_report_can_be_excluded() { - with_externalities(&mut new_test_ext(vec![1, 2, 3]), || { - System::initialize(&1, &Default::default(), &Default::default()); - assert!(Consensus::create_inherent(&InherentData::new()).is_none()); + with_externalities(&mut new_test_ext(vec![1, 2, 3]), || { + System::initialize(&1, &Default::default(), &Default::default()); + assert!(Consensus::create_inherent(&InherentData::new()).is_none()); - let offline_report: Vec = vec![0]; - let mut data = InherentData::new(); - data.put_data(super::INHERENT_IDENTIFIER, &offline_report).unwrap(); + let offline_report: Vec = vec![0]; + let mut data = InherentData::new(); + data.put_data(super::INHERENT_IDENTIFIER, &offline_report) + .unwrap(); - assert!(Consensus::create_inherent(&data).is_some()); - }); + assert!(Consensus::create_inherent(&data).is_some()); + }); } #[test] fn set_and_kill_storage_work() { - use srml_support::storage; + use srml_support::storage; - with_externalities(&mut new_test_ext(vec![1, 2, 3]), || { - System::initialize(&1, &Default::default(), &Default::default()); + with_externalities(&mut new_test_ext(vec![1, 2, 3]), || { + System::initialize(&1, &Default::default(), &Default::default()); - let 
item = (vec![42u8], vec![42u8]); + let item = (vec![42u8], vec![42u8]); - Consensus::set_storage(vec![item.clone()]).unwrap(); + Consensus::set_storage(vec![item.clone()]).unwrap(); - assert_eq!( - storage::unhashed::get_raw(&item.0), - Some(item.1), - ); + assert_eq!(storage::unhashed::get_raw(&item.0), Some(item.1),); - Consensus::kill_storage(vec![item.0.clone()]).unwrap(); + Consensus::kill_storage(vec![item.0.clone()]).unwrap(); - assert_eq!( - storage::unhashed::get_raw(&item.0), - None, - ); - }); + assert_eq!(storage::unhashed::get_raw(&item.0), None,); + }); } diff --git a/srml/contract/src/account_db.rs b/srml/contract/src/account_db.rs index 6b5142b6c8..eff4bef6af 100644 --- a/srml/contract/src/account_db.rs +++ b/srml/contract/src/account_db.rs @@ -16,273 +16,298 @@ //! Auxiliaries to help with managing partial changes to account state. -use super::{CodeHash, CodeHashOf, Trait, AccountInfo, TrieId, AccountInfoOf, BalanceOf}; -use system; +use super::{AccountInfo, AccountInfoOf, BalanceOf, CodeHash, CodeHashOf, Trait, TrieId}; use rstd::cell::RefCell; -use rstd::rc::Rc; use rstd::collections::btree_map::{BTreeMap, Entry}; use rstd::prelude::*; +use rstd::rc::Rc; use runtime_primitives::traits::Zero; -use srml_support::{StorageMap, traits::{UpdateBalanceOutcome, - SignedImbalance, Currency, Imbalance}, storage::child}; +use srml_support::{ + storage::child, + traits::{Currency, Imbalance, SignedImbalance, UpdateBalanceOutcome}, + StorageMap, +}; +use system; pub struct ChangeEntry<T: Trait> { - balance: Option<BalanceOf<T>>, - /// If the outer option is `None` the code_hash remains untouched, while providing `Some(None)` signifies removing the code in question - code: Option<Option<CodeHash<T>>>, - storage: BTreeMap<Vec<u8>, Option<Vec<u8>>>, + balance: Option<BalanceOf<T>>, + /// If the outer option is `None` the code_hash remains untouched, while providing `Some(None)` signifies removing the code in question + code: Option<Option<CodeHash<T>>>, + storage: BTreeMap<Vec<u8>, Option<Vec<u8>>>, } // Cannot derive(Default) since it erroneously bounds T by Default.
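The trailing comment is worth unpacking for readers who have not hit this derive limitation: `#[derive(Default)]` on a generic struct adds a `T: Default` bound even when no field stores a `T` directly, which is why the impl that follows is written by hand. A minimal, self-contained sketch of the workaround (toy types, not the runtime's):

use std::collections::BTreeMap;
use std::marker::PhantomData;

struct NoDefault; // a type parameter that does not implement Default

// `#[derive(Default)]` here would demand `T: Default`, so
// `Entry::<NoDefault>::default()` would fail to compile.
struct Entry<T> {
    storage: BTreeMap<Vec<u8>, Option<Vec<u8>>>,
    _marker: PhantomData<T>,
}

// The manual impl only constructs the fields, so `T` stays unbounded.
impl<T> Default for Entry<T> {
    fn default() -> Self {
        Entry { storage: BTreeMap::new(), _marker: PhantomData }
    }
}

fn main() {
    let e: Entry<NoDefault> = Default::default();
    assert!(e.storage.is_empty());
}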
impl<T: Trait> Default for ChangeEntry<T> { - fn default() -> Self { - ChangeEntry { - balance: Default::default(), - code: Default::default(), - storage: Default::default(), - } - } + fn default() -> Self { + ChangeEntry { + balance: Default::default(), + code: Default::default(), + storage: Default::default(), + } + } } pub type ChangeSet<T> = BTreeMap<<T as system::Trait>::AccountId, ChangeEntry<T>>; #[derive(Clone, Default)] pub struct AccountTrieIdMapping<A: Ord> { - to_account: BTreeMap<TrieId, A>, - to_key: BTreeMap<A, TrieId>, - // this lock is related to the way OverlayAccountDb instances stack: - // if set here it must be unset at the lower level - lock: bool, + to_account: BTreeMap<TrieId, A>, + to_key: BTreeMap<A, TrieId>, + // this lock is related to the way OverlayAccountDb instances stack: + // if set here it must be unset at the lower level + lock: bool, } impl<A: Ord + Clone> AccountTrieIdMapping<A> { + pub fn new() -> Self { + AccountTrieIdMapping { + to_account: BTreeMap::new(), + to_key: BTreeMap::new(), + lock: false, + } + } - pub fn new() -> Self { - AccountTrieIdMapping { - to_account: BTreeMap::new(), - to_key: BTreeMap::new(), - lock: false, - } - } - - pub fn lock(&mut self) { - self.lock = true; - } - pub fn unlock(&mut self) { - self.lock = false; - } - pub fn insert(&mut self, account: A, ks: TrieId) { - self.to_account.insert(ks.clone(), account.clone()); - self.to_key.insert(account, ks); - } - pub fn get_trieid(&self, account: &A) -> Option<&TrieId> { - if self.lock { return None } - self.to_key.get(account) - } - pub fn get_account(&self, ks: &TrieId) -> Option<&A> { - if self.lock { return None } - self.to_account.get(ks) - } - + pub fn lock(&mut self) { + self.lock = true; + } + pub fn unlock(&mut self) { + self.lock = false; + } + pub fn insert(&mut self, account: A, ks: TrieId) { + self.to_account.insert(ks.clone(), account.clone()); + self.to_key.insert(account, ks); + } + pub fn get_trieid(&self, account: &A) -> Option<&TrieId> { + if self.lock { + return None; + } + self.to_key.get(account) + } + pub fn get_account(&self, ks: &TrieId) -> Option<&A> { + if self.lock { + return None; + } + self.to_account.get(ks) + } } pub trait AccountDb<T: Trait> { - fn get_account_info(&self, account: &T::AccountId) -> Option<AccountInfo>; - fn get_or_create_trieid(&self, account: &T::AccountId) -> TrieId; - fn get_storage(&self, trie_id: &TrieId, location: &[u8]) -> Option<Vec<u8>>; - fn get_code(&self, account: &T::AccountId) -> Option<CodeHash<T>>; - fn get_balance(&self, account: &T::AccountId) -> BalanceOf<T>; + fn get_account_info(&self, account: &T::AccountId) -> Option<AccountInfo>; + fn get_or_create_trieid(&self, account: &T::AccountId) -> TrieId; + fn get_storage(&self, trie_id: &TrieId, location: &[u8]) -> Option<Vec<u8>>; + fn get_code(&self, account: &T::AccountId) -> Option<CodeHash<T>>; + fn get_balance(&self, account: &T::AccountId) -> BalanceOf<T>; - fn commit(&mut self, change_set: ChangeSet<T>); + fn commit(&mut self, change_set: ChangeSet<T>); } pub struct DirectAccountDb; impl<T: Trait> AccountDb<T> for DirectAccountDb { - fn get_account_info(&self, account: &T::AccountId) -> Option<AccountInfo> { - let res: Option<AccountInfo> = AccountInfoOf::<T>::get(account); - res - } - fn get_or_create_trieid(&self, account: &T::AccountId) -> TrieId { - use super::TrieIdGenerator; - <Self as AccountDb<T>>::get_account_info(self, account) - .map(|s|s.trie_id) - .unwrap_or_else(||<T as Trait>::TrieIdGenerator::trie_id(account)) - } - fn get_storage(&self, trie_id: &TrieId, location: &[u8]) -> Option<Vec<u8>> { - child::get_raw(trie_id, location) - } - fn get_code(&self, account: &T::AccountId) -> Option<CodeHash<T>> { - <CodeHashOf<T>>::get(account) - } - fn get_balance(&self, account: &T::AccountId) -> BalanceOf<T> { - T::Currency::free_balance(account) - } - fn commit(&mut self, s: ChangeSet<T>) { - let
mut total_imbalance = SignedImbalance::zero(); - for (address, changed) in s.into_iter() { - let trieid = >::get_or_create_trieid(&self, &address); - if let Some(balance) = changed.balance { - let (imbalance, outcome) = T::Currency::make_free_balance_be(&address, balance); - total_imbalance = total_imbalance.merge(imbalance); - if let UpdateBalanceOutcome::AccountKilled = outcome { - // Account killed. This will ultimately lead to calling `OnFreeBalanceZero` callback - // which will make removal of CodeHashOf and AccountStorage for this account. - // In order to avoid writing over the deleted properties we `continue` here. - continue; - } - } - if let Some(code) = changed.code { - if let Some(code) = code { - >::insert(&address, code); - } else { - >::remove(&address); - } - } - for (k, v) in changed.storage.into_iter() { - if let Some(value) = v { - child::put_raw(&trieid[..], &k, &value[..]); - } else { - child::kill(&trieid[..], &k); - } - } - } + fn get_account_info(&self, account: &T::AccountId) -> Option { + let res: Option = AccountInfoOf::::get(account); + res + } + fn get_or_create_trieid(&self, account: &T::AccountId) -> TrieId { + use super::TrieIdGenerator; + >::get_account_info(self, account) + .map(|s| s.trie_id) + .unwrap_or_else(|| ::TrieIdGenerator::trie_id(account)) + } + fn get_storage(&self, trie_id: &TrieId, location: &[u8]) -> Option> { + child::get_raw(trie_id, location) + } + fn get_code(&self, account: &T::AccountId) -> Option> { + >::get(account) + } + fn get_balance(&self, account: &T::AccountId) -> BalanceOf { + T::Currency::free_balance(account) + } + fn commit(&mut self, s: ChangeSet) { + let mut total_imbalance = SignedImbalance::zero(); + for (address, changed) in s.into_iter() { + let trieid = >::get_or_create_trieid(&self, &address); + if let Some(balance) = changed.balance { + let (imbalance, outcome) = T::Currency::make_free_balance_be(&address, balance); + total_imbalance = total_imbalance.merge(imbalance); + if let UpdateBalanceOutcome::AccountKilled = outcome { + // Account killed. This will ultimately lead to calling `OnFreeBalanceZero` callback + // which will make removal of CodeHashOf and AccountStorage for this account. + // In order to avoid writing over the deleted properties we `continue` here. + continue; + } + } + if let Some(code) = changed.code { + if let Some(code) = code { + >::insert(&address, code); + } else { + >::remove(&address); + } + } + for (k, v) in changed.storage.into_iter() { + if let Some(value) = v { + child::put_raw(&trieid[..], &k, &value[..]); + } else { + child::kill(&trieid[..], &k); + } + } + } - match total_imbalance { - // If we've detected a positive imbalance as a result of our contract-level machinations - // then it's indicative of a buggy contracts system. - // Panicking is far from ideal as it opens up a DoS attack on block validators, however - // it's a less bad option than allowing arbitrary value to be created. - SignedImbalance::Positive(ref p) if !p.peek().is_zero() => - panic!("contract subsystem resulting in positive imbalance!"), - _ => {} - } - } + match total_imbalance { + // If we've detected a positive imbalance as a result of our contract-level machinations + // then it's indicative of a buggy contracts system. + // Panicking is far from ideal as it opens up a DoS attack on block validators, however + // it's a less bad option than allowing arbitrary value to be created. 
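The `SignedImbalance` bookkeeping above reduces to a simple invariant: every balance write yields a delta, and the deltas of one commit must net out to zero or less, otherwise the contracts layer would have minted value out of thin air. A toy model of that check, with plain integers standing in for the Currency imbalance types (an assumption for illustration only):

// Each `make_free_balance_be` yields a signed delta; `commit` requires the
// merged total to be non-positive, and panics otherwise, exactly as above.
fn settle(deltas: &[i128]) -> i128 {
    let net: i128 = deltas.iter().sum();
    assert!(net <= 0, "contract subsystem resulting in positive imbalance!");
    net
}

fn main() {
    // A transfer (-55 from one account, +55 to another) nets to zero: fine.
    assert_eq!(settle(&[-55, 55]), 0);
    // A burn nets negative: also fine. A positive net would panic.
    assert_eq!(settle(&[-10]), -10);
}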
+ SignedImbalance::Positive(ref p) if !p.peek().is_zero() => { + panic!("contract subsystem resulting in positive imbalance!") + } + _ => {} + } + } } pub struct OverlayAccountDb<'a, T: Trait + 'a> { - local: RefCell>, - trie_account: Rc::AccountId>>>, - trie_account_cache: bool, - underlying: &'a AccountDb, + local: RefCell>, + trie_account: Rc::AccountId>>>, + trie_account_cache: bool, + underlying: &'a AccountDb, } impl<'a, T: Trait> OverlayAccountDb<'a, T> { - pub fn new( - underlying: &'a AccountDb, - trie_account: Rc::AccountId>>>, - trie_account_cache: bool, - ) -> OverlayAccountDb<'a, T> { - OverlayAccountDb { - local: RefCell::new(ChangeSet::new()), - trie_account, - trie_account_cache, - underlying, - } - } + pub fn new( + underlying: &'a AccountDb, + trie_account: Rc::AccountId>>>, + trie_account_cache: bool, + ) -> OverlayAccountDb<'a, T> { + OverlayAccountDb { + local: RefCell::new(ChangeSet::new()), + trie_account, + trie_account_cache, + underlying, + } + } - pub fn reg_cache_new_rc(&self) -> Rc::AccountId>>> { - self.trie_account.clone() - } + pub fn reg_cache_new_rc( + &self, + ) -> Rc::AccountId>>> { + self.trie_account.clone() + } - pub fn into_change_set(self) -> ChangeSet { - self.local.into_inner() - } + pub fn into_change_set(self) -> ChangeSet { + self.local.into_inner() + } - pub fn set_storage( - &mut self, - account: &T::AccountId, - location: Vec, - value: Option>, - ) { - self.local.borrow_mut() - .entry(account.clone()) - .or_insert(Default::default()) - .storage - .insert(location, value); - } + pub fn set_storage( + &mut self, + account: &T::AccountId, + location: Vec, + value: Option>, + ) { + self.local + .borrow_mut() + .entry(account.clone()) + .or_insert(Default::default()) + .storage + .insert(location, value); + } - pub fn set_code(&mut self, account: &T::AccountId, code: Option>) { - self.local - .borrow_mut() - .entry(account.clone()) - .or_insert(Default::default()) - .code = Some(code); - } - pub fn set_balance(&mut self, account: &T::AccountId, balance: BalanceOf) { - self.local - .borrow_mut() - .entry(account.clone()) - .or_insert(Default::default()) - .balance = Some(balance); - } + pub fn set_code(&mut self, account: &T::AccountId, code: Option>) { + self.local + .borrow_mut() + .entry(account.clone()) + .or_insert(Default::default()) + .code = Some(code); + } + pub fn set_balance(&mut self, account: &T::AccountId, balance: BalanceOf) { + self.local + .borrow_mut() + .entry(account.clone()) + .or_insert(Default::default()) + .balance = Some(balance); + } } impl<'a, T: Trait> AccountDb for OverlayAccountDb<'a, T> { - fn get_account_info(&self, account: &T::AccountId) -> Option { - let v = self.underlying.get_account_info(account); - if self.trie_account_cache { - v.as_ref().map(|v|self.trie_account.as_ref().borrow_mut().insert(account.clone(), v.trie_id.clone())); - } - v - } - fn get_or_create_trieid(&self, account: &T::AccountId) -> TrieId { - if self.trie_account_cache { - let mut ka_mut = self.trie_account.as_ref().borrow_mut(); - if let Some(v) = ka_mut.get_trieid(account) { - v.clone() - } else { - ka_mut.unlock(); - let v = self.underlying.get_or_create_trieid(account); - ka_mut.insert(account.clone(), v.clone()); - v - } - } else { - let res = self.trie_account.as_ref().borrow().get_trieid(account).map(|v|v.clone()); - res.unwrap_or_else(|| { - self.trie_account.as_ref().borrow_mut().lock(); - self.underlying.get_or_create_trieid(account) - }) - } - } - fn get_storage(&self, ks: &TrieId, location: &[u8]) -> Option> { - 
self.trie_account.as_ref().borrow().get_account(ks).and_then(|account| self.local - .borrow() - .get(&account) - .and_then(|a| a.storage.get(location)) - .cloned() - .unwrap_or_else(|| self.underlying.get_storage(ks, location))) - } - fn get_code(&self, account: &T::AccountId) -> Option> { - self.local - .borrow() - .get(account) - .and_then(|a| a.code.clone()) - .unwrap_or_else(|| self.underlying.get_code(account)) - } - fn get_balance(&self, account: &T::AccountId) -> BalanceOf { - self.local - .borrow() - .get(account) - .and_then(|a| a.balance) - .unwrap_or_else(|| self.underlying.get_balance(account)) - } - fn commit(&mut self, s: ChangeSet) { - let mut local = self.local.borrow_mut(); + fn get_account_info(&self, account: &T::AccountId) -> Option { + let v = self.underlying.get_account_info(account); + if self.trie_account_cache { + v.as_ref().map(|v| { + self.trie_account + .as_ref() + .borrow_mut() + .insert(account.clone(), v.trie_id.clone()) + }); + } + v + } + fn get_or_create_trieid(&self, account: &T::AccountId) -> TrieId { + if self.trie_account_cache { + let mut ka_mut = self.trie_account.as_ref().borrow_mut(); + if let Some(v) = ka_mut.get_trieid(account) { + v.clone() + } else { + ka_mut.unlock(); + let v = self.underlying.get_or_create_trieid(account); + ka_mut.insert(account.clone(), v.clone()); + v + } + } else { + let res = self + .trie_account + .as_ref() + .borrow() + .get_trieid(account) + .map(|v| v.clone()); + res.unwrap_or_else(|| { + self.trie_account.as_ref().borrow_mut().lock(); + self.underlying.get_or_create_trieid(account) + }) + } + } + fn get_storage(&self, ks: &TrieId, location: &[u8]) -> Option> { + self.trie_account + .as_ref() + .borrow() + .get_account(ks) + .and_then(|account| { + self.local + .borrow() + .get(&account) + .and_then(|a| a.storage.get(location)) + .cloned() + .unwrap_or_else(|| self.underlying.get_storage(ks, location)) + }) + } + fn get_code(&self, account: &T::AccountId) -> Option> { + self.local + .borrow() + .get(account) + .and_then(|a| a.code.clone()) + .unwrap_or_else(|| self.underlying.get_code(account)) + } + fn get_balance(&self, account: &T::AccountId) -> BalanceOf { + self.local + .borrow() + .get(account) + .and_then(|a| a.balance) + .unwrap_or_else(|| self.underlying.get_balance(account)) + } + fn commit(&mut self, s: ChangeSet) { + let mut local = self.local.borrow_mut(); - for (address, changed) in s.into_iter() { - match local.entry(address) { - Entry::Occupied(e) => { - let mut value = e.into_mut(); - if changed.balance.is_some() { - value.balance = changed.balance; - } - if changed.code.is_some() { - value.code = changed.code; - } - value.storage.extend(changed.storage.into_iter()); - } - Entry::Vacant(e) => { - e.insert(changed); - } - } - } - } + for (address, changed) in s.into_iter() { + match local.entry(address) { + Entry::Occupied(e) => { + let mut value = e.into_mut(); + if changed.balance.is_some() { + value.balance = changed.balance; + } + if changed.code.is_some() { + value.code = changed.code; + } + value.storage.extend(changed.storage.into_iter()); + } + Entry::Vacant(e) => { + e.insert(changed); + } + } + } + } } diff --git a/srml/contract/src/exec.rs b/srml/contract/src/exec.rs index afd5159fce..cd9e9dc7f1 100644 --- a/srml/contract/src/exec.rs +++ b/srml/contract/src/exec.rs @@ -14,15 +14,15 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
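Before moving into exec.rs, the merge rule used by `OverlayAccountDb::commit` above deserves a standalone illustration: newer balances shadow older ones, while storage writes are unioned key by key. A minimal sketch with std types (the field set is reduced for brevity, and the account key is a bare integer):

use std::collections::btree_map::{BTreeMap, Entry};

#[derive(Default)]
struct Change {
    balance: Option<u64>,
    storage: BTreeMap<Vec<u8>, Option<Vec<u8>>>,
}

// Same shape as the `commit` above: occupied entries are merged field by
// field, vacant entries take the incoming change wholesale.
fn merge(local: &mut BTreeMap<u32, Change>, incoming: BTreeMap<u32, Change>) {
    for (account, changed) in incoming {
        match local.entry(account) {
            Entry::Occupied(e) => {
                let value = e.into_mut();
                if changed.balance.is_some() {
                    value.balance = changed.balance;
                }
                value.storage.extend(changed.storage);
            }
            Entry::Vacant(e) => {
                e.insert(changed);
            }
        }
    }
}

fn main() {
    let mut local = BTreeMap::new();
    local.insert(1u32, Change { balance: Some(100), ..Default::default() });

    let mut incoming = BTreeMap::new();
    incoming.insert(1u32, Change { balance: Some(45), ..Default::default() });
    merge(&mut local, incoming);

    assert_eq!(local[&1].balance, Some(45)); // the newer write wins
}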
-use super::{CodeHash, Config, ContractAddressFor, Event, RawEvent, Trait, TrieId, BalanceOf}; -use crate::account_db::{AccountDb, DirectAccountDb, OverlayAccountDb, AccountTrieIdMapping}; -use crate::gas::{GasMeter, Token, approx_gas_for_balance}; +use super::{BalanceOf, CodeHash, Config, ContractAddressFor, Event, RawEvent, Trait, TrieId}; +use crate::account_db::{AccountDb, AccountTrieIdMapping, DirectAccountDb, OverlayAccountDb}; +use crate::gas::{approx_gas_for_balance, GasMeter, Token}; -use rstd::prelude::*; use rstd::cell::RefCell; +use rstd::prelude::*; use rstd::rc::Rc; use runtime_primitives::traits::{CheckedAdd, CheckedSub, Zero}; -use srml_support::traits::{WithdrawReason, Currency}; +use srml_support::traits::{Currency, WithdrawReason}; use timestamp; pub type AccountIdOf<T> = <T as system::Trait>::AccountId; @@ -32,13 +32,13 @@ pub type SeedOf<T> = <T as system::Trait>::Hash; #[cfg_attr(test, derive(Debug))] pub struct InstantiateReceipt<AccountId> { - pub address: AccountId, + pub address: AccountId, } #[cfg_attr(test, derive(Debug))] pub struct CallReceipt { - /// Output data received as a result of a call. - pub output_data: Vec<u8>, + /// Output data received as a result of a call. + pub output_data: Vec<u8>, } /// An interface that provides access to the external environment in which the @@ -47,77 +47,76 @@ pub struct CallReceipt { /// This interface is specialized to an account of the executing code, so all /// operations are implicitly performed on that account. pub trait Ext { - type T: Trait; - - /// Returns the storage entry of the executing account by the given `key`. - /// - /// Returns `None` if the `key` wasn't previously set by `set_storage` or - /// was deleted. - fn get_storage(&self, key: &[u8]) -> Option<Vec<u8>>; - - /// Sets the storage entry by the given key to the specified value. - /// - /// If `value` is `None` then the storage entry is deleted. - fn set_storage(&mut self, key: &[u8], value: Option<Vec<u8>>); - - /// Instantiate a contract from the given code. - /// - /// The newly created account will be associated with `code`. `value` specifies the amount of value - /// transferred from this to the newly created account (also known as endowment). - fn instantiate( - &mut self, - code: &CodeHash<Self::T>, - value: BalanceOf<Self::T>, - gas_meter: &mut GasMeter<Self::T>, - input_data: &[u8], - ) -> Result<InstantiateReceipt<AccountIdOf<Self::T>>, &'static str>; - - /// Call (possibly transferring some amount of funds) into the specified account. - fn call( - &mut self, - to: &AccountIdOf<Self::T>, - value: BalanceOf<Self::T>, - gas_meter: &mut GasMeter<Self::T>, - input_data: &[u8], - empty_output_buf: EmptyOutputBuf, - ) -> Result<CallReceipt, &'static str>; - - /// Notes a call dispatch. - fn note_dispatch_call(&mut self, call: CallOf<Self::T>); - - /// Returns a reference to the account id of the caller. - fn caller(&self) -> &AccountIdOf<Self::T>; - - /// Returns a reference to the account id of the current contract. - fn address(&self) -> &AccountIdOf<Self::T>; - - - /// Returns the balance of the current contract. - /// - /// The `value_transferred` is already added. - fn balance(&self) -> BalanceOf<Self::T>; - - /// Returns the value transferred along with this call or as endowment. - fn value_transferred(&self) -> BalanceOf<Self::T>; - - /// Returns a reference to the timestamp of the current block - fn now(&self) -> &MomentOf<Self::T>; - - /// Returns a reference to the random seed for the current block - fn random_seed(&self) -> &SeedOf<Self::T>; + type T: Trait; + + /// Returns the storage entry of the executing account by the given `key`. + /// + /// Returns `None` if the `key` wasn't previously set by `set_storage` or + /// was deleted. + fn get_storage(&self, key: &[u8]) -> Option<Vec<u8>>; + + /// Sets the storage entry by the given key to the specified value. + /// + /// If `value` is `None` then the storage entry is deleted. + fn set_storage(&mut self, key: &[u8], value: Option<Vec<u8>>); + + /// Instantiate a contract from the given code. + /// + /// The newly created account will be associated with `code`. `value` specifies the amount of value + /// transferred from this to the newly created account (also known as endowment). + fn instantiate( + &mut self, + code: &CodeHash<Self::T>, + value: BalanceOf<Self::T>, + gas_meter: &mut GasMeter<Self::T>, + input_data: &[u8], + ) -> Result<InstantiateReceipt<AccountIdOf<Self::T>>, &'static str>; + + /// Call (possibly transferring some amount of funds) into the specified account. + fn call( + &mut self, + to: &AccountIdOf<Self::T>, + value: BalanceOf<Self::T>, + gas_meter: &mut GasMeter<Self::T>, + input_data: &[u8], + empty_output_buf: EmptyOutputBuf, + ) -> Result<CallReceipt, &'static str>; + + /// Notes a call dispatch. + fn note_dispatch_call(&mut self, call: CallOf<Self::T>); + + /// Returns a reference to the account id of the caller. + fn caller(&self) -> &AccountIdOf<Self::T>; + + /// Returns a reference to the account id of the current contract. + fn address(&self) -> &AccountIdOf<Self::T>; + + /// Returns the balance of the current contract. + /// + /// The `value_transferred` is already added. + fn balance(&self) -> BalanceOf<Self::T>; + + /// Returns the value transferred along with this call or as endowment. + fn value_transferred(&self) -> BalanceOf<Self::T>; + + /// Returns a reference to the timestamp of the current block + fn now(&self) -> &MomentOf<Self::T>; + + /// Returns a reference to the random seed for the current block + fn random_seed(&self) -> &SeedOf<Self::T>; } /// Loader is a companion of the `Vm` trait. It loads an appropriate abstract /// executable to be executed by an accompanying `Vm` implementation. pub trait Loader<T: Trait> { - type Executable; - - /// Load the initializer portion of the code specified by the `code_hash`. This - /// executable is called upon instantiation. - fn load_init(&self, code_hash: &CodeHash<T>) -> Result<Self::Executable, &'static str>; - /// Load the main portion of the code specified by the `code_hash`. This executable - /// is called for each call to a contract. - fn load_main(&self, code_hash: &CodeHash<T>) -> Result<Self::Executable, &'static str>; + type Executable; + + /// Load the initializer portion of the code specified by the `code_hash`. This + /// executable is called upon instantiation. + fn load_init(&self, code_hash: &CodeHash<T>) -> Result<Self::Executable, &'static str>; + /// Load the main portion of the code specified by the `code_hash`. This executable + /// is called for each call to a contract. + fn load_main(&self, code_hash: &CodeHash<T>) -> Result<Self::Executable, &'static str>; } /// An `EmptyOutputBuf` is used as an optimization for reusing empty vectors when @@ -128,30 +127,37 @@ pub trait Loader<T: Trait> { pub struct EmptyOutputBuf(Vec<u8>); impl EmptyOutputBuf { - /// Create an output buffer from a spare vector which is not longer needed. - /// - /// All contents are discarded, but capacity is preserved. - pub fn from_spare_vec(mut v: Vec<u8>) -> Self { - v.clear(); - EmptyOutputBuf(v) - } - - /// Create an output buffer ready for receiving a result. - /// - /// Use this function to create output buffer if you don't have a spare - /// vector. Otherwise, use `from_spare_vec`. - pub fn new() -> Self { - EmptyOutputBuf(Vec::new()) - } - - /// Write to the buffer result of the specified size. - /// - /// Calls closure with the buffer of the requested size.
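The buffer-reuse trick documented here is small enough to sketch in isolation: the caller donates a cleared vector, the closure writes exactly `size` bytes, and the allocation survives across calls. A free-standing approximation (not the exact `EmptyOutputBuf` API, just the same idea):

// Resize the donated buffer to `size`, let the closure fill it, and hand the
// (possibly reused) allocation back on success.
fn fill<E, F: FnOnce(&mut [u8]) -> Result<(), E>>(
    mut buf: Vec<u8>,
    size: usize,
    f: F,
) -> Result<Vec<u8>, E> {
    assert!(buf.is_empty(), "the vector is always cleared; it's written only once");
    buf.resize(size, 0);
    f(&mut buf).map(|()| buf)
}

fn main() {
    let spare = Vec::with_capacity(1024); // a spare allocation to be reused
    let out = fill(spare, 4, |data| {
        data.copy_from_slice(&[1, 2, 3, 4]);
        Ok::<(), ()>(())
    })
    .expect("closure always returns Ok");
    assert_eq!(out, [1, 2, 3, 4]);
}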
- pub fn fill Result<(), E>>(mut self, size: usize, f: F) -> Result { - assert!(self.0.len() == 0, "the vector is always cleared; it's written only once"); - self.0.resize(size, 0); - f(&mut self.0).map(|()| OutputBuf(self.0)) - } + /// Create an output buffer from a spare vector which is not longer needed. + /// + /// All contents are discarded, but capacity is preserved. + pub fn from_spare_vec(mut v: Vec) -> Self { + v.clear(); + EmptyOutputBuf(v) + } + + /// Create an output buffer ready for receiving a result. + /// + /// Use this function to create output buffer if you don't have a spare + /// vector. Otherwise, use `from_spare_vec`. + pub fn new() -> Self { + EmptyOutputBuf(Vec::new()) + } + + /// Write to the buffer result of the specified size. + /// + /// Calls closure with the buffer of the requested size. + pub fn fill Result<(), E>>( + mut self, + size: usize, + f: F, + ) -> Result { + assert!( + self.0.len() == 0, + "the vector is always cleared; it's written only once" + ); + self.0.resize(size, 0); + f(&mut self.0).map(|()| OutputBuf(self.0)) + } } /// `OutputBuf` is the end result of filling an `EmptyOutputBuf`. @@ -159,25 +165,25 @@ pub struct OutputBuf(Vec); #[must_use] pub enum VmExecResult { - Ok, - Returned(OutputBuf), - /// A program executed some forbidden operation. - /// - /// This can include, e.g.: division by 0, OOB access or failure to satisfy some precondition - /// of a system call. - /// - /// Contains some vm-specific description of an trap. - Trap(&'static str), + Ok, + Returned(OutputBuf), + /// A program executed some forbidden operation. + /// + /// This can include, e.g.: division by 0, OOB access or failure to satisfy some precondition + /// of a system call. + /// + /// Contains some vm-specific description of an trap. + Trap(&'static str), } impl VmExecResult { - pub fn into_result(self) -> Result, &'static str> { - match self { - VmExecResult::Ok => Ok(Vec::new()), - VmExecResult::Returned(buf) => Ok(buf.0), - VmExecResult::Trap(description) => Err(description), - } - } + pub fn into_result(self) -> Result, &'static str> { + match self { + VmExecResult::Ok => Ok(Vec::new()), + VmExecResult::Returned(buf) => Ok(buf.0), + VmExecResult::Trap(description) => Err(description), + } + } } /// A trait that represent a virtual machine. @@ -192,270 +198,286 @@ impl VmExecResult { /// You can optionally provide a vector for collecting output if a spare is available. If you don't have /// it will be created anyway. pub trait Vm { - type Executable; - - fn execute>( - &self, - exec: &Self::Executable, - ext: &mut E, - input_data: &[u8], - empty_output_buf: EmptyOutputBuf, - gas_meter: &mut GasMeter, - ) -> VmExecResult; + type Executable; + + fn execute>( + &self, + exec: &Self::Executable, + ext: &mut E, + input_data: &[u8], + empty_output_buf: EmptyOutputBuf, + gas_meter: &mut GasMeter, + ) -> VmExecResult; } #[cfg_attr(test, derive(Debug, PartialEq, Eq))] #[derive(Copy, Clone)] pub enum ExecFeeToken { - /// Base fee charged for a call. - Call, - /// Base fee charged for a instantiate. - Instantiate, + /// Base fee charged for a call. + Call, + /// Base fee charged for a instantiate. 
+ Instantiate, } impl Token for ExecFeeToken { - type Metadata = Config; - #[inline] - fn calculate_amount(&self, metadata: &Config) -> T::Gas { - match *self { - ExecFeeToken::Call => metadata.call_base_fee, - ExecFeeToken::Instantiate => metadata.instantiate_base_fee, - } - } + type Metadata = Config; + #[inline] + fn calculate_amount(&self, metadata: &Config) -> T::Gas { + match *self { + ExecFeeToken::Call => metadata.call_base_fee, + ExecFeeToken::Instantiate => metadata.instantiate_base_fee, + } + } } pub struct ExecutionContext<'a, T: Trait + 'a, V, L> { - pub self_account: T::AccountId, - pub self_trieid: TrieId, - pub overlay: OverlayAccountDb<'a, T>, - pub depth: usize, - pub events: Vec>, - pub calls: Vec<(T::AccountId, T::Call)>, - pub config: &'a Config, - pub vm: &'a V, - pub loader: &'a L, + pub self_account: T::AccountId, + pub self_trieid: TrieId, + pub overlay: OverlayAccountDb<'a, T>, + pub depth: usize, + pub events: Vec>, + pub calls: Vec<(T::AccountId, T::Call)>, + pub config: &'a Config, + pub vm: &'a V, + pub loader: &'a L, } impl<'a, T, E, V, L> ExecutionContext<'a, T, V, L> where - T: Trait, - L: Loader, - V: Vm, + T: Trait, + L: Loader, + V: Vm, { - /// Create the top level execution context. - /// - /// The specified `origin` address will be used as `sender` for - pub fn top_level(origin: T::AccountId, cfg: &'a Config, vm: &'a V, loader: &'a L) -> Self { - let overlay = OverlayAccountDb::::new(&DirectAccountDb, Rc::new(RefCell::new(AccountTrieIdMapping::new())), true); - let self_trieid = overlay.get_or_create_trieid(&origin); - ExecutionContext { - self_account: origin, - self_trieid, - depth: 0, - overlay, - events: Vec::new(), - calls: Vec::new(), - config: &cfg, - vm: &vm, - loader: &loader, - } - } - - fn nested(&self, overlay: OverlayAccountDb<'a, T>, dest: T::AccountId) -> Self { - let self_trieid = overlay.get_or_create_trieid(&dest); - ExecutionContext { - overlay, - self_account: dest, - self_trieid, - depth: self.depth + 1, - events: Vec::new(), - calls: Vec::new(), - config: self.config, - vm: self.vm, - loader: self.loader, - } - } - - /// Make a call to the specified address, optionally transfering some funds. 
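The `Token` machinery above feeds the `charge(...).is_out_of_gas()` checks in `call` and `instantiate` below. A stripped-down model of that pattern, with hypothetical fee constants in place of the `Config` metadata:

trait Token {
    fn calculate_amount(&self) -> u64;
}

enum ExecFee { Call, Instantiate }

impl Token for ExecFee {
    fn calculate_amount(&self) -> u64 {
        match self {
            ExecFee::Call => 135,        // assumed base fees, for illustration
            ExecFee::Instantiate => 175, // the real values come from Config
        }
    }
}

struct GasMeter { gas_left: u64, out_of_gas: bool }

impl GasMeter {
    // Charging more than the remainder trips the out-of-gas flag; callers
    // then bail out with an error instead of executing further.
    fn charge(&mut self, token: impl Token) -> &Self {
        let amount = token.calculate_amount();
        if amount > self.gas_left {
            self.gas_left = 0;
            self.out_of_gas = true;
        } else {
            self.gas_left -= amount;
        }
        self
    }
    fn is_out_of_gas(&self) -> bool { self.out_of_gas }
}

fn main() {
    let mut meter = GasMeter { gas_left: 200, out_of_gas: false };
    assert!(!meter.charge(ExecFee::Call).is_out_of_gas()); // 200 -> 65
    assert!(meter.charge(ExecFee::Instantiate).is_out_of_gas()); // 175 > 65
}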
- pub fn call( - &mut self, - dest: T::AccountId, - value: BalanceOf, - gas_meter: &mut GasMeter, - input_data: &[u8], - empty_output_buf: EmptyOutputBuf, - ) -> Result { - if self.depth == self.config.max_depth as usize { - return Err("reached maximum depth, cannot make a call"); - } - - if gas_meter - .charge(self.config, ExecFeeToken::Call) - .is_out_of_gas() - { - return Err("not enough gas to pay base call fee"); - } - - let dest_code_hash = self.overlay.get_code(&dest); - let mut output_data = Vec::new(); - - let (change_set, events, calls) = { - let mut nested = self.nested( - OverlayAccountDb::new(&self.overlay, self.overlay.reg_cache_new_rc(), false), - dest.clone() - ); - - if value > BalanceOf::::zero() { - transfer( - gas_meter, - TransferCause::Call, - &self.self_account, - &dest, - value, - &mut nested, - )?; - } - - if let Some(dest_code_hash) = dest_code_hash { - let executable = self.loader.load_main(&dest_code_hash)?; - output_data = self - .vm - .execute( - &executable, - &mut CallContext { - ctx: &mut nested, - caller: self.self_account.clone(), - value_transferred: value, - timestamp: timestamp::Module::::now(), - random_seed: system::Module::::random_seed(), - }, - input_data, - empty_output_buf, - gas_meter, - ) - .into_result()?; - } - - (nested.overlay.into_change_set(), nested.events, nested.calls) - }; - - self.overlay.commit(change_set); - self.events.extend(events); - self.calls.extend(calls); - - Ok(CallReceipt { output_data }) - } - - pub fn instantiate( - &mut self, - endowment: BalanceOf, - gas_meter: &mut GasMeter, - code_hash: &CodeHash, - input_data: &[u8], - ) -> Result, &'static str> { - if self.depth == self.config.max_depth as usize { - return Err("reached maximum depth, cannot create"); - } - - if gas_meter - .charge(self.config, ExecFeeToken::Instantiate) - .is_out_of_gas() - { - return Err("not enough gas to pay base instantiate fee"); - } - - let dest = T::DetermineContractAddress::contract_address_for( - code_hash, - input_data, - &self.self_account, - ); - - if self.overlay.get_code(&dest).is_some() { - // It should be enough to check only the code. - return Err("contract already exists"); - } - - let (change_set, events, calls) = { - let mut overlay = OverlayAccountDb::new(&self.overlay, self.overlay.reg_cache_new_rc(), false); - - overlay.set_code(&dest, Some(code_hash.clone())); - let mut nested = self.nested(overlay, dest.clone()); - - // Send funds unconditionally here. If the `endowment` is below existential_deposit - // then error will be returned here. - transfer( - gas_meter, - TransferCause::Instantiate, - &self.self_account, - &dest, - endowment, - &mut nested, - )?; - - let executable = self.loader.load_init(&code_hash)?; - self.vm - .execute( - &executable, - &mut CallContext { - ctx: &mut nested, - caller: self.self_account.clone(), - value_transferred: endowment, - timestamp: timestamp::Module::::now(), - random_seed: system::Module::::random_seed(), - }, - input_data, - EmptyOutputBuf::new(), - gas_meter, - ) - .into_result()?; - - // Deposit an instantiation event. - nested.events.push(RawEvent::Instantiated(self.self_account.clone(), dest.clone())); - - (nested.overlay.into_change_set(), nested.events, nested.calls) - }; - - self.overlay.commit(change_set); - self.events.extend(events); - self.calls.extend(calls); - - Ok(InstantiateReceipt { address: dest }) - } + /// Create the top level execution context. 
+ /// + /// The specified `origin` address will be used as `sender` for + pub fn top_level(origin: T::AccountId, cfg: &'a Config, vm: &'a V, loader: &'a L) -> Self { + let overlay = OverlayAccountDb::::new( + &DirectAccountDb, + Rc::new(RefCell::new(AccountTrieIdMapping::new())), + true, + ); + let self_trieid = overlay.get_or_create_trieid(&origin); + ExecutionContext { + self_account: origin, + self_trieid, + depth: 0, + overlay, + events: Vec::new(), + calls: Vec::new(), + config: &cfg, + vm: &vm, + loader: &loader, + } + } + + fn nested(&self, overlay: OverlayAccountDb<'a, T>, dest: T::AccountId) -> Self { + let self_trieid = overlay.get_or_create_trieid(&dest); + ExecutionContext { + overlay, + self_account: dest, + self_trieid, + depth: self.depth + 1, + events: Vec::new(), + calls: Vec::new(), + config: self.config, + vm: self.vm, + loader: self.loader, + } + } + + /// Make a call to the specified address, optionally transfering some funds. + pub fn call( + &mut self, + dest: T::AccountId, + value: BalanceOf, + gas_meter: &mut GasMeter, + input_data: &[u8], + empty_output_buf: EmptyOutputBuf, + ) -> Result { + if self.depth == self.config.max_depth as usize { + return Err("reached maximum depth, cannot make a call"); + } + + if gas_meter + .charge(self.config, ExecFeeToken::Call) + .is_out_of_gas() + { + return Err("not enough gas to pay base call fee"); + } + + let dest_code_hash = self.overlay.get_code(&dest); + let mut output_data = Vec::new(); + + let (change_set, events, calls) = { + let mut nested = self.nested( + OverlayAccountDb::new(&self.overlay, self.overlay.reg_cache_new_rc(), false), + dest.clone(), + ); + + if value > BalanceOf::::zero() { + transfer( + gas_meter, + TransferCause::Call, + &self.self_account, + &dest, + value, + &mut nested, + )?; + } + + if let Some(dest_code_hash) = dest_code_hash { + let executable = self.loader.load_main(&dest_code_hash)?; + output_data = self + .vm + .execute( + &executable, + &mut CallContext { + ctx: &mut nested, + caller: self.self_account.clone(), + value_transferred: value, + timestamp: timestamp::Module::::now(), + random_seed: system::Module::::random_seed(), + }, + input_data, + empty_output_buf, + gas_meter, + ) + .into_result()?; + } + + ( + nested.overlay.into_change_set(), + nested.events, + nested.calls, + ) + }; + + self.overlay.commit(change_set); + self.events.extend(events); + self.calls.extend(calls); + + Ok(CallReceipt { output_data }) + } + + pub fn instantiate( + &mut self, + endowment: BalanceOf, + gas_meter: &mut GasMeter, + code_hash: &CodeHash, + input_data: &[u8], + ) -> Result, &'static str> { + if self.depth == self.config.max_depth as usize { + return Err("reached maximum depth, cannot create"); + } + + if gas_meter + .charge(self.config, ExecFeeToken::Instantiate) + .is_out_of_gas() + { + return Err("not enough gas to pay base instantiate fee"); + } + + let dest = T::DetermineContractAddress::contract_address_for( + code_hash, + input_data, + &self.self_account, + ); + + if self.overlay.get_code(&dest).is_some() { + // It should be enough to check only the code. + return Err("contract already exists"); + } + + let (change_set, events, calls) = { + let mut overlay = + OverlayAccountDb::new(&self.overlay, self.overlay.reg_cache_new_rc(), false); + + overlay.set_code(&dest, Some(code_hash.clone())); + let mut nested = self.nested(overlay, dest.clone()); + + // Send funds unconditionally here. If the `endowment` is below existential_deposit + // then error will be returned here. 
+ transfer( + gas_meter, + TransferCause::Instantiate, + &self.self_account, + &dest, + endowment, + &mut nested, + )?; + + let executable = self.loader.load_init(&code_hash)?; + self.vm + .execute( + &executable, + &mut CallContext { + ctx: &mut nested, + caller: self.self_account.clone(), + value_transferred: endowment, + timestamp: timestamp::Module::::now(), + random_seed: system::Module::::random_seed(), + }, + input_data, + EmptyOutputBuf::new(), + gas_meter, + ) + .into_result()?; + + // Deposit an instantiation event. + nested.events.push(RawEvent::Instantiated( + self.self_account.clone(), + dest.clone(), + )); + + ( + nested.overlay.into_change_set(), + nested.events, + nested.calls, + ) + }; + + self.overlay.commit(change_set); + self.events.extend(events); + self.calls.extend(calls); + + Ok(InstantiateReceipt { address: dest }) + } } #[cfg_attr(test, derive(Debug, PartialEq, Eq))] #[derive(Copy, Clone)] pub enum TransferFeeKind { - ContractInstantiate, - AccountCreate, - Transfer, + ContractInstantiate, + AccountCreate, + Transfer, } #[cfg_attr(test, derive(Debug, PartialEq, Eq))] #[derive(Copy, Clone)] pub struct TransferFeeToken { - kind: TransferFeeKind, - gas_price: Balance, + kind: TransferFeeKind, + gas_price: Balance, } impl Token for TransferFeeToken> { - type Metadata = Config; - - #[inline] - fn calculate_amount(&self, metadata: &Config) -> T::Gas { - let balance_fee = match self.kind { - TransferFeeKind::ContractInstantiate => metadata.contract_account_instantiate_fee, - TransferFeeKind::AccountCreate => metadata.account_create_fee, - TransferFeeKind::Transfer => metadata.transfer_fee, - }; - approx_gas_for_balance::(self.gas_price, balance_fee) - } + type Metadata = Config; + + #[inline] + fn calculate_amount(&self, metadata: &Config) -> T::Gas { + let balance_fee = match self.kind { + TransferFeeKind::ContractInstantiate => metadata.contract_account_instantiate_fee, + TransferFeeKind::AccountCreate => metadata.account_create_fee, + TransferFeeKind::Transfer => metadata.transfer_fee, + }; + approx_gas_for_balance::(self.gas_price, balance_fee) + } } /// Describes possible transfer causes. enum TransferCause { - Call, - Instantiate, + Call, + Instantiate, } /// Transfer some funds from `transactor` to `dest`. @@ -475,153 +497,159 @@ enum TransferCause { /// can go below existential deposit, essentially giving a contract /// the chance to give up it's life. fn transfer<'a, T: Trait, V: Vm, L: Loader>( - gas_meter: &mut GasMeter, - cause: TransferCause, - transactor: &T::AccountId, - dest: &T::AccountId, - value: BalanceOf, - ctx: &mut ExecutionContext<'a, T, V, L>, + gas_meter: &mut GasMeter, + cause: TransferCause, + transactor: &T::AccountId, + dest: &T::AccountId, + value: BalanceOf, + ctx: &mut ExecutionContext<'a, T, V, L>, ) -> Result<(), &'static str> { - use self::TransferCause::*; - use self::TransferFeeKind::*; - - let to_balance = ctx.overlay.get_balance(dest); - - // `would_create` indicates whether the account will be created if this transfer gets executed. - // This flag is orthogonal to `cause. - // For example, we can instantiate a contract at the address which already has some funds. In this - // `would_create` will be `false`. Another example would be when this function is called from `call`, - // and account with the address `dest` doesn't exist yet `would_create` will be `true`. 
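The fee selection performed inside `transfer` below is a small decision table: the cause of the transfer plus whether the destination account would be created determine the fee kind. Restated as a free function (the names mirror the source, but this is an illustration, not the module's code):

#[derive(Debug, PartialEq)]
enum FeeKind { ContractInstantiate, AccountCreate, Transfer }

enum Cause { Call, Instantiate }

fn fee_kind(cause: Cause, would_create: bool) -> FeeKind {
    match cause {
        // Instantiation always pays the contract-account creation fee.
        Cause::Instantiate => FeeKind::ContractInstantiate,
        // A plain call pays the account-creation fee only when the recipient
        // does not exist yet (its balance is zero), else the transfer fee.
        Cause::Call if would_create => FeeKind::AccountCreate,
        Cause::Call => FeeKind::Transfer,
    }
}

fn main() {
    assert_eq!(fee_kind(Cause::Instantiate, false), FeeKind::ContractInstantiate);
    assert_eq!(fee_kind(Cause::Call, true), FeeKind::AccountCreate);
    assert_eq!(fee_kind(Cause::Call, false), FeeKind::Transfer);
}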
- let would_create = to_balance.is_zero(); - - let token = { - let kind: TransferFeeKind = match cause { - // If this function is called from `Instantiate` routine, then we always - // charge contract account creation fee. - Instantiate => ContractInstantiate, - - // Otherwise the fee depends on whether we create a new account or transfer - // to an existing one. - Call => if would_create { - TransferFeeKind::AccountCreate - } else { - TransferFeeKind::Transfer - }, - }; - TransferFeeToken { - kind, - gas_price: gas_meter.gas_price(), - } - }; - - if gas_meter.charge(ctx.config, token).is_out_of_gas() { - return Err("not enough gas to pay transfer fee"); - } - - // We allow balance to go below the existential deposit here: - let from_balance = ctx.overlay.get_balance(transactor); - let new_from_balance = match from_balance.checked_sub(&value) { - Some(b) => b, - None => return Err("balance too low to send value"), - }; - if would_create && value < ctx.config.existential_deposit { - return Err("value too low to create account"); - } - T::Currency::ensure_can_withdraw(transactor, value, WithdrawReason::Transfer, new_from_balance)?; - - let new_to_balance = match to_balance.checked_add(&value) { - Some(b) => b, - None => return Err("destination balance too high to receive value"), - }; - - if transactor != dest { - ctx.overlay.set_balance(transactor, new_from_balance); - ctx.overlay.set_balance(dest, new_to_balance); - ctx.events - .push(RawEvent::Transfer(transactor.clone(), dest.clone(), value)); - } - - Ok(()) + use self::TransferCause::*; + use self::TransferFeeKind::*; + + let to_balance = ctx.overlay.get_balance(dest); + + // `would_create` indicates whether the account will be created if this transfer gets executed. + // This flag is orthogonal to `cause. + // For example, we can instantiate a contract at the address which already has some funds. In this + // `would_create` will be `false`. Another example would be when this function is called from `call`, + // and account with the address `dest` doesn't exist yet `would_create` will be `true`. + let would_create = to_balance.is_zero(); + + let token = { + let kind: TransferFeeKind = match cause { + // If this function is called from `Instantiate` routine, then we always + // charge contract account creation fee. + Instantiate => ContractInstantiate, + + // Otherwise the fee depends on whether we create a new account or transfer + // to an existing one. 
+ Call => { + if would_create { + TransferFeeKind::AccountCreate + } else { + TransferFeeKind::Transfer + } + } + }; + TransferFeeToken { + kind, + gas_price: gas_meter.gas_price(), + } + }; + + if gas_meter.charge(ctx.config, token).is_out_of_gas() { + return Err("not enough gas to pay transfer fee"); + } + + // We allow balance to go below the existential deposit here: + let from_balance = ctx.overlay.get_balance(transactor); + let new_from_balance = match from_balance.checked_sub(&value) { + Some(b) => b, + None => return Err("balance too low to send value"), + }; + if would_create && value < ctx.config.existential_deposit { + return Err("value too low to create account"); + } + T::Currency::ensure_can_withdraw( + transactor, + value, + WithdrawReason::Transfer, + new_from_balance, + )?; + + let new_to_balance = match to_balance.checked_add(&value) { + Some(b) => b, + None => return Err("destination balance too high to receive value"), + }; + + if transactor != dest { + ctx.overlay.set_balance(transactor, new_from_balance); + ctx.overlay.set_balance(dest, new_to_balance); + ctx.events + .push(RawEvent::Transfer(transactor.clone(), dest.clone(), value)); + } + + Ok(()) } struct CallContext<'a, 'b: 'a, T: Trait + 'b, V: Vm + 'b, L: Loader> { - ctx: &'a mut ExecutionContext<'b, T, V, L>, - caller: T::AccountId, - value_transferred: BalanceOf, - timestamp: T::Moment, - random_seed: T::Hash, + ctx: &'a mut ExecutionContext<'b, T, V, L>, + caller: T::AccountId, + value_transferred: BalanceOf, + timestamp: T::Moment, + random_seed: T::Hash, } impl<'a, 'b: 'a, T, E, V, L> Ext for CallContext<'a, 'b, T, V, L> where - T: Trait + 'b, - V: Vm, - L: Loader, + T: Trait + 'b, + V: Vm, + L: Loader, { - type T = T; - - fn get_storage(&self, key: &[u8]) -> Option> { - self.ctx.overlay.get_storage(&self.ctx.self_trieid, key) - } - - fn set_storage(&mut self, key: &[u8], value: Option>) { - self.ctx - .overlay - .set_storage(&self.ctx.self_account, key.to_vec(), value) - } - - fn instantiate( - &mut self, - code_hash: &CodeHash, - endowment: BalanceOf, - gas_meter: &mut GasMeter, - input_data: &[u8], - ) -> Result>, &'static str> { - self.ctx.instantiate(endowment, gas_meter, code_hash, input_data) - } - - fn call( - &mut self, - to: &T::AccountId, - value: BalanceOf, - gas_meter: &mut GasMeter, - input_data: &[u8], - empty_output_buf: EmptyOutputBuf, - ) -> Result { - self.ctx - .call(to.clone(), value, gas_meter, input_data, empty_output_buf) - } - - /// Notes a call dispatch. 
- fn note_dispatch_call(&mut self, call: CallOf) { - self.ctx.calls.push( - (self.ctx.self_account.clone(), call) - ); - } - - fn address(&self) -> &T::AccountId { - &self.ctx.self_account - } - - fn caller(&self) -> &T::AccountId { - &self.caller - } - - fn balance(&self) -> BalanceOf { - self.ctx.overlay.get_balance(&self.ctx.self_account) - } - - fn value_transferred(&self) -> BalanceOf { - self.value_transferred - } - - fn random_seed(&self) -> &T::Hash { - &self.random_seed - } - - fn now(&self) -> &T::Moment { - &self.timestamp - } + type T = T; + + fn get_storage(&self, key: &[u8]) -> Option> { + self.ctx.overlay.get_storage(&self.ctx.self_trieid, key) + } + + fn set_storage(&mut self, key: &[u8], value: Option>) { + self.ctx + .overlay + .set_storage(&self.ctx.self_account, key.to_vec(), value) + } + + fn instantiate( + &mut self, + code_hash: &CodeHash, + endowment: BalanceOf, + gas_meter: &mut GasMeter, + input_data: &[u8], + ) -> Result>, &'static str> { + self.ctx + .instantiate(endowment, gas_meter, code_hash, input_data) + } + + fn call( + &mut self, + to: &T::AccountId, + value: BalanceOf, + gas_meter: &mut GasMeter, + input_data: &[u8], + empty_output_buf: EmptyOutputBuf, + ) -> Result { + self.ctx + .call(to.clone(), value, gas_meter, input_data, empty_output_buf) + } + + /// Notes a call dispatch. + fn note_dispatch_call(&mut self, call: CallOf) { + self.ctx.calls.push((self.ctx.self_account.clone(), call)); + } + + fn address(&self) -> &T::AccountId { + &self.ctx.self_account + } + + fn caller(&self) -> &T::AccountId { + &self.caller + } + + fn balance(&self) -> BalanceOf { + self.ctx.overlay.get_balance(&self.ctx.self_account) + } + + fn value_transferred(&self) -> BalanceOf { + self.value_transferred + } + + fn random_seed(&self) -> &T::Hash { + &self.random_seed + } + + fn now(&self) -> &T::Moment { + &self.timestamp + } } /// These tests exercise the executive layer. @@ -636,714 +664,731 @@ where /// - executive layer doesn't alter any storage! #[cfg(test)] mod tests { - use super::{ - ExecFeeToken, ExecutionContext, Ext, Loader, EmptyOutputBuf, TransferFeeKind, TransferFeeToken, - Vm, VmExecResult, InstantiateReceipt, RawEvent, - }; - use crate::account_db::AccountDb; - use crate::gas::GasMeter; - use crate::tests::{ExtBuilder, Test}; - use crate::{CodeHash, Config}; - use runtime_io::with_externalities; - use std::cell::RefCell; - use std::rc::Rc; - use std::collections::HashMap; - use std::marker::PhantomData; - use assert_matches::assert_matches; - - const ALICE: u64 = 1; - const BOB: u64 = 2; - const CHARLIE: u64 = 3; - - struct MockCtx<'a> { - ext: &'a mut dyn Ext, - input_data: &'a [u8], - empty_output_buf: Option, - gas_meter: &'a mut GasMeter, - } - - #[derive(Clone)] - struct MockExecutable<'a>(Rc VmExecResult + 'a>); - - impl<'a> MockExecutable<'a> { - fn new(f: impl Fn(MockCtx) -> VmExecResult + 'a) -> Self { - MockExecutable(Rc::new(f)) - } - } - - struct MockLoader<'a> { - map: HashMap, MockExecutable<'a>>, - counter: u64, - } - - impl<'a> MockLoader<'a> { - fn empty() -> Self { - MockLoader { - map: HashMap::new(), - counter: 0, - } - } - - fn insert(&mut self, f: impl Fn(MockCtx) -> VmExecResult + 'a) -> CodeHash { - // Generate code hashes as monotonically increasing values. 
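The mock loader defined in these tests keys executables by a counter that doubles as the code hash. The registry pattern can be shown with plain `fn` pointers instead of the `Rc`'d closures the real tests use (a simplification, since `fn` pointers cannot capture state):

use std::collections::HashMap;

type Exec = fn() -> &'static str;

struct MockLoader {
    map: HashMap<u64, Exec>,
    counter: u64,
}

impl MockLoader {
    fn empty() -> Self {
        MockLoader { map: HashMap::new(), counter: 0 }
    }

    // Code hashes are handed out monotonically, as in the comment above.
    fn insert(&mut self, f: Exec) -> u64 {
        let code_hash = self.counter;
        self.counter += 1;
        self.map.insert(code_hash, f);
        code_hash
    }

    fn load_main(&self, code_hash: u64) -> Result<Exec, &'static str> {
        self.map.get(&code_hash).copied().ok_or("code not found")
    }
}

fn main() {
    let mut loader = MockLoader::empty();
    let ch = loader.insert(|| "ok"); // non-capturing closures coerce to fn pointers
    assert_eq!(loader.load_main(ch).unwrap()(), "ok");
    assert!(loader.load_main(42).is_err());
}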
- let code_hash = ::Hash::from_low_u64_be(self.counter); - - self.counter += 1; - self.map.insert(code_hash, MockExecutable::new(f)); - code_hash - } - } - - struct MockVm<'a> { - _marker: PhantomData<&'a ()>, - } - - impl<'a> MockVm<'a> { - fn new() -> Self { - MockVm { _marker: PhantomData } - } - } - - impl<'a> Loader for MockLoader<'a> { - type Executable = MockExecutable<'a>; - - fn load_init(&self, code_hash: &CodeHash) -> Result { - self.map - .get(code_hash) - .cloned() - .ok_or_else(|| "code not found") - } - fn load_main(&self, code_hash: &CodeHash) -> Result { - self.map - .get(code_hash) - .cloned() - .ok_or_else(|| "code not found") - } - } - - impl<'a> Vm for MockVm<'a> { - type Executable = MockExecutable<'a>; - - fn execute>( - &self, - exec: &MockExecutable, - ext: &mut E, - input_data: &[u8], - empty_output_buf: EmptyOutputBuf, - gas_meter: &mut GasMeter, - ) -> VmExecResult { - (exec.0)(MockCtx { - ext, - input_data, - empty_output_buf: Some(empty_output_buf), - gas_meter, - }) - } - } - - #[test] - fn it_works() { - let value = Default::default(); - let mut gas_meter = GasMeter::::with_limit(10000, 1); - let data = vec![]; - - let vm = MockVm::new(); - - let test_data = Rc::new(RefCell::new(vec![0usize])); - - let mut loader = MockLoader::empty(); - let exec_ch = loader.insert(|_ctx| { - test_data.borrow_mut().push(1); - VmExecResult::Ok - }); - - with_externalities(&mut ExtBuilder::default().build(), || { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); - ctx.overlay.set_code(&BOB, Some(exec_ch)); - - assert_matches!( - ctx.call(BOB, value, &mut gas_meter, &data, EmptyOutputBuf::new()), - Ok(_) - ); - }); - - assert_eq!(&*test_data.borrow(), &vec![0, 1]); - } - - #[test] - fn base_fees() { - let origin = ALICE; - let dest = BOB; - - // This test verifies that base fee for call is taken. - with_externalities(&mut ExtBuilder::default().build(), || { - let vm = MockVm::new(); - let loader = MockLoader::empty(); - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); - ctx.overlay.set_balance(&origin, 100); - ctx.overlay.set_balance(&dest, 0); - - let mut gas_meter = GasMeter::::with_limit(1000, 1); - - let result = ctx.call(dest, 0, &mut gas_meter, &[], EmptyOutputBuf::new()); - assert_matches!(result, Ok(_)); - - let mut toks = gas_meter.tokens().iter(); - match_tokens!(toks, ExecFeeToken::Call,); - }); - - // This test verifies that base fee for instantiation is taken. - with_externalities(&mut ExtBuilder::default().build(), || { - let mut loader = MockLoader::empty(); - let code = loader.insert(|_| VmExecResult::Ok); - - let vm = MockVm::new(); - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); - - ctx.overlay.set_balance(&origin, 100); - - let mut gas_meter = GasMeter::::with_limit(1000, 1); - - let result = ctx.instantiate(0, &mut gas_meter, &code, &[]); - assert_matches!(result, Ok(_)); - - let mut toks = gas_meter.tokens().iter(); - match_tokens!(toks, ExecFeeToken::Instantiate,); - }); - } - - #[test] - fn transfer_works() { - // This test verifies that a contract is able to transfer - // some funds to another account. 
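The `it_works` test above relies on a common test-double trick: the mock contract is a closure that records into `Rc<RefCell<...>>` state shared with the test, so the assertion can observe what actually ran. Reduced to its essentials:

use std::cell::RefCell;
use std::rc::Rc;

fn main() {
    // Shared, observable state: the test seeds it with 0.
    let test_data = Rc::new(RefCell::new(vec![0usize]));

    // The "contract": a closure holding a second handle to the same state.
    let contract = {
        let test_data = Rc::clone(&test_data);
        move || test_data.borrow_mut().push(1)
    };

    contract(); // stands in for `ctx.call(BOB, ...)` dispatching into the mock

    // The test can now assert on the recorded execution trace.
    assert_eq!(&*test_data.borrow(), &vec![0, 1]);
}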
- let origin = ALICE; - let dest = BOB; - - let vm = MockVm::new(); - let loader = MockLoader::empty(); - - with_externalities(&mut ExtBuilder::default().build(), || { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); - ctx.overlay.set_balance(&origin, 100); - ctx.overlay.set_balance(&dest, 0); - - let result = ctx.call( - dest, - 55, - &mut GasMeter::::with_limit(1000, 1), - &[], - EmptyOutputBuf::new(), - ); - assert_matches!(result, Ok(_)); - assert_eq!(ctx.overlay.get_balance(&origin), 45); - assert_eq!(ctx.overlay.get_balance(&dest), 55); - }); - } - - #[test] - fn transfer_fees() { - let origin = ALICE; - let dest = BOB; - - // This test sends 50 units of currency to a non-existent account. - // This should create lead to creation of a new account thus - // a fee should be charged. - with_externalities( - &mut ExtBuilder::default().existential_deposit(15).build(), - || { - let vm = MockVm::new(); - let loader = MockLoader::empty(); - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); - ctx.overlay.set_balance(&origin, 100); - ctx.overlay.set_balance(&dest, 0); - - let mut gas_meter = GasMeter::::with_limit(1000, 1); - - let result = ctx.call(dest, 50, &mut gas_meter, &[], EmptyOutputBuf::new()); - assert_matches!(result, Ok(_)); - - let mut toks = gas_meter.tokens().iter(); - match_tokens!( - toks, - ExecFeeToken::Call, - TransferFeeToken { - kind: TransferFeeKind::AccountCreate, - gas_price: 1u64 - }, - ); - }, - ); - - // This one is similar to the previous one but transfer to an existing account. - // In this test we expect that a regular transfer fee is charged. - with_externalities( - &mut ExtBuilder::default().existential_deposit(15).build(), - || { - let vm = MockVm::new(); - let loader = MockLoader::empty(); - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); - ctx.overlay.set_balance(&origin, 100); - ctx.overlay.set_balance(&dest, 15); - - let mut gas_meter = GasMeter::::with_limit(1000, 1); - - let result = ctx.call(dest, 50, &mut gas_meter, &[], EmptyOutputBuf::new()); - assert_matches!(result, Ok(_)); - - let mut toks = gas_meter.tokens().iter(); - match_tokens!( - toks, - ExecFeeToken::Call, - TransferFeeToken { - kind: TransferFeeKind::Transfer, - gas_price: 1u64 - }, - ); - }, - ); - - // This test sends 50 units of currency as an endownment to a newly - // created contract. - with_externalities( - &mut ExtBuilder::default().existential_deposit(15).build(), - || { - let mut loader = MockLoader::empty(); - let code = loader.insert(|_| VmExecResult::Ok); - - let vm = MockVm::new(); - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); - - ctx.overlay.set_balance(&origin, 100); - ctx.overlay.set_balance(&dest, 15); - - let mut gas_meter = GasMeter::::with_limit(1000, 1); - - let result = ctx.instantiate(50, &mut gas_meter, &code, &[]); - assert_matches!(result, Ok(_)); - - let mut toks = gas_meter.tokens().iter(); - match_tokens!( - toks, - ExecFeeToken::Instantiate, - TransferFeeToken { - kind: TransferFeeKind::ContractInstantiate, - gas_price: 1u64 - }, - ); - }, - ); - } - - #[test] - fn balance_too_low() { - // This test verifies that a contract can't send value if it's - // balance is too low. 
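The failure path this test pins down comes from the checked arithmetic in `transfer`: a debit that would underflow returns an error instead of wrapping, and no state is written. As a two-line sketch of that guard:

fn debit(balance: u64, value: u64) -> Result<u64, &'static str> {
    // checked_sub returns None on underflow, which becomes the error string.
    balance.checked_sub(value).ok_or("balance too low to send value")
}

fn main() {
    assert_eq!(debit(0, 100), Err("balance too low to send value"));
    assert_eq!(debit(100, 55), Ok(45));
}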
- let origin = ALICE; - let dest = BOB; - - let vm = MockVm::new(); - let loader = MockLoader::empty(); - - with_externalities(&mut ExtBuilder::default().build(), || { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); - ctx.overlay.set_balance(&origin, 0); - - let result = ctx.call( - dest, - 100, - &mut GasMeter::::with_limit(1000, 1), - &[], - EmptyOutputBuf::new(), - ); - - assert_matches!(result, Err("balance too low to send value")); - assert_eq!(ctx.overlay.get_balance(&origin), 0); - assert_eq!(ctx.overlay.get_balance(&dest), 0); - }); - } - - #[test] - fn output_is_returned() { - // Verifies that if a contract returns data, this data - // is returned from the execution context. - let origin = ALICE; - let dest = BOB; - - let vm = MockVm::new(); - let mut loader = MockLoader::empty(); - let return_ch = loader.insert(|mut ctx| { - #[derive(Debug)] - enum Void {} - let empty_output_buf = ctx.empty_output_buf.take().unwrap(); - let output_buf = - empty_output_buf.fill::(4, |data| { - data.copy_from_slice(&[1, 2, 3, 4]); - Ok(()) - }) - .expect("Ok is always returned"); - VmExecResult::Returned(output_buf) - }); - - with_externalities(&mut ExtBuilder::default().build(), || { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); - ctx.overlay.set_code(&BOB, Some(return_ch)); - - let result = ctx.call( - dest, - 0, - &mut GasMeter::::with_limit(1000, 1), - &[], - EmptyOutputBuf::new(), - ); - - let output_data = result.unwrap().output_data; - assert_eq!(&output_data, &[1, 2, 3, 4]); - }); - } - - #[test] - fn input_data() { - let vm = MockVm::new(); - let mut loader = MockLoader::empty(); - let input_data_ch = loader.insert(|ctx| { - assert_eq!(ctx.input_data, &[1, 2, 3, 4]); - VmExecResult::Ok - }); - - // This one tests passing the input data into a contract via call. - with_externalities(&mut ExtBuilder::default().build(), || { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); - ctx.overlay.set_code(&BOB, Some(input_data_ch)); - - let result = ctx.call( - BOB, - 0, - &mut GasMeter::::with_limit(10000, 1), - &[1, 2, 3, 4], - EmptyOutputBuf::new(), - ); - assert_matches!(result, Ok(_)); - }); - - // This one tests passing the input data into a contract via call. - with_externalities(&mut ExtBuilder::default().build(), || { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); - - let result = ctx.instantiate( - 0, - &mut GasMeter::::with_limit(10000, 1), - &input_data_ch, - &[1, 2, 3, 4], - ); - assert_matches!(result, Ok(_)); - }); - } - - #[test] - fn max_depth() { - // This test verifies that when we reach the maximal depth creation of an - // yet another context fails. - let value = Default::default(); - let reached_bottom = RefCell::new(false); - - let vm = MockVm::new(); - let mut loader = MockLoader::empty(); - let recurse_ch = loader.insert(|ctx| { - // Try to call into yourself. - let r = ctx - .ext - .call(&BOB, 0, ctx.gas_meter, &[], EmptyOutputBuf::new()); - - let mut reached_bottom = reached_bottom.borrow_mut(); - if !*reached_bottom { - // We are first time here, it means we just reached bottom. - // Verify that we've got proper error and set `reached_bottom`. - assert_matches!(r, Err("reached maximum depth, cannot make a call")); - *reached_bottom = true; - } else { - // We just unwinding stack here. 
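The guard exercised by `max_depth` is easy to model: each nested call builds a context one level deeper, and a call made at `max_depth` is refused, so recursion bottoms out with an error rather than a stack overflow. A toy version of that mechanism:

struct Ctx {
    depth: usize,
    max_depth: usize,
}

impl Ctx {
    fn call(&self) -> Result<(), &'static str> {
        if self.depth == self.max_depth {
            return Err("reached maximum depth, cannot make a call");
        }
        // Recurse as a nested context one level deeper, like `self.nested(...)`.
        Ctx { depth: self.depth + 1, max_depth: self.max_depth }.call()
    }
}

fn main() {
    // With max_depth = 100 the recursion is refused at the bottom instead of
    // overflowing the stack; here the error simply propagates back up.
    assert!(Ctx { depth: 0, max_depth: 100 }.call().is_err());
}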
- assert_matches!(r, Ok(_)); - } - - VmExecResult::Ok - }); - - with_externalities(&mut ExtBuilder::default().build(), || { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); - ctx.overlay.set_code(&BOB, Some(recurse_ch)); - - let result = ctx.call( - BOB, - value, - &mut GasMeter::::with_limit(100000, 1), - &[], - EmptyOutputBuf::new(), - ); - - assert_matches!(result, Ok(_)); - }); - } - - #[test] - fn caller_returns_proper_values() { - let origin = ALICE; - let dest = BOB; - - let vm = MockVm::new(); - - let witnessed_caller_bob = RefCell::new(None::); - let witnessed_caller_charlie = RefCell::new(None::); - - let mut loader = MockLoader::empty(); - let bob_ch = loader.insert(|ctx| { - // Record the caller for bob. - *witnessed_caller_bob.borrow_mut() = Some(*ctx.ext.caller()); - - // Call into CHARLIE contract. - assert_matches!( - ctx.ext - .call(&CHARLIE, 0, ctx.gas_meter, &[], EmptyOutputBuf::new()), - Ok(_) - ); - VmExecResult::Ok - }); - let charlie_ch = loader.insert(|ctx| { - // Record the caller for charlie. - *witnessed_caller_charlie.borrow_mut() = Some(*ctx.ext.caller()); - VmExecResult::Ok - }); - - with_externalities(&mut ExtBuilder::default().build(), || { - let cfg = Config::preload(); - - let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); - ctx.overlay.set_code(&dest, Some(bob_ch)); - ctx.overlay.set_code(&CHARLIE, Some(charlie_ch)); - - let result = ctx.call( - dest, - 0, - &mut GasMeter::::with_limit(10000, 1), - &[], - EmptyOutputBuf::new(), - ); - - assert_matches!(result, Ok(_)); - }); - - assert_eq!(&*witnessed_caller_bob.borrow(), &Some(origin)); - assert_eq!(&*witnessed_caller_charlie.borrow(), &Some(dest)); - } - - #[test] - fn address_returns_proper_values() { - let vm = MockVm::new(); - - let mut loader = MockLoader::empty(); - let bob_ch = loader.insert(|ctx| { - // Verify that address matches BOB. - assert_eq!(*ctx.ext.address(), BOB); - - // Call into charlie contract. 
- assert_matches!( - ctx.ext - .call(&CHARLIE, 0, ctx.gas_meter, &[], EmptyOutputBuf::new()), - Ok(_) - ); - VmExecResult::Ok - }); - let charlie_ch = loader.insert(|ctx| { - assert_eq!(*ctx.ext.address(), CHARLIE); - VmExecResult::Ok - }); - - with_externalities(&mut ExtBuilder::default().build(), || { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); - ctx.overlay.set_code(&BOB, Some(bob_ch)); - ctx.overlay.set_code(&CHARLIE, Some(charlie_ch)); - - let result = ctx.call( - BOB, - 0, - &mut GasMeter::::with_limit(10000, 1), - &[], - EmptyOutputBuf::new(), - ); - - assert_matches!(result, Ok(_)); - }); - } - - #[test] - fn refuse_instantiate_with_value_below_existential_deposit() { - let vm = MockVm::new(); - - let mut loader = MockLoader::empty(); - let dummy_ch = loader.insert(|_| VmExecResult::Ok); - - with_externalities( - &mut ExtBuilder::default().existential_deposit(15).build(), - || { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); - - assert_matches!( - ctx.instantiate( - 0, // <- zero endowment - &mut GasMeter::::with_limit(10000, 1), - &dummy_ch, - &[], - ), - Err(_) - ); - } - ); - } - - #[test] - fn instantiation() { - let vm = MockVm::new(); - - let mut loader = MockLoader::empty(); - let dummy_ch = loader.insert(|_| VmExecResult::Ok); - - with_externalities( - &mut ExtBuilder::default().existential_deposit(15).build(), - || { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); - ctx.overlay.set_balance(&ALICE, 1000); - - let created_contract_address = assert_matches!( - ctx.instantiate( - 100, - &mut GasMeter::::with_limit(10000, 1), - &dummy_ch, - &[], - ), - Ok(InstantiateReceipt { address }) => address - ); - - // Check that the newly created account has the expected code hash and - // there are instantiation event. - assert_eq!(ctx.overlay.get_code(&created_contract_address).unwrap(), dummy_ch); - assert_eq!(&ctx.events, &[ - RawEvent::Transfer(ALICE, created_contract_address, 100), - RawEvent::Instantiated(ALICE, created_contract_address), - ]); - } - ); - } - - #[test] - fn instantiation_from_contract() { - let vm = MockVm::new(); - - let mut loader = MockLoader::empty(); - let dummy_ch = loader.insert(|_| VmExecResult::Ok); - let created_contract_address = Rc::new(RefCell::new(None::)); - let creator_ch = loader.insert({ - let dummy_ch = dummy_ch.clone(); - let created_contract_address = Rc::clone(&created_contract_address); - move |ctx| { - // Instantiate a contract and save it's address in `created_contract_address`. - *created_contract_address.borrow_mut() = - ctx.ext.instantiate( - &dummy_ch, - 15u64, - ctx.gas_meter, - &[] - ) - .unwrap() - .address.into(); - - VmExecResult::Ok - } - }); - - with_externalities( - &mut ExtBuilder::default().existential_deposit(15).build(), - || { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); - ctx.overlay.set_balance(&ALICE, 1000); - ctx.overlay.set_code(&BOB, Some(creator_ch)); - - assert_matches!( - ctx.call(BOB, 20, &mut GasMeter::::with_limit(1000, 1), &[], EmptyOutputBuf::new()), - Ok(_) - ); - - let created_contract_address = created_contract_address.borrow().as_ref().unwrap().clone(); - - // Check that the newly created account has the expected code hash and - // there are instantiation event. 
- assert_eq!(ctx.overlay.get_code(&created_contract_address).unwrap(), dummy_ch); - assert_eq!(&ctx.events, &[ - RawEvent::Transfer(ALICE, BOB, 20), - RawEvent::Transfer(BOB, created_contract_address, 15), - RawEvent::Instantiated(BOB, created_contract_address), - ]); - } - ); - } - - #[test] - fn instantiation_fails() { - let vm = MockVm::new(); - - let mut loader = MockLoader::empty(); - let dummy_ch = loader.insert(|_| VmExecResult::Trap("It's a trap!")); - let creator_ch = loader.insert({ - let dummy_ch = dummy_ch.clone(); - move |ctx| { - // Instantiate a contract and save it's address in `created_contract_address`. - assert_matches!( - ctx.ext.instantiate( - &dummy_ch, - 15u64, - ctx.gas_meter, - &[] - ), - Err("It's a trap!") - ); - - VmExecResult::Ok - } - }); - - with_externalities( - &mut ExtBuilder::default().existential_deposit(15).build(), - || { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); - ctx.overlay.set_balance(&ALICE, 1000); - ctx.overlay.set_code(&BOB, Some(creator_ch)); - - assert_matches!( - ctx.call(BOB, 20, &mut GasMeter::::with_limit(1000, 1), &[], EmptyOutputBuf::new()), - Ok(_) - ); - - // The contract wasn't created so we don't expect to see an instantiation - // event here. - assert_eq!(&ctx.events, &[ - RawEvent::Transfer(ALICE, BOB, 20), - ]); - } - ); - } + use super::{ + EmptyOutputBuf, ExecFeeToken, ExecutionContext, Ext, InstantiateReceipt, Loader, RawEvent, + TransferFeeKind, TransferFeeToken, Vm, VmExecResult, + }; + use crate::account_db::AccountDb; + use crate::gas::GasMeter; + use crate::tests::{ExtBuilder, Test}; + use crate::{CodeHash, Config}; + use assert_matches::assert_matches; + use runtime_io::with_externalities; + use std::cell::RefCell; + use std::collections::HashMap; + use std::marker::PhantomData; + use std::rc::Rc; + + const ALICE: u64 = 1; + const BOB: u64 = 2; + const CHARLIE: u64 = 3; + + struct MockCtx<'a> { + ext: &'a mut dyn Ext, + input_data: &'a [u8], + empty_output_buf: Option, + gas_meter: &'a mut GasMeter, + } + + #[derive(Clone)] + struct MockExecutable<'a>(Rc VmExecResult + 'a>); + + impl<'a> MockExecutable<'a> { + fn new(f: impl Fn(MockCtx) -> VmExecResult + 'a) -> Self { + MockExecutable(Rc::new(f)) + } + } + + struct MockLoader<'a> { + map: HashMap, MockExecutable<'a>>, + counter: u64, + } + + impl<'a> MockLoader<'a> { + fn empty() -> Self { + MockLoader { + map: HashMap::new(), + counter: 0, + } + } + + fn insert(&mut self, f: impl Fn(MockCtx) -> VmExecResult + 'a) -> CodeHash { + // Generate code hashes as monotonically increasing values. 
+ let code_hash = ::Hash::from_low_u64_be(self.counter); + + self.counter += 1; + self.map.insert(code_hash, MockExecutable::new(f)); + code_hash + } + } + + struct MockVm<'a> { + _marker: PhantomData<&'a ()>, + } + + impl<'a> MockVm<'a> { + fn new() -> Self { + MockVm { + _marker: PhantomData, + } + } + } + + impl<'a> Loader for MockLoader<'a> { + type Executable = MockExecutable<'a>; + + fn load_init(&self, code_hash: &CodeHash) -> Result { + self.map + .get(code_hash) + .cloned() + .ok_or_else(|| "code not found") + } + fn load_main(&self, code_hash: &CodeHash) -> Result { + self.map + .get(code_hash) + .cloned() + .ok_or_else(|| "code not found") + } + } + + impl<'a> Vm for MockVm<'a> { + type Executable = MockExecutable<'a>; + + fn execute>( + &self, + exec: &MockExecutable, + ext: &mut E, + input_data: &[u8], + empty_output_buf: EmptyOutputBuf, + gas_meter: &mut GasMeter, + ) -> VmExecResult { + (exec.0)(MockCtx { + ext, + input_data, + empty_output_buf: Some(empty_output_buf), + gas_meter, + }) + } + } + + #[test] + fn it_works() { + let value = Default::default(); + let mut gas_meter = GasMeter::::with_limit(10000, 1); + let data = vec![]; + + let vm = MockVm::new(); + + let test_data = Rc::new(RefCell::new(vec![0usize])); + + let mut loader = MockLoader::empty(); + let exec_ch = loader.insert(|_ctx| { + test_data.borrow_mut().push(1); + VmExecResult::Ok + }); + + with_externalities(&mut ExtBuilder::default().build(), || { + let cfg = Config::preload(); + let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); + ctx.overlay.set_code(&BOB, Some(exec_ch)); + + assert_matches!( + ctx.call(BOB, value, &mut gas_meter, &data, EmptyOutputBuf::new()), + Ok(_) + ); + }); + + assert_eq!(&*test_data.borrow(), &vec![0, 1]); + } + + #[test] + fn base_fees() { + let origin = ALICE; + let dest = BOB; + + // This test verifies that base fee for call is taken. + with_externalities(&mut ExtBuilder::default().build(), || { + let vm = MockVm::new(); + let loader = MockLoader::empty(); + let cfg = Config::preload(); + let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); + ctx.overlay.set_balance(&origin, 100); + ctx.overlay.set_balance(&dest, 0); + + let mut gas_meter = GasMeter::::with_limit(1000, 1); + + let result = ctx.call(dest, 0, &mut gas_meter, &[], EmptyOutputBuf::new()); + assert_matches!(result, Ok(_)); + + let mut toks = gas_meter.tokens().iter(); + match_tokens!(toks, ExecFeeToken::Call,); + }); + + // This test verifies that base fee for instantiation is taken. + with_externalities(&mut ExtBuilder::default().build(), || { + let mut loader = MockLoader::empty(); + let code = loader.insert(|_| VmExecResult::Ok); + + let vm = MockVm::new(); + let cfg = Config::preload(); + let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); + + ctx.overlay.set_balance(&origin, 100); + + let mut gas_meter = GasMeter::::with_limit(1000, 1); + + let result = ctx.instantiate(0, &mut gas_meter, &code, &[]); + assert_matches!(result, Ok(_)); + + let mut toks = gas_meter.tokens().iter(); + match_tokens!(toks, ExecFeeToken::Instantiate,); + }); + } + + #[test] + fn transfer_works() { + // This test verifies that a contract is able to transfer + // some funds to another account. 
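The mock harness above is the backbone of every test in this module: the loader hands out code hashes from a monotonically increasing counter and maps them to closures, and the mock VM simply looks up and invokes the stored closure. A minimal standalone sketch of the same pattern (the names here are illustrative, not the crate's API):

    use std::collections::HashMap;
    use std::rc::Rc;

    type ExecResult = Result<(), &'static str>;
    type Executable = Rc<dyn Fn() -> ExecResult>;

    struct Loader {
        map: HashMap<u64, Executable>,
        counter: u64,
    }

    impl Loader {
        // The monotonically increasing counter doubles as the "code hash".
        fn insert(&mut self, f: impl Fn() -> ExecResult + 'static) -> u64 {
            let code_hash = self.counter;
            self.counter += 1;
            self.map.insert(code_hash, Rc::new(f));
            code_hash
        }

        fn load(&self, code_hash: u64) -> Result<Executable, &'static str> {
            self.map.get(&code_hash).cloned().ok_or("code not found")
        }
    }

    fn main() {
        let mut loader = Loader { map: HashMap::new(), counter: 0 };
        let ch = loader.insert(|| Ok(()));
        let exec = loader.load(ch).unwrap();
        assert!(exec().is_ok());
        assert!(loader.load(42).is_err());
    }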
+        let origin = ALICE;
+        let dest = BOB;
+
+        let vm = MockVm::new();
+        let loader = MockLoader::empty();
+
+        with_externalities(&mut ExtBuilder::default().build(), || {
+            let cfg = Config::preload();
+            let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader);
+            ctx.overlay.set_balance(&origin, 100);
+            ctx.overlay.set_balance(&dest, 0);
+
+            let result = ctx.call(
+                dest,
+                55,
+                &mut GasMeter::<Test>::with_limit(1000, 1),
+                &[],
+                EmptyOutputBuf::new(),
+            );
+            assert_matches!(result, Ok(_));
+            assert_eq!(ctx.overlay.get_balance(&origin), 45);
+            assert_eq!(ctx.overlay.get_balance(&dest), 55);
+        });
+    }
+
+    #[test]
+    fn transfer_fees() {
+        let origin = ALICE;
+        let dest = BOB;
+
+        // This test sends 50 units of currency to a non-existent account.
+        // This should lead to the creation of a new account, thus
+        // a fee should be charged.
+        with_externalities(
+            &mut ExtBuilder::default().existential_deposit(15).build(),
+            || {
+                let vm = MockVm::new();
+                let loader = MockLoader::empty();
+                let cfg = Config::preload();
+                let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader);
+                ctx.overlay.set_balance(&origin, 100);
+                ctx.overlay.set_balance(&dest, 0);
+
+                let mut gas_meter = GasMeter::<Test>::with_limit(1000, 1);
+
+                let result = ctx.call(dest, 50, &mut gas_meter, &[], EmptyOutputBuf::new());
+                assert_matches!(result, Ok(_));
+
+                let mut toks = gas_meter.tokens().iter();
+                match_tokens!(
+                    toks,
+                    ExecFeeToken::Call,
+                    TransferFeeToken {
+                        kind: TransferFeeKind::AccountCreate,
+                        gas_price: 1u64
+                    },
+                );
+            },
+        );
+
+        // This one is similar to the previous one but transfers to an existing account.
+        // In this test we expect that a regular transfer fee is charged.
+        with_externalities(
+            &mut ExtBuilder::default().existential_deposit(15).build(),
+            || {
+                let vm = MockVm::new();
+                let loader = MockLoader::empty();
+                let cfg = Config::preload();
+                let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader);
+                ctx.overlay.set_balance(&origin, 100);
+                ctx.overlay.set_balance(&dest, 15);
+
+                let mut gas_meter = GasMeter::<Test>::with_limit(1000, 1);
+
+                let result = ctx.call(dest, 50, &mut gas_meter, &[], EmptyOutputBuf::new());
+                assert_matches!(result, Ok(_));
+
+                let mut toks = gas_meter.tokens().iter();
+                match_tokens!(
+                    toks,
+                    ExecFeeToken::Call,
+                    TransferFeeToken {
+                        kind: TransferFeeKind::Transfer,
+                        gas_price: 1u64
+                    },
+                );
+            },
+        );
+
+        // This test sends 50 units of currency as an endowment to a newly
+        // created contract.
+        with_externalities(
+            &mut ExtBuilder::default().existential_deposit(15).build(),
+            || {
+                let mut loader = MockLoader::empty();
+                let code = loader.insert(|_| VmExecResult::Ok);
+
+                let vm = MockVm::new();
+                let cfg = Config::preload();
+                let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader);
+
+                ctx.overlay.set_balance(&origin, 100);
+                ctx.overlay.set_balance(&dest, 15);
+
+                let mut gas_meter = GasMeter::<Test>::with_limit(1000, 1);
+
+                let result = ctx.instantiate(50, &mut gas_meter, &code, &[]);
+                assert_matches!(result, Ok(_));
+
+                let mut toks = gas_meter.tokens().iter();
+                match_tokens!(
+                    toks,
+                    ExecFeeToken::Instantiate,
+                    TransferFeeToken {
+                        kind: TransferFeeKind::ContractInstantiate,
+                        gas_price: 1u64
+                    },
+                );
+            },
+        );
+    }
+
+    #[test]
+    fn balance_too_low() {
+        // This test verifies that a contract can't send value if its
+        // balance is too low.
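Stepping back from `transfer_fees` above: the token sequence it asserts encodes which fee applies, and that choice hinges on whether the destination already meets the existential deposit and whether the transfer endows a new contract. A hedged sketch of that decision, with `fee_kind` being a hypothetical helper rather than the pallet's code:

    #[derive(Debug, PartialEq)]
    enum TransferFeeKind {
        AccountCreate,
        Transfer,
        ContractInstantiate,
    }

    fn fee_kind(instantiating: bool, dest_balance: u64, existential_deposit: u64) -> TransferFeeKind {
        if instantiating {
            // Endowing a newly created contract.
            TransferFeeKind::ContractInstantiate
        } else if dest_balance < existential_deposit {
            // The destination account does not exist yet, so a creation fee applies.
            TransferFeeKind::AccountCreate
        } else {
            // Plain transfer to an existing account.
            TransferFeeKind::Transfer
        }
    }

    fn main() {
        assert_eq!(fee_kind(false, 0, 15), TransferFeeKind::AccountCreate);
        assert_eq!(fee_kind(false, 15, 15), TransferFeeKind::Transfer);
        assert_eq!(fee_kind(true, 15, 15), TransferFeeKind::ContractInstantiate);
    }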
+        let origin = ALICE;
+        let dest = BOB;
+
+        let vm = MockVm::new();
+        let loader = MockLoader::empty();
+
+        with_externalities(&mut ExtBuilder::default().build(), || {
+            let cfg = Config::preload();
+            let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader);
+            ctx.overlay.set_balance(&origin, 0);
+
+            let result = ctx.call(
+                dest,
+                100,
+                &mut GasMeter::<Test>::with_limit(1000, 1),
+                &[],
+                EmptyOutputBuf::new(),
+            );
+
+            assert_matches!(result, Err("balance too low to send value"));
+            assert_eq!(ctx.overlay.get_balance(&origin), 0);
+            assert_eq!(ctx.overlay.get_balance(&dest), 0);
+        });
+    }
+
+    #[test]
+    fn output_is_returned() {
+        // Verifies that if a contract returns data, this data
+        // is returned from the execution context.
+        let origin = ALICE;
+        let dest = BOB;
+
+        let vm = MockVm::new();
+        let mut loader = MockLoader::empty();
+        let return_ch = loader.insert(|mut ctx| {
+            #[derive(Debug)]
+            enum Void {}
+            let empty_output_buf = ctx.empty_output_buf.take().unwrap();
+            let output_buf = empty_output_buf
+                .fill::<Void>(4, |data| {
+                    data.copy_from_slice(&[1, 2, 3, 4]);
+                    Ok(())
+                })
+                .expect("Ok is always returned");
+            VmExecResult::Returned(output_buf)
+        });
+
+        with_externalities(&mut ExtBuilder::default().build(), || {
+            let cfg = Config::preload();
+            let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader);
+            ctx.overlay.set_code(&BOB, Some(return_ch));
+
+            let result = ctx.call(
+                dest,
+                0,
+                &mut GasMeter::<Test>::with_limit(1000, 1),
+                &[],
+                EmptyOutputBuf::new(),
+            );
+
+            let output_data = result.unwrap().output_data;
+            assert_eq!(&output_data, &[1, 2, 3, 4]);
+        });
+    }
+
+    #[test]
+    fn input_data() {
+        let vm = MockVm::new();
+        let mut loader = MockLoader::empty();
+        let input_data_ch = loader.insert(|ctx| {
+            assert_eq!(ctx.input_data, &[1, 2, 3, 4]);
+            VmExecResult::Ok
+        });
+
+        // This one tests passing the input data into a contract via call.
+        with_externalities(&mut ExtBuilder::default().build(), || {
+            let cfg = Config::preload();
+            let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader);
+            ctx.overlay.set_code(&BOB, Some(input_data_ch));
+
+            let result = ctx.call(
+                BOB,
+                0,
+                &mut GasMeter::<Test>::with_limit(10000, 1),
+                &[1, 2, 3, 4],
+                EmptyOutputBuf::new(),
+            );
+            assert_matches!(result, Ok(_));
+        });
+
+        // This one tests passing the input data into a contract via instantiate.
+        with_externalities(&mut ExtBuilder::default().build(), || {
+            let cfg = Config::preload();
+            let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader);
+
+            let result = ctx.instantiate(
+                0,
+                &mut GasMeter::<Test>::with_limit(10000, 1),
+                &input_data_ch,
+                &[1, 2, 3, 4],
+            );
+            assert_matches!(result, Ok(_));
+        });
+    }
+
+    #[test]
+    fn max_depth() {
+        // This test verifies that when we reach the maximal depth, creating
+        // yet another context fails.
+        let value = Default::default();
+        let reached_bottom = RefCell::new(false);
+
+        let vm = MockVm::new();
+        let mut loader = MockLoader::empty();
+        let recurse_ch = loader.insert(|ctx| {
+            // Try to call into yourself.
+            let r = ctx
+                .ext
+                .call(&BOB, 0, ctx.gas_meter, &[], EmptyOutputBuf::new());
+
+            let mut reached_bottom = reached_bottom.borrow_mut();
+            if !*reached_bottom {
+                // This is the first time here, which means we just reached the bottom.
+                // Verify that we've got the proper error and set `reached_bottom`.
+                assert_matches!(r, Err("reached maximum depth, cannot make a call"));
+                *reached_bottom = true;
+            } else {
+                // We are just unwinding the stack here.
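The `max_depth` test above relies on exactly one failure happening at the bottom of the recursion, after which every frame unwinds successfully. A standalone sketch of that shape (the depth accounting is simplified; the real context threads it through `ExecutionContext`):

    fn call(depth: u32, max_depth: u32) -> Result<(), &'static str> {
        if depth == max_depth {
            return Err("reached maximum depth, cannot make a call");
        }
        // The nested call fails exactly once, at the bottom; the unwinding
        // frames all report success.
        let _ = call(depth + 1, max_depth);
        Ok(())
    }

    fn main() {
        assert!(call(0, 100).is_ok());
        assert_eq!(call(100, 100), Err("reached maximum depth, cannot make a call"));
    }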
+ assert_matches!(r, Ok(_)); + } + + VmExecResult::Ok + }); + + with_externalities(&mut ExtBuilder::default().build(), || { + let cfg = Config::preload(); + let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); + ctx.overlay.set_code(&BOB, Some(recurse_ch)); + + let result = ctx.call( + BOB, + value, + &mut GasMeter::::with_limit(100000, 1), + &[], + EmptyOutputBuf::new(), + ); + + assert_matches!(result, Ok(_)); + }); + } + + #[test] + fn caller_returns_proper_values() { + let origin = ALICE; + let dest = BOB; + + let vm = MockVm::new(); + + let witnessed_caller_bob = RefCell::new(None::); + let witnessed_caller_charlie = RefCell::new(None::); + + let mut loader = MockLoader::empty(); + let bob_ch = loader.insert(|ctx| { + // Record the caller for bob. + *witnessed_caller_bob.borrow_mut() = Some(*ctx.ext.caller()); + + // Call into CHARLIE contract. + assert_matches!( + ctx.ext + .call(&CHARLIE, 0, ctx.gas_meter, &[], EmptyOutputBuf::new()), + Ok(_) + ); + VmExecResult::Ok + }); + let charlie_ch = loader.insert(|ctx| { + // Record the caller for charlie. + *witnessed_caller_charlie.borrow_mut() = Some(*ctx.ext.caller()); + VmExecResult::Ok + }); + + with_externalities(&mut ExtBuilder::default().build(), || { + let cfg = Config::preload(); + + let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); + ctx.overlay.set_code(&dest, Some(bob_ch)); + ctx.overlay.set_code(&CHARLIE, Some(charlie_ch)); + + let result = ctx.call( + dest, + 0, + &mut GasMeter::::with_limit(10000, 1), + &[], + EmptyOutputBuf::new(), + ); + + assert_matches!(result, Ok(_)); + }); + + assert_eq!(&*witnessed_caller_bob.borrow(), &Some(origin)); + assert_eq!(&*witnessed_caller_charlie.borrow(), &Some(dest)); + } + + #[test] + fn address_returns_proper_values() { + let vm = MockVm::new(); + + let mut loader = MockLoader::empty(); + let bob_ch = loader.insert(|ctx| { + // Verify that address matches BOB. + assert_eq!(*ctx.ext.address(), BOB); + + // Call into charlie contract. 
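A small aside on the test technique used in `caller_returns_proper_values` and `address_returns_proper_values` above: the loader only accepts `Fn` closures, so the tests record what a contract observed through `RefCell` interior mutability rather than `&mut` captures. Reduced to its essence:

    use std::cell::RefCell;

    fn main() {
        let witnessed_caller = RefCell::new(None::<u64>);
        // An immutable `Fn` closure can still record state via RefCell.
        let record = |caller: u64| {
            *witnessed_caller.borrow_mut() = Some(caller);
        };
        record(1); // e.g. ALICE
        assert_eq!(*witnessed_caller.borrow(), Some(1));
    }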
+ assert_matches!( + ctx.ext + .call(&CHARLIE, 0, ctx.gas_meter, &[], EmptyOutputBuf::new()), + Ok(_) + ); + VmExecResult::Ok + }); + let charlie_ch = loader.insert(|ctx| { + assert_eq!(*ctx.ext.address(), CHARLIE); + VmExecResult::Ok + }); + + with_externalities(&mut ExtBuilder::default().build(), || { + let cfg = Config::preload(); + let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); + ctx.overlay.set_code(&BOB, Some(bob_ch)); + ctx.overlay.set_code(&CHARLIE, Some(charlie_ch)); + + let result = ctx.call( + BOB, + 0, + &mut GasMeter::::with_limit(10000, 1), + &[], + EmptyOutputBuf::new(), + ); + + assert_matches!(result, Ok(_)); + }); + } + + #[test] + fn refuse_instantiate_with_value_below_existential_deposit() { + let vm = MockVm::new(); + + let mut loader = MockLoader::empty(); + let dummy_ch = loader.insert(|_| VmExecResult::Ok); + + with_externalities( + &mut ExtBuilder::default().existential_deposit(15).build(), + || { + let cfg = Config::preload(); + let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); + + assert_matches!( + ctx.instantiate( + 0, // <- zero endowment + &mut GasMeter::::with_limit(10000, 1), + &dummy_ch, + &[], + ), + Err(_) + ); + }, + ); + } + + #[test] + fn instantiation() { + let vm = MockVm::new(); + + let mut loader = MockLoader::empty(); + let dummy_ch = loader.insert(|_| VmExecResult::Ok); + + with_externalities( + &mut ExtBuilder::default().existential_deposit(15).build(), + || { + let cfg = Config::preload(); + let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); + ctx.overlay.set_balance(&ALICE, 1000); + + let created_contract_address = assert_matches!( + ctx.instantiate( + 100, + &mut GasMeter::::with_limit(10000, 1), + &dummy_ch, + &[], + ), + Ok(InstantiateReceipt { address }) => address + ); + + // Check that the newly created account has the expected code hash and + // there are instantiation event. + assert_eq!( + ctx.overlay.get_code(&created_contract_address).unwrap(), + dummy_ch + ); + assert_eq!( + &ctx.events, + &[ + RawEvent::Transfer(ALICE, created_contract_address, 100), + RawEvent::Instantiated(ALICE, created_contract_address), + ] + ); + }, + ); + } + + #[test] + fn instantiation_from_contract() { + let vm = MockVm::new(); + + let mut loader = MockLoader::empty(); + let dummy_ch = loader.insert(|_| VmExecResult::Ok); + let created_contract_address = Rc::new(RefCell::new(None::)); + let creator_ch = loader.insert({ + let dummy_ch = dummy_ch.clone(); + let created_contract_address = Rc::clone(&created_contract_address); + move |ctx| { + // Instantiate a contract and save it's address in `created_contract_address`. + *created_contract_address.borrow_mut() = ctx + .ext + .instantiate(&dummy_ch, 15u64, ctx.gas_meter, &[]) + .unwrap() + .address + .into(); + + VmExecResult::Ok + } + }); + + with_externalities( + &mut ExtBuilder::default().existential_deposit(15).build(), + || { + let cfg = Config::preload(); + let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); + ctx.overlay.set_balance(&ALICE, 1000); + ctx.overlay.set_code(&BOB, Some(creator_ch)); + + assert_matches!( + ctx.call( + BOB, + 20, + &mut GasMeter::::with_limit(1000, 1), + &[], + EmptyOutputBuf::new() + ), + Ok(_) + ); + + let created_contract_address = + created_contract_address.borrow().as_ref().unwrap().clone(); + + // Check that the newly created account has the expected code hash and + // there are instantiation event. 
+                assert_eq!(
+                    ctx.overlay.get_code(&created_contract_address).unwrap(),
+                    dummy_ch
+                );
+                assert_eq!(
+                    &ctx.events,
+                    &[
+                        RawEvent::Transfer(ALICE, BOB, 20),
+                        RawEvent::Transfer(BOB, created_contract_address, 15),
+                        RawEvent::Instantiated(BOB, created_contract_address),
+                    ]
+                );
+            },
+        );
+    }
+
+    #[test]
+    fn instantiation_fails() {
+        let vm = MockVm::new();
+
+        let mut loader = MockLoader::empty();
+        let dummy_ch = loader.insert(|_| VmExecResult::Trap("It's a trap!"));
+        let creator_ch = loader.insert({
+            let dummy_ch = dummy_ch.clone();
+            move |ctx| {
+                // Try to instantiate a contract. The dummy code traps, so this fails.
+                assert_matches!(
+                    ctx.ext.instantiate(&dummy_ch, 15u64, ctx.gas_meter, &[]),
+                    Err("It's a trap!")
+                );
+
+                VmExecResult::Ok
+            }
+        });
+
+        with_externalities(
+            &mut ExtBuilder::default().existential_deposit(15).build(),
+            || {
+                let cfg = Config::preload();
+                let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader);
+                ctx.overlay.set_balance(&ALICE, 1000);
+                ctx.overlay.set_code(&BOB, Some(creator_ch));
+
+                assert_matches!(
+                    ctx.call(
+                        BOB,
+                        20,
+                        &mut GasMeter::<Test>::with_limit(1000, 1),
+                        &[],
+                        EmptyOutputBuf::new()
+                    ),
+                    Ok(_)
+                );
+
+                // The contract wasn't created, so we don't expect to see an instantiation
+                // event here.
+                assert_eq!(&ctx.events, &[RawEvent::Transfer(ALICE, BOB, 20),]);
+            },
+        );
+    }
 }
diff --git a/srml/contract/src/gas.rs b/srml/contract/src/gas.rs
index 54199042bc..5ee93dd5c4 100644
--- a/srml/contract/src/gas.rs
+++ b/srml/contract/src/gas.rs
@@ -14,10 +14,13 @@
 // You should have received a copy of the GNU General Public License
 // along with Substrate. If not, see <http://www.gnu.org/licenses/>.

-use crate::{GasSpent, Module, Trait, BalanceOf, NegativeImbalanceOf};
-use runtime_primitives::BLOCK_FULL;
+use crate::{BalanceOf, GasSpent, Module, NegativeImbalanceOf, Trait};
 use runtime_primitives::traits::{As, CheckedMul, CheckedSub, Zero};
-use srml_support::{StorageValue, traits::{OnUnbalanced, ExistenceRequirement, WithdrawReason, Currency, Imbalance}};
+use runtime_primitives::BLOCK_FULL;
+use srml_support::{
+    traits::{Currency, ExistenceRequirement, Imbalance, OnUnbalanced, WithdrawReason},
+    StorageValue,
+};

 #[cfg(test)]
 use std::{any::Any, fmt::Debug};
@@ -25,17 +28,17 @@ use std::{any::Any, fmt::Debug};

 #[must_use]
 #[derive(Debug, PartialEq, Eq)]
 pub enum GasMeterResult {
-	Proceed,
-	OutOfGas,
+    Proceed,
+    OutOfGas,
 }

 impl GasMeterResult {
-	pub fn is_out_of_gas(&self) -> bool {
-		match *self {
-			GasMeterResult::OutOfGas => true,
-			GasMeterResult::Proceed => false,
-		}
-	}
+    pub fn is_out_of_gas(&self) -> bool {
+        match *self {
+            GasMeterResult::OutOfGas => true,
+            GasMeterResult::Proceed => false,
+        }
+    }
 }

 #[cfg(not(test))]
@@ -55,143 +58,143 @@ impl TestAuxiliaries for T {}
 /// for consistency). If inlined there should be no observable difference compared
 /// to a hand-written code.
 pub trait Token<T: Trait>: Copy + Clone + TestAuxiliaries {
-	/// Metadata type, which the token can require for calculating the amount
-	/// of gas to charge. Can be a some configuration type or
-	/// just the `()`.
-	type Metadata;
-
-	/// Calculate amount of gas that should be taken by this token.
-	///
-	/// This function should be really lightweight and must not fail. It is not
-	/// expected that implementors will query the storage or do any kinds of heavy operations.
-	///
-	/// That said, implementors of this function still can run into overflows
-	/// while calculating the amount. In this case it is ok to use saturating operations
-	/// since on overflow they will return `max_value` which should consume all gas.
-	fn calculate_amount(&self, metadata: &Self::Metadata) -> T::Gas;
+    /// Metadata type, which the token can require for calculating the amount
+    /// of gas to charge. Can be some configuration type or
+    /// just `()`.
+    type Metadata;
+
+    /// Calculate the amount of gas that should be taken by this token.
+    ///
+    /// This function should be really lightweight and must not fail. It is not
+    /// expected that implementors will query the storage or do any kinds of heavy operations.
+    ///
+    /// That said, implementors of this function still can run into overflows
+    /// while calculating the amount. In this case it is ok to use saturating operations
+    /// since on overflow they will return `max_value`, which should consume all gas.
+    fn calculate_amount(&self, metadata: &Self::Metadata) -> T::Gas;
 }

 /// A wrapper around a type-erased trait object of what used to be a `Token`.
 #[cfg(test)]
 pub struct ErasedToken {
-	pub description: String,
-	pub token: Box<Any>,
+    pub description: String,
+    pub token: Box<Any>,
 }

 pub struct GasMeter<T: Trait> {
-	limit: T::Gas,
-	/// Amount of gas left from initial gas limit. Can reach zero.
-	gas_left: T::Gas,
-	gas_price: BalanceOf<T>,
+    limit: T::Gas,
+    /// Amount of gas left from initial gas limit. Can reach zero.
+    gas_left: T::Gas,
+    gas_price: BalanceOf<T>,

-	#[cfg(test)]
-	tokens: Vec<ErasedToken>,
+    #[cfg(test)]
+    tokens: Vec<ErasedToken>,
 }

 impl<T: Trait> GasMeter<T> {
-	#[cfg(test)]
-	pub fn with_limit(gas_limit: T::Gas, gas_price: BalanceOf<T>) -> GasMeter<T> {
-		GasMeter {
-			limit: gas_limit,
-			gas_left: gas_limit,
-			gas_price,
-			#[cfg(test)]
-			tokens: Vec::new(),
-		}
-	}
-
-	/// Account for used gas.
-	///
-	/// Amount is calculated by the given `token`.
-	///
-	/// Returns `OutOfGas` if there is not enough gas or addition of the specified
-	/// amount of gas has lead to overflow. On success returns `Proceed`.
-	///
-	/// NOTE that amount is always consumed, i.e. if there is not enough gas
-	/// then the counter will be set to zero.
-	#[inline]
-	pub fn charge<Tok: Token<T>>(
-		&mut self,
-		metadata: &Tok::Metadata,
-		token: Tok,
-	) -> GasMeterResult {
-		#[cfg(test)]
-		{
-			// Unconditionally add the token to the storage.
-			let erased_tok = ErasedToken {
-				description: format!("{:?}", token),
-				token: Box::new(token),
-			};
-			self.tokens.push(erased_tok);
-		}
-
-		let amount = token.calculate_amount(metadata);
-		let new_value = match self.gas_left.checked_sub(&amount) {
-			None => None,
-			Some(val) if val.is_zero() => None,
-			Some(val) => Some(val),
-		};
-
-		// We always consume the gas even if there is not enough gas.
-		self.gas_left = new_value.unwrap_or_else(Zero::zero);
-
-		match new_value {
-			Some(_) => GasMeterResult::Proceed,
-			None => GasMeterResult::OutOfGas,
-		}
-	}
-
-	/// Allocate some amount of gas and perform some work with
-	/// a newly created nested gas meter.
-	///
-	/// Invokes `f` with either the gas meter that has `amount` gas left or
-	/// with `None`, if this gas meter has not enough gas to allocate given `amount`.
-	///
-	/// All unused gas in the nested gas meter is returned to this gas meter.
-	pub fn with_nested<R, F: FnOnce(Option<&mut GasMeter<T>>) -> R>(
-		&mut self,
-		amount: T::Gas,
-		f: F,
-	) -> R {
-		// NOTE that it is ok to allocate all available gas since it still ensured
-		// by `charge` that it doesn't reach zero.
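To make the `Token` contract above concrete, here is a sketch of a custom token against a simplified, non-generic version of the trait (the real trait is parameterized over the runtime's `Trait`); note the saturating multiplication the documentation asks for:

    trait Token: Copy {
        type Metadata;
        fn calculate_amount(&self, metadata: &Self::Metadata) -> u64;
    }

    #[derive(Copy, Clone, Debug)]
    struct ByteFeeToken {
        bytes: u64,
    }

    struct FeeSchedule {
        per_byte: u64,
    }

    impl Token for ByteFeeToken {
        type Metadata = FeeSchedule;
        fn calculate_amount(&self, schedule: &FeeSchedule) -> u64 {
            // Saturating: an overflow charges u64::max_value(), which is
            // guaranteed to exhaust any meter rather than silently wrap.
            self.bytes.saturating_mul(schedule.per_byte)
        }
    }

    fn main() {
        let token = ByteFeeToken { bytes: u64::max_value() };
        let schedule = FeeSchedule { per_byte: 2 };
        assert_eq!(token.calculate_amount(&schedule), u64::max_value());
    }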
- if self.gas_left < amount { - f(None) - } else { - self.gas_left = self.gas_left - amount; - let mut nested = GasMeter { - limit: amount, - gas_left: amount, - gas_price: self.gas_price, - #[cfg(test)] - tokens: Vec::new(), - }; - - let r = f(Some(&mut nested)); - - self.gas_left = self.gas_left + nested.gas_left; - - r - } - } - - pub fn gas_price(&self) -> BalanceOf { - self.gas_price - } - - /// Returns how much gas left from the initial budget. - pub fn gas_left(&self) -> T::Gas { - self.gas_left - } - - /// Returns how much gas was spent. - fn spent(&self) -> T::Gas { - self.limit - self.gas_left - } - - #[cfg(test)] - pub fn tokens(&self) -> &[ErasedToken] { - &self.tokens - } + #[cfg(test)] + pub fn with_limit(gas_limit: T::Gas, gas_price: BalanceOf) -> GasMeter { + GasMeter { + limit: gas_limit, + gas_left: gas_limit, + gas_price, + #[cfg(test)] + tokens: Vec::new(), + } + } + + /// Account for used gas. + /// + /// Amount is calculated by the given `token`. + /// + /// Returns `OutOfGas` if there is not enough gas or addition of the specified + /// amount of gas has lead to overflow. On success returns `Proceed`. + /// + /// NOTE that amount is always consumed, i.e. if there is not enough gas + /// then the counter will be set to zero. + #[inline] + pub fn charge>( + &mut self, + metadata: &Tok::Metadata, + token: Tok, + ) -> GasMeterResult { + #[cfg(test)] + { + // Unconditionally add the token to the storage. + let erased_tok = ErasedToken { + description: format!("{:?}", token), + token: Box::new(token), + }; + self.tokens.push(erased_tok); + } + + let amount = token.calculate_amount(metadata); + let new_value = match self.gas_left.checked_sub(&amount) { + None => None, + Some(val) if val.is_zero() => None, + Some(val) => Some(val), + }; + + // We always consume the gas even if there is not enough gas. + self.gas_left = new_value.unwrap_or_else(Zero::zero); + + match new_value { + Some(_) => GasMeterResult::Proceed, + None => GasMeterResult::OutOfGas, + } + } + + /// Allocate some amount of gas and perform some work with + /// a newly created nested gas meter. + /// + /// Invokes `f` with either the gas meter that has `amount` gas left or + /// with `None`, if this gas meter has not enough gas to allocate given `amount`. + /// + /// All unused gas in the nested gas meter is returned to this gas meter. + pub fn with_nested>) -> R>( + &mut self, + amount: T::Gas, + f: F, + ) -> R { + // NOTE that it is ok to allocate all available gas since it still ensured + // by `charge` that it doesn't reach zero. + if self.gas_left < amount { + f(None) + } else { + self.gas_left = self.gas_left - amount; + let mut nested = GasMeter { + limit: amount, + gas_left: amount, + gas_price: self.gas_price, + #[cfg(test)] + tokens: Vec::new(), + }; + + let r = f(Some(&mut nested)); + + self.gas_left = self.gas_left + nested.gas_left; + + r + } + } + + pub fn gas_price(&self) -> BalanceOf { + self.gas_price + } + + /// Returns how much gas left from the initial budget. + pub fn gas_left(&self) -> T::Gas { + self.gas_left + } + + /// Returns how much gas was spent. + fn spent(&self) -> T::Gas { + self.limit - self.gas_left + } + + #[cfg(test)] + pub fn tokens(&self) -> &[ErasedToken] { + &self.tokens + } } /// Buy the given amount of gas. @@ -199,67 +202,70 @@ impl GasMeter { /// Cost is calculated by multiplying the gas cost (taken from the storage) by the `gas_limit`. /// The funds are deducted from `transactor`. 
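The `with_nested` scheme above is worth seeing in isolation: a fixed `amount` is carved out of the parent meter, the closure runs against the nested meter, and whatever the closure did not consume flows back. A minimal sketch over plain `u64` gas:

    struct Meter {
        gas_left: u64,
    }

    impl Meter {
        fn with_nested<R>(&mut self, amount: u64, f: impl FnOnce(Option<&mut Meter>) -> R) -> R {
            if self.gas_left < amount {
                f(None)
            } else {
                self.gas_left -= amount;
                let mut nested = Meter { gas_left: amount };
                let r = f(Some(&mut nested));
                // Unused gas in the nested meter returns to the parent.
                self.gas_left += nested.gas_left;
                r
            }
        }
    }

    fn main() {
        let mut meter = Meter { gas_left: 100 };
        meter.with_nested(40, |nested| {
            // Pretend the nested execution spent 10 units.
            nested.unwrap().gas_left -= 10;
        });
        assert_eq!(meter.gas_left, 90); // 30 unused units came back
    }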
pub fn buy_gas( - transactor: &T::AccountId, - gas_limit: T::Gas, + transactor: &T::AccountId, + gas_limit: T::Gas, ) -> Result<(GasMeter, NegativeImbalanceOf), &'static str> { - // Check if the specified amount of gas is available in the current block. - // This cannot underflow since `gas_spent` is never greater than `block_gas_limit`. - let gas_available = >::block_gas_limit() - >::gas_spent(); - if gas_limit > gas_available { - // gas limit reached, revert the transaction and retry again in the future - return Err(BLOCK_FULL); - } - - // Buy the specified amount of gas. - let gas_price = >::gas_price(); - let cost = >>::as_(gas_limit.clone()) - .checked_mul(&gas_price) - .ok_or("overflow multiplying gas limit by price")?; - - let imbalance = T::Currency::withdraw( - transactor, - cost, - WithdrawReason::Fee, - ExistenceRequirement::KeepAlive - )?; - - Ok((GasMeter { - limit: gas_limit, - gas_left: gas_limit, - gas_price, - - #[cfg(test)] - tokens: Vec::new(), - }, imbalance)) + // Check if the specified amount of gas is available in the current block. + // This cannot underflow since `gas_spent` is never greater than `block_gas_limit`. + let gas_available = >::block_gas_limit() - >::gas_spent(); + if gas_limit > gas_available { + // gas limit reached, revert the transaction and retry again in the future + return Err(BLOCK_FULL); + } + + // Buy the specified amount of gas. + let gas_price = >::gas_price(); + let cost = >>::as_(gas_limit.clone()) + .checked_mul(&gas_price) + .ok_or("overflow multiplying gas limit by price")?; + + let imbalance = T::Currency::withdraw( + transactor, + cost, + WithdrawReason::Fee, + ExistenceRequirement::KeepAlive, + )?; + + Ok(( + GasMeter { + limit: gas_limit, + gas_left: gas_limit, + gas_price, + + #[cfg(test)] + tokens: Vec::new(), + }, + imbalance, + )) } /// Refund the unused gas. pub fn refund_unused_gas( - transactor: &T::AccountId, - gas_meter: GasMeter, - imbalance: NegativeImbalanceOf, + transactor: &T::AccountId, + gas_meter: GasMeter, + imbalance: NegativeImbalanceOf, ) { - let gas_spent = gas_meter.spent(); - let gas_left = gas_meter.gas_left(); - - // Increase total spent gas. - // This cannot overflow, since `gas_spent` is never greater than `block_gas_limit`, which - // also has T::Gas type. - >::mutate(|block_gas_spent| *block_gas_spent += gas_spent); - - // Refund gas left by the price it was bought. - let refund = >>::as_(gas_left) * gas_meter.gas_price; - let refund_imbalance = T::Currency::deposit_creating(transactor, refund); - if let Ok(imbalance) = imbalance.offset(refund_imbalance) { - T::GasPayment::on_unbalanced(imbalance); - } + let gas_spent = gas_meter.spent(); + let gas_left = gas_meter.gas_left(); + + // Increase total spent gas. + // This cannot overflow, since `gas_spent` is never greater than `block_gas_limit`, which + // also has T::Gas type. + >::mutate(|block_gas_spent| *block_gas_spent += gas_spent); + + // Refund gas left by the price it was bought. + let refund = >>::as_(gas_left) * gas_meter.gas_price; + let refund_imbalance = T::Currency::deposit_creating(transactor, refund); + if let Ok(imbalance) = imbalance.offset(refund_imbalance) { + T::GasPayment::on_unbalanced(imbalance); + } } /// A little handy utility for converting a value in balance units into approximitate value in gas units /// at the given gas price. 
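`buy_gas`, `refund_unused_gas`, and `approx_gas_for_balance` together form a buy-up-front, refund-later scheme. A standalone sketch of the arithmetic with `u64` standing in for the generic gas and balance types (the error strings are illustrative, not the module's exact values):

    fn buy_gas(
        gas_limit: u64,
        gas_price: u64,
        block_gas_limit: u64,
        gas_spent: u64,
    ) -> Result<u64, &'static str> {
        // Cannot underflow: gas_spent never exceeds block_gas_limit.
        let gas_available = block_gas_limit - gas_spent;
        if gas_limit > gas_available {
            // Stand-in for returning BLOCK_FULL.
            return Err("block full");
        }
        gas_limit
            .checked_mul(gas_price)
            .ok_or("overflow multiplying gas limit by price")
    }

    fn approx_gas_for_balance(gas_price: u64, balance: u64) -> u64 {
        balance / gas_price
    }

    fn main() {
        // Buying 1_000 gas at price 2 withdraws 2_000 balance units.
        assert_eq!(buy_gas(1_000, 2, 1_000_000, 0), Ok(2_000));
        // Only 500 gas left in the block: the transaction is rejected.
        assert!(buy_gas(1_000, 2, 1_000_000, 999_500).is_err());
        // The reverse conversion is integer division: 2_500 units at price 2
        // are approximately 1_250 gas.
        assert_eq!(approx_gas_for_balance(2, 2_500), 1_250);
    }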
 pub fn approx_gas_for_balance<T: Trait>(gas_price: BalanceOf<T>, balance: BalanceOf<T>) -> T::Gas {
-	let amount_in_gas: BalanceOf<T> = balance / gas_price;
-	<T::Gas as As<BalanceOf<T>>>::sa(amount_in_gas)
+    let amount_in_gas: BalanceOf<T> = balance / gas_price;
+    <T::Gas as As<BalanceOf<T>>>::sa(amount_in_gas)
 }

 /// A simple utility macro that helps to match against a
@@ -302,60 +308,62 @@ macro_rules! match_tokens {

 #[cfg(test)]
 mod tests {
-	use super::{GasMeter, Token};
-	use crate::tests::Test;
-
-	/// A trivial token that charges 1 unit of gas.
-	#[derive(Copy, Clone, PartialEq, Eq, Debug)]
-	struct UnitToken;
-	impl Token<Test> for UnitToken {
-		type Metadata = ();
-		fn calculate_amount(&self, _metadata: &()) -> u64 { 1 }
-	}
-
-	struct DoubleTokenMetadata {
-		multiplier: u64,
-	}
-	/// A simple token that charges for the given amount multipled to
-	/// a multiplier taken from a given metadata.
-	#[derive(Copy, Clone, PartialEq, Eq, Debug)]
-	struct DoubleToken(u64);
-
-	impl Token<Test> for DoubleToken {
-		type Metadata = DoubleTokenMetadata;
-		fn calculate_amount(&self, metadata: &DoubleTokenMetadata) -> u64 {
-			// Probably you want to use saturating mul in producation code.
-			self.0 * metadata.multiplier
-		}
-	}
-
-	#[test]
-	fn it_works() {
-		let gas_meter = GasMeter::<Test>::with_limit(50000, 10);
-		assert_eq!(gas_meter.gas_left(), 50000);
-	}
-
-	#[test]
-	fn simple() {
-		let mut gas_meter = GasMeter::<Test>::with_limit(50000, 10);
-
-		let result = gas_meter.charge(&DoubleTokenMetadata { multiplier: 3 }, DoubleToken(10));
-		assert!(!result.is_out_of_gas());
-
-		assert_eq!(gas_meter.gas_left(), 49_970);
-		assert_eq!(gas_meter.spent(), 30);
-		assert_eq!(gas_meter.gas_price(), 10);
-	}
-
-	#[test]
-	fn tracing() {
-		let mut gas_meter = GasMeter::<Test>::with_limit(50000, 10);
-		assert!(!gas_meter.charge(&(), UnitToken).is_out_of_gas());
-		assert!(!gas_meter
-			.charge(&DoubleTokenMetadata { multiplier: 3 }, DoubleToken(10))
-			.is_out_of_gas());
-
-		let mut tokens = gas_meter.tokens()[0..2].iter();
-		match_tokens!(tokens, UnitToken, DoubleToken(10),);
-	}
+    use super::{GasMeter, Token};
+    use crate::tests::Test;
+
+    /// A trivial token that charges 1 unit of gas.
+    #[derive(Copy, Clone, PartialEq, Eq, Debug)]
+    struct UnitToken;
+    impl Token<Test> for UnitToken {
+        type Metadata = ();
+        fn calculate_amount(&self, _metadata: &()) -> u64 {
+            1
+        }
+    }
+
+    struct DoubleTokenMetadata {
+        multiplier: u64,
+    }
+    /// A simple token that charges for the given amount multiplied by
+    /// a multiplier taken from a given metadata.
+    #[derive(Copy, Clone, PartialEq, Eq, Debug)]
+    struct DoubleToken(u64);
+
+    impl Token<Test> for DoubleToken {
+        type Metadata = DoubleTokenMetadata;
+        fn calculate_amount(&self, metadata: &DoubleTokenMetadata) -> u64 {
+            // Probably you want to use saturating mul in production code.
+ self.0 * metadata.multiplier + } + } + + #[test] + fn it_works() { + let gas_meter = GasMeter::::with_limit(50000, 10); + assert_eq!(gas_meter.gas_left(), 50000); + } + + #[test] + fn simple() { + let mut gas_meter = GasMeter::::with_limit(50000, 10); + + let result = gas_meter.charge(&DoubleTokenMetadata { multiplier: 3 }, DoubleToken(10)); + assert!(!result.is_out_of_gas()); + + assert_eq!(gas_meter.gas_left(), 49_970); + assert_eq!(gas_meter.spent(), 30); + assert_eq!(gas_meter.gas_price(), 10); + } + + #[test] + fn tracing() { + let mut gas_meter = GasMeter::::with_limit(50000, 10); + assert!(!gas_meter.charge(&(), UnitToken).is_out_of_gas()); + assert!(!gas_meter + .charge(&DoubleTokenMetadata { multiplier: 3 }, DoubleToken(10)) + .is_out_of_gas()); + + let mut tokens = gas_meter.tokens()[0..2].iter(); + match_tokens!(tokens, UnitToken, DoubleToken(10),); + } } diff --git a/srml/contract/src/lib.rs b/srml/contract/src/lib.rs index da3fe4dbf0..69eb5486c4 100644 --- a/srml/contract/src/lib.rs +++ b/srml/contract/src/lib.rs @@ -92,19 +92,21 @@ mod wasm; #[cfg(test)] mod tests; -use crate::exec::ExecutionContext; use crate::account_db::{AccountDb, DirectAccountDb}; +use crate::exec::ExecutionContext; +use parity_codec::{Codec, Decode, Encode}; +use rstd::marker::PhantomData; +use rstd::prelude::*; +use runtime_primitives::traits::{As, Bounded, Hash, SimpleArithmetic, StaticLookup}; #[cfg(feature = "std")] -use serde_derive::{Serialize, Deserialize}; +use serde_derive::{Deserialize, Serialize}; +use srml_support::dispatch::{Dispatchable, Result}; +use srml_support::traits::{Currency, OnFreeBalanceZero, OnUnbalanced}; +use srml_support::{ + decl_event, decl_module, decl_storage, storage::child, Parameter, StorageMap, StorageValue, +}; use substrate_primitives::crypto::UncheckedFrom; -use rstd::prelude::*; -use rstd::marker::PhantomData; -use parity_codec::{Codec, Encode, Decode}; -use runtime_primitives::traits::{Hash, As, SimpleArithmetic,Bounded, StaticLookup}; -use srml_support::dispatch::{Result, Dispatchable}; -use srml_support::{Parameter, StorageMap, StorageValue, decl_module, decl_event, decl_storage, storage::child}; -use srml_support::traits::{OnFreeBalanceZero, OnUnbalanced, Currency}; use system::{ensure_signed, RawOrigin}; use timestamp; @@ -113,33 +115,33 @@ pub type TrieId = Vec; /// A function that generates an `AccountId` for a contract upon instantiation. pub trait ContractAddressFor { - fn contract_address_for(code_hash: &CodeHash, data: &[u8], origin: &AccountId) -> AccountId; + fn contract_address_for(code_hash: &CodeHash, data: &[u8], origin: &AccountId) -> AccountId; } /// A function that returns the fee for dispatching a `Call`. pub trait ComputeDispatchFee { - fn compute_dispatch_fee(call: &Call) -> Balance; + fn compute_dispatch_fee(call: &Call) -> Balance; } -#[derive(Encode,Decode,Clone,Debug)] +#[derive(Encode, Decode, Clone, Debug)] /// Information for managing an acocunt and its sub trie abstraction. 
/// This is the required info to cache for an account pub struct AccountInfo { - /// unique ID for the subtree encoded as a byte - pub trie_id: TrieId, - /// the size of stored value in octet - pub current_mem_stored: u64, + /// unique ID for the subtree encoded as a byte + pub trie_id: TrieId, + /// the size of stored value in octet + pub current_mem_stored: u64, } /// Get a trie id (trie id must be unique and collision resistant depending upon its context) /// Note that it is different than encode because trie id should have collision resistance /// property (being a proper uniqueid). pub trait TrieIdGenerator { - /// get a trie id for an account, using reference to parent account trie id to ensure - /// uniqueness of trie id - /// The implementation must ensure every new trie id is unique: two consecutive call with the - /// same parameter needs to return different trie id values. - fn trie_id(account_id: &AccountId) -> TrieId; + /// get a trie id for an account, using reference to parent account trie id to ensure + /// uniqueness of trie id + /// The implementation must ensure every new trie id is unique: two consecutive call with the + /// same parameter needs to return different trie id values. + fn trie_id(account_id: &AccountId) -> TrieId; } /// Get trie id from `account_id` @@ -149,49 +151,59 @@ pub struct TrieIdFromParentCounter(PhantomData); /// accountid_counter` impl TrieIdGenerator for TrieIdFromParentCounter where - T::AccountId: AsRef<[u8]> + T::AccountId: AsRef<[u8]>, { - fn trie_id(account_id: &T::AccountId) -> TrieId { - // note that skipping a value due to error is not an issue here. - // we only need uniqueness, not sequence. - let new_seed = >::mutate(|v| v.wrapping_add(1)); - - let mut buf = Vec::new(); - buf.extend_from_slice(account_id.as_ref()); - buf.extend_from_slice(&new_seed.to_le_bytes()[..]); - T::Hashing::hash(&buf[..]).as_ref().into() - } + fn trie_id(account_id: &T::AccountId) -> TrieId { + // note that skipping a value due to error is not an issue here. + // we only need uniqueness, not sequence. + let new_seed = >::mutate(|v| v.wrapping_add(1)); + + let mut buf = Vec::new(); + buf.extend_from_slice(account_id.as_ref()); + buf.extend_from_slice(&new_seed.to_le_bytes()[..]); + T::Hashing::hash(&buf[..]).as_ref().into() + } } -pub type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; -pub type NegativeImbalanceOf = <::Currency as Currency<::AccountId>>::NegativeImbalance; +pub type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; +pub type NegativeImbalanceOf = + <::Currency as Currency<::AccountId>>::NegativeImbalance; pub trait Trait: timestamp::Trait { - type Currency: Currency; - - /// The outer call dispatch type. - type Call: Parameter + Dispatchable::Origin>; - - /// The overarching event type. - type Event: From> + Into<::Event>; - - // As is needed for wasm-utils - type Gas: Parameter + Default + Codec + SimpleArithmetic + Bounded + Copy + As> + As + As; - - /// A function type to get the contract address given the creator. - type DetermineContractAddress: ContractAddressFor, Self::AccountId>; - - /// A function type that computes the fee for dispatching the given `Call`. - /// - /// It is recommended (though not required) for this function to return a fee that would be taken - /// by executive module for regular dispatch. - type ComputeDispatchFee: ComputeDispatchFee>; - - /// trieid id generator - type TrieIdGenerator: TrieIdGenerator; - - /// Handler for the unbalanced reduction when making a gas payment. 
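The trie id construction above hashes the account id concatenated with a wrapping per-chain counter, which is why two consecutive calls for the same account yield distinct ids. A sketch of the construction; `DefaultHasher` is only a stand-in for the runtime's `T::Hashing`:

    use std::collections::hash_map::DefaultHasher;
    use std::hash::Hasher;

    fn trie_id(account_id: &[u8], counter: &mut u64) -> Vec<u8> {
        // Skipping or wrapping counter values is fine: only uniqueness matters.
        *counter = counter.wrapping_add(1);
        let mut buf = Vec::new();
        buf.extend_from_slice(account_id);
        buf.extend_from_slice(&counter.to_le_bytes());
        let mut hasher = DefaultHasher::new();
        hasher.write(&buf);
        hasher.finish().to_le_bytes().to_vec()
    }

    fn main() {
        let mut counter = 0u64;
        let first = trie_id(b"alice", &mut counter);
        let second = trie_id(b"alice", &mut counter);
        // Two consecutive calls with the same account return different ids.
        assert_ne!(first, second);
    }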
- type GasPayment: OnUnbalanced>; + type Currency: Currency; + + /// The outer call dispatch type. + type Call: Parameter + Dispatchable::Origin>; + + /// The overarching event type. + type Event: From> + Into<::Event>; + + // As is needed for wasm-utils + type Gas: Parameter + + Default + + Codec + + SimpleArithmetic + + Bounded + + Copy + + As> + + As + + As; + + /// A function type to get the contract address given the creator. + type DetermineContractAddress: ContractAddressFor, Self::AccountId>; + + /// A function type that computes the fee for dispatching the given `Call`. + /// + /// It is recommended (though not required) for this function to return a fee that would be taken + /// by executive module for regular dispatch. + type ComputeDispatchFee: ComputeDispatchFee>; + + /// trieid id generator + type TrieIdGenerator: TrieIdGenerator; + + /// Handler for the unbalanced reduction when making a gas payment. + type GasPayment: OnUnbalanced>; } /// Simple contract address determintator. @@ -203,257 +215,261 @@ pub trait Trait: timestamp::Trait { pub struct SimpleAddressDeterminator(PhantomData); impl ContractAddressFor, T::AccountId> for SimpleAddressDeterminator where - T::AccountId: UncheckedFrom + AsRef<[u8]> + T::AccountId: UncheckedFrom + AsRef<[u8]>, { - fn contract_address_for(code_hash: &CodeHash, data: &[u8], origin: &T::AccountId) -> T::AccountId { - let data_hash = T::Hashing::hash(data); - - let mut buf = Vec::new(); - buf.extend_from_slice(code_hash.as_ref()); - buf.extend_from_slice(data_hash.as_ref()); - buf.extend_from_slice(origin.as_ref()); - - UncheckedFrom::unchecked_from(T::Hashing::hash(&buf[..])) - } + fn contract_address_for( + code_hash: &CodeHash, + data: &[u8], + origin: &T::AccountId, + ) -> T::AccountId { + let data_hash = T::Hashing::hash(data); + + let mut buf = Vec::new(); + buf.extend_from_slice(code_hash.as_ref()); + buf.extend_from_slice(data_hash.as_ref()); + buf.extend_from_slice(origin.as_ref()); + + UncheckedFrom::unchecked_from(T::Hashing::hash(&buf[..])) + } } /// The default dispatch fee computor computes the fee in the same way that /// implementation of `MakePayment` for balances module does. pub struct DefaultDispatchFeeComputor(PhantomData); impl ComputeDispatchFee> for DefaultDispatchFeeComputor { - fn compute_dispatch_fee(call: &T::Call) -> BalanceOf { - let encoded_len = call.using_encoded(|encoded| encoded.len()); - let base_fee = >::transaction_base_fee(); - let byte_fee = >::transaction_byte_fee(); - base_fee + byte_fee * as As>::sa(encoded_len as u64) - } + fn compute_dispatch_fee(call: &T::Call) -> BalanceOf { + let encoded_len = call.using_encoded(|encoded| encoded.len()); + let base_fee = >::transaction_base_fee(); + let byte_fee = >::transaction_byte_fee(); + base_fee + byte_fee * as As>::sa(encoded_len as u64) + } } decl_module! { - /// Contracts module. - pub struct Module for enum Call where origin: ::Origin { - fn deposit_event() = default; - - /// Updates the schedule for metering contracts. - /// - /// The schedule must have a greater version than the stored schedule. - fn update_schedule(schedule: Schedule) -> Result { - if >::current_schedule().version >= schedule.version { - return Err("new schedule must have a greater version than current"); - } - - Self::deposit_event(RawEvent::ScheduleUpdated(schedule.version)); - >::put(schedule); - - Ok(()) - } - - /// Stores the given binary Wasm code into the chains storage and returns its `codehash`. - /// You can instantiate contracts only with stored code. 
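Two deterministic computations sit at the heart of this part of the module: `SimpleAddressDeterminator` derives a contract address as the hash of `code_hash ++ hash(data) ++ origin`, and `DefaultDispatchFeeComputor` charges `base_fee + byte_fee * encoded_len`. A sketch of both, again with `DefaultHasher` standing in for `T::Hashing`:

    use std::collections::hash_map::DefaultHasher;
    use std::hash::Hasher;

    fn hash(data: &[u8]) -> [u8; 8] {
        let mut hasher = DefaultHasher::new();
        hasher.write(data);
        hasher.finish().to_le_bytes()
    }

    // Same creator, code, and input data always yield the same address.
    fn contract_address(code_hash: &[u8], data: &[u8], origin: &[u8]) -> [u8; 8] {
        let mut buf = Vec::new();
        buf.extend_from_slice(code_hash);
        buf.extend_from_slice(&hash(data));
        buf.extend_from_slice(origin);
        hash(&buf)
    }

    fn dispatch_fee(base_fee: u64, byte_fee: u64, encoded_len: u64) -> u64 {
        base_fee + byte_fee * encoded_len
    }

    fn main() {
        let a = contract_address(b"code", b"ctor-data", b"alice");
        let b = contract_address(b"code", b"ctor-data", b"alice");
        assert_eq!(a, b); // deterministic
        assert_eq!(dispatch_fee(10, 1, 32), 42);
    }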
- fn put_code( - origin, - #[compact] gas_limit: T::Gas, - code: Vec - ) -> Result { - let origin = ensure_signed(origin)?; - let schedule = >::current_schedule(); - - let (mut gas_meter, imbalance) = gas::buy_gas::(&origin, gas_limit)?; - - let result = wasm::save_code::(code, &mut gas_meter, &schedule); - if let Ok(code_hash) = result { - Self::deposit_event(RawEvent::CodeStored(code_hash)); - } - - gas::refund_unused_gas::(&origin, gas_meter, imbalance); - - result.map(|_| ()) - } - - /// Makes a call to an account, optionally transferring some balance. - /// - /// * If the account is a smart-contract account, the associated code will be - /// executed and any value will be transferred. - /// * If the account is a regular account, any value will be transferred. - /// * If no account exists and the call value is not less than `existential_deposit`, - /// a regular account will be created and any value will be transferred. - fn call( - origin, - dest: ::Source, - #[compact] value: BalanceOf, - #[compact] gas_limit: T::Gas, - data: Vec - ) -> Result { - let origin = ensure_signed(origin)?; - let dest = T::Lookup::lookup(dest)?; - - // Pay for the gas upfront. - // - // NOTE: it is very important to avoid any state changes before - // paying for the gas. - let (mut gas_meter, imbalance) = gas::buy_gas::(&origin, gas_limit)?; - - let cfg = Config::preload(); - let vm = crate::wasm::WasmVm::new(&cfg.schedule); - let loader = crate::wasm::WasmLoader::new(&cfg.schedule); - let mut ctx = ExecutionContext::top_level(origin.clone(), &cfg, &vm, &loader); - - let result = ctx.call(dest, value, &mut gas_meter, &data, exec::EmptyOutputBuf::new()); - - if let Ok(_) = result { - // Commit all changes that made it thus far into the persistant storage. - DirectAccountDb.commit(ctx.overlay.into_change_set()); - - // Then deposit all events produced. - ctx.events.into_iter().for_each(Self::deposit_event); - } - - // Refund cost of the unused gas. - // - // NOTE: this should go after the commit to the storage, since the storage changes - // can alter the balance of the caller. - gas::refund_unused_gas::(&origin, gas_meter, imbalance); - - // Dispatch every recorded call with an appropriate origin. - ctx.calls.into_iter().for_each(|(who, call)| { - let result = call.dispatch(RawOrigin::Signed(who.clone()).into()); - Self::deposit_event(RawEvent::Dispatched(who, result.is_ok())); - }); - - result.map(|_| ()) - } - - /// Creates a new contract from the `codehash` generated by `put_code`, optionally transferring some balance. - /// - /// Creation is executed as follows: - /// - /// - the destination address is computed based on the sender and hash of the code. - /// - the smart-contract account is created at the computed address. - /// - the `ctor_code` is executed in the context of the newly created account. Buffer returned - /// after the execution is saved as the `code` of the account. That code will be invoked - /// upon any call received by this account. - /// - The contract is initialized. - fn create( - origin, - #[compact] endowment: BalanceOf, - #[compact] gas_limit: T::Gas, - code_hash: CodeHash, - data: Vec - ) -> Result { - let origin = ensure_signed(origin)?; - - // Commit the gas upfront. - // - // NOTE: it is very important to avoid any state changes before - // paying for the gas. 
- let (mut gas_meter, imbalance) = gas::buy_gas::(&origin, gas_limit)?; - - let cfg = Config::preload(); - let vm = crate::wasm::WasmVm::new(&cfg.schedule); - let loader = crate::wasm::WasmLoader::new(&cfg.schedule); - let mut ctx = ExecutionContext::top_level(origin.clone(), &cfg, &vm, &loader); - let result = ctx.instantiate(endowment, &mut gas_meter, &code_hash, &data); - - if let Ok(_) = result { - // Commit all changes that made it thus far into the persistant storage. - DirectAccountDb.commit(ctx.overlay.into_change_set()); - - // Then deposit all events produced. - ctx.events.into_iter().for_each(Self::deposit_event); - } - - // Refund cost of the unused gas. - // - // NOTE: this should go after the commit to the storage, since the storage changes - // can alter the balance of the caller. - gas::refund_unused_gas::(&origin, gas_meter, imbalance); - - // Dispatch every recorded call with an appropriate origin. - ctx.calls.into_iter().for_each(|(who, call)| { - let result = call.dispatch(RawOrigin::Signed(who.clone()).into()); - Self::deposit_event(RawEvent::Dispatched(who, result.is_ok())); - }); - - result.map(|_| ()) - } - - fn on_finalize() { - >::kill(); - } - } + /// Contracts module. + pub struct Module for enum Call where origin: ::Origin { + fn deposit_event() = default; + + /// Updates the schedule for metering contracts. + /// + /// The schedule must have a greater version than the stored schedule. + fn update_schedule(schedule: Schedule) -> Result { + if >::current_schedule().version >= schedule.version { + return Err("new schedule must have a greater version than current"); + } + + Self::deposit_event(RawEvent::ScheduleUpdated(schedule.version)); + >::put(schedule); + + Ok(()) + } + + /// Stores the given binary Wasm code into the chains storage and returns its `codehash`. + /// You can instantiate contracts only with stored code. + fn put_code( + origin, + #[compact] gas_limit: T::Gas, + code: Vec + ) -> Result { + let origin = ensure_signed(origin)?; + let schedule = >::current_schedule(); + + let (mut gas_meter, imbalance) = gas::buy_gas::(&origin, gas_limit)?; + + let result = wasm::save_code::(code, &mut gas_meter, &schedule); + if let Ok(code_hash) = result { + Self::deposit_event(RawEvent::CodeStored(code_hash)); + } + + gas::refund_unused_gas::(&origin, gas_meter, imbalance); + + result.map(|_| ()) + } + + /// Makes a call to an account, optionally transferring some balance. + /// + /// * If the account is a smart-contract account, the associated code will be + /// executed and any value will be transferred. + /// * If the account is a regular account, any value will be transferred. + /// * If no account exists and the call value is not less than `existential_deposit`, + /// a regular account will be created and any value will be transferred. + fn call( + origin, + dest: ::Source, + #[compact] value: BalanceOf, + #[compact] gas_limit: T::Gas, + data: Vec + ) -> Result { + let origin = ensure_signed(origin)?; + let dest = T::Lookup::lookup(dest)?; + + // Pay for the gas upfront. + // + // NOTE: it is very important to avoid any state changes before + // paying for the gas. 
+ let (mut gas_meter, imbalance) = gas::buy_gas::(&origin, gas_limit)?; + + let cfg = Config::preload(); + let vm = crate::wasm::WasmVm::new(&cfg.schedule); + let loader = crate::wasm::WasmLoader::new(&cfg.schedule); + let mut ctx = ExecutionContext::top_level(origin.clone(), &cfg, &vm, &loader); + + let result = ctx.call(dest, value, &mut gas_meter, &data, exec::EmptyOutputBuf::new()); + + if let Ok(_) = result { + // Commit all changes that made it thus far into the persistant storage. + DirectAccountDb.commit(ctx.overlay.into_change_set()); + + // Then deposit all events produced. + ctx.events.into_iter().for_each(Self::deposit_event); + } + + // Refund cost of the unused gas. + // + // NOTE: this should go after the commit to the storage, since the storage changes + // can alter the balance of the caller. + gas::refund_unused_gas::(&origin, gas_meter, imbalance); + + // Dispatch every recorded call with an appropriate origin. + ctx.calls.into_iter().for_each(|(who, call)| { + let result = call.dispatch(RawOrigin::Signed(who.clone()).into()); + Self::deposit_event(RawEvent::Dispatched(who, result.is_ok())); + }); + + result.map(|_| ()) + } + + /// Creates a new contract from the `codehash` generated by `put_code`, optionally transferring some balance. + /// + /// Creation is executed as follows: + /// + /// - the destination address is computed based on the sender and hash of the code. + /// - the smart-contract account is created at the computed address. + /// - the `ctor_code` is executed in the context of the newly created account. Buffer returned + /// after the execution is saved as the `code` of the account. That code will be invoked + /// upon any call received by this account. + /// - The contract is initialized. + fn create( + origin, + #[compact] endowment: BalanceOf, + #[compact] gas_limit: T::Gas, + code_hash: CodeHash, + data: Vec + ) -> Result { + let origin = ensure_signed(origin)?; + + // Commit the gas upfront. + // + // NOTE: it is very important to avoid any state changes before + // paying for the gas. + let (mut gas_meter, imbalance) = gas::buy_gas::(&origin, gas_limit)?; + + let cfg = Config::preload(); + let vm = crate::wasm::WasmVm::new(&cfg.schedule); + let loader = crate::wasm::WasmLoader::new(&cfg.schedule); + let mut ctx = ExecutionContext::top_level(origin.clone(), &cfg, &vm, &loader); + let result = ctx.instantiate(endowment, &mut gas_meter, &code_hash, &data); + + if let Ok(_) = result { + // Commit all changes that made it thus far into the persistant storage. + DirectAccountDb.commit(ctx.overlay.into_change_set()); + + // Then deposit all events produced. + ctx.events.into_iter().for_each(Self::deposit_event); + } + + // Refund cost of the unused gas. + // + // NOTE: this should go after the commit to the storage, since the storage changes + // can alter the balance of the caller. + gas::refund_unused_gas::(&origin, gas_meter, imbalance); + + // Dispatch every recorded call with an appropriate origin. + ctx.calls.into_iter().for_each(|(who, call)| { + let result = call.dispatch(RawOrigin::Signed(who.clone()).into()); + Self::deposit_event(RawEvent::Dispatched(who, result.is_ok())); + }); + + result.map(|_| ()) + } + + fn on_finalize() { + >::kill(); + } + } } decl_event! { - pub enum Event - where - Balance = BalanceOf, - ::AccountId, - ::Hash - { - /// Transfer happened `from` to `to` with given `value` as part of a `call` or `create`. - Transfer(AccountId, AccountId, Balance), - - /// Contract deployed by address at the specified address. 
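Both `call` and `create` above follow the same discipline: buy gas before any state change, execute against an overlay rather than persistent storage, commit the overlay only on success, and refund only after the commit (committed changes may alter the caller's balance). A toy sketch of the commit-on-success part, with stand-in types:

    use std::collections::HashMap;

    struct Overlay {
        balances: HashMap<u64, u64>,
    }

    struct Storage {
        balances: HashMap<u64, u64>,
    }

    impl Storage {
        // All-or-nothing: only invoked when execution succeeded, so a failed
        // call leaves persistent storage untouched.
        fn commit(&mut self, overlay: Overlay) {
            for (account, balance) in overlay.balances {
                self.balances.insert(account, balance);
            }
        }
    }

    fn main() {
        let mut storage = Storage { balances: HashMap::new() };
        let mut overlay = Overlay { balances: HashMap::new() };
        overlay.balances.insert(1, 45); // execution mutates only the overlay
        storage.commit(overlay);
        assert_eq!(storage.balances[&1], 45);
    }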
- Instantiated(AccountId, AccountId), - - /// Code with the specified hash has been stored. - CodeStored(Hash), - - /// Triggered when the current schedule is updated. - ScheduleUpdated(u32), - - /// A call was dispatched from the given account. The bool signals whether it was - /// successful execution or not. - Dispatched(AccountId, bool), - } + pub enum Event + where + Balance = BalanceOf, + ::AccountId, + ::Hash + { + /// A transfer happened from `from` to `to` with the given `value` as part of a `call` or `create`. + Transfer(AccountId, AccountId, Balance), + + /// A contract was deployed by the given address at the specified address. + Instantiated(AccountId, AccountId), + + /// Code with the specified hash has been stored. + CodeStored(Hash), + + /// Triggered when the current schedule is updated. + ScheduleUpdated(u32), + + /// A call was dispatched from the given account. The bool signals whether the + /// execution was successful or not. + Dispatched(AccountId, bool), + } } decl_storage! { - trait Store for Module as Contract { - /// The fee required to make a transfer. - TransferFee get(transfer_fee) config(): BalanceOf; - /// The fee required to create an account. - CreationFee get(creation_fee) config(): BalanceOf; - /// The fee to be paid for making a transaction; the base. - TransactionBaseFee get(transaction_base_fee) config(): BalanceOf; - /// The fee to be paid for making a transaction; the per-byte portion. - TransactionByteFee get(transaction_byte_fee) config(): BalanceOf; - /// The fee required to create a contract instance. - ContractFee get(contract_fee) config(): BalanceOf = BalanceOf::::sa(21); - /// The base fee charged for calling into a contract. - CallBaseFee get(call_base_fee) config(): T::Gas = T::Gas::sa(135); - /// The base fee charged for creating a contract. - CreateBaseFee get(create_base_fee) config(): T::Gas = T::Gas::sa(175); - /// The price of one unit of gas. - GasPrice get(gas_price) config(): BalanceOf = BalanceOf::::sa(1); - /// The maximum nesting level of a call/create stack. - MaxDepth get(max_depth) config(): u32 = 100; - /// The maximum amount of gas that could be expended per block. - BlockGasLimit get(block_gas_limit) config(): T::Gas = T::Gas::sa(1_000_000); - /// Gas spent so far in this block. - GasSpent get(gas_spent): T::Gas; - /// Current cost schedule for contracts. - CurrentSchedule get(current_schedule) config(): Schedule = Schedule::default(); - /// The code associated with a given account. - pub CodeHashOf: map T::AccountId => Option>; - /// A mapping from an original code hash to the original code, untouched by instrumentation. - pub PristineCode: map CodeHash => Option>; - /// A mapping between an original code hash and instrumented wasm code, ready for the execution. - pub CodeStorage: map CodeHash => Option; - /// The subtrie counter - pub AccountCounter: u64 = 0; - /// The code associated with a given account. - pub AccountInfoOf: map T::AccountId => Option; - } + trait Store for Module as Contract { + /// The fee required to make a transfer. + TransferFee get(transfer_fee) config(): BalanceOf; + /// The fee required to create an account. + CreationFee get(creation_fee) config(): BalanceOf; + /// The fee to be paid for making a transaction; the base. + TransactionBaseFee get(transaction_base_fee) config(): BalanceOf; + /// The fee to be paid for making a transaction; the per-byte portion. + TransactionByteFee get(transaction_byte_fee) config(): BalanceOf; + /// The fee required to create a contract instance.
+ ContractFee get(contract_fee) config(): BalanceOf = BalanceOf::::sa(21); + /// The base fee charged for calling into a contract. + CallBaseFee get(call_base_fee) config(): T::Gas = T::Gas::sa(135); + /// The base fee charged for creating a contract. + CreateBaseFee get(create_base_fee) config(): T::Gas = T::Gas::sa(175); + /// The price of one unit of gas. + GasPrice get(gas_price) config(): BalanceOf = BalanceOf::::sa(1); + /// The maximum nesting level of a call/create stack. + MaxDepth get(max_depth) config(): u32 = 100; + /// The maximum amount of gas that could be expended per block. + BlockGasLimit get(block_gas_limit) config(): T::Gas = T::Gas::sa(1_000_000); + /// Gas spent so far in this block. + GasSpent get(gas_spent): T::Gas; + /// Current cost schedule for contracts. + CurrentSchedule get(current_schedule) config(): Schedule = Schedule::default(); + /// The code associated with a given account. + pub CodeHashOf: map T::AccountId => Option>; + /// A mapping from an original code hash to the original code, untouched by instrumentation. + pub PristineCode: map CodeHash => Option>; + /// A mapping between an original code hash and instrumented wasm code, ready for execution. + pub CodeStorage: map CodeHash => Option; + /// The subtrie counter. + pub AccountCounter: u64 = 0; + /// The account info associated with a given account. + pub AccountInfoOf: map T::AccountId => Option; + } } impl OnFreeBalanceZero for Module { - fn on_free_balance_zero(who: &T::AccountId) { - >::remove(who); - >::get_account_info(&DirectAccountDb, who).map(|subtrie| { - child::kill_storage(&subtrie.trie_id); - }); - } + fn on_free_balance_zero(who: &T::AccountId) { + >::remove(who); + >::get_account_info(&DirectAccountDb, who).map(|subtrie| { + child::kill_storage(&subtrie.trie_id); + }); + } } /// In-memory cache of configuration values. @@ -461,79 +477,79 @@ impl OnFreeBalanceZero for Module { /// We assume that these values can't be changed in the /// course of transaction execution. pub struct Config { - pub schedule: Schedule, - pub existential_deposit: BalanceOf, - pub max_depth: u32, - pub contract_account_instantiate_fee: BalanceOf, - pub account_create_fee: BalanceOf, - pub transfer_fee: BalanceOf, - pub call_base_fee: T::Gas, - pub instantiate_base_fee: T::Gas, + pub schedule: Schedule, + pub existential_deposit: BalanceOf, + pub max_depth: u32, + pub contract_account_instantiate_fee: BalanceOf, + pub account_create_fee: BalanceOf, + pub transfer_fee: BalanceOf, + pub call_base_fee: T::Gas, + pub instantiate_base_fee: T::Gas, } impl Config { - fn preload() -> Config { - Config { - schedule: >::current_schedule(), - existential_deposit: T::Currency::minimum_balance(), - max_depth: >::max_depth(), - contract_account_instantiate_fee: >::contract_fee(), - account_create_fee: >::creation_fee(), - transfer_fee: >::transfer_fee(), - call_base_fee: >::call_base_fee(), - instantiate_base_fee: >::create_base_fee(), - } - } + fn preload() -> Config { + Config { + schedule: >::current_schedule(), + existential_deposit: T::Currency::minimum_balance(), + max_depth: >::max_depth(), + contract_account_instantiate_fee: >::contract_fee(), + account_create_fee: >::creation_fee(), + transfer_fee: >::transfer_fee(), + call_base_fee: >::call_base_fee(), + instantiate_base_fee: >::create_base_fee(), + } + } } /// Definition of the cost schedule and other parameterizations for the wasm vm.
#[cfg_attr(feature = "std", derive(Serialize, Deserialize, Debug))] #[derive(Clone, Encode, Decode, PartialEq, Eq)] pub struct Schedule { - /// Version of the schedule. - pub version: u32, + /// Version of the schedule. + pub version: u32, - /// Cost of putting a byte of code into the storage. - pub put_code_per_byte_cost: Gas, + /// Cost of putting a byte of code into the storage. + pub put_code_per_byte_cost: Gas, - /// Gas cost of a growing memory by single page. - pub grow_mem_cost: Gas, + /// Gas cost of growing memory by a single page. + pub grow_mem_cost: Gas, - /// Gas cost of a regular operation. - pub regular_op_cost: Gas, + /// Gas cost of a regular operation. + pub regular_op_cost: Gas, - /// Gas cost per one byte returned. - pub return_data_per_byte_cost: Gas, + /// Gas cost per one byte returned. + pub return_data_per_byte_cost: Gas, - /// Gas cost per one byte read from the sandbox memory. - pub sandbox_data_read_cost: Gas, + /// Gas cost per one byte read from the sandbox memory. + pub sandbox_data_read_cost: Gas, - /// Gas cost per one byte written to the sandbox memory. - pub sandbox_data_write_cost: Gas, + /// Gas cost per one byte written to the sandbox memory. + pub sandbox_data_write_cost: Gas, - /// How tall the stack is allowed to grow? - /// - /// See https://wiki.parity.io/WebAssembly-StackHeight to find out - /// how the stack frame cost is calculated. - pub max_stack_height: u32, + /// How tall is the stack allowed to grow? + /// + /// See https://wiki.parity.io/WebAssembly-StackHeight to find out + /// how the stack frame cost is calculated. + pub max_stack_height: u32, - /// What is the maximal memory pages amount is allowed to have for - /// a contract. - pub max_memory_pages: u32, + /// The maximal number of memory pages allowed for + /// a contract.
+ pub max_memory_pages: u32, } impl> Default for Schedule { - fn default() -> Schedule { - Schedule { - version: 0, - put_code_per_byte_cost: Gas::sa(1), - grow_mem_cost: Gas::sa(1), - regular_op_cost: Gas::sa(1), - return_data_per_byte_cost: Gas::sa(1), - sandbox_data_read_cost: Gas::sa(1), - sandbox_data_write_cost: Gas::sa(1), - max_stack_height: 64 * 1024, - max_memory_pages: 16, - } - } + fn default() -> Schedule { + Schedule { + version: 0, + put_code_per_byte_cost: Gas::sa(1), + grow_mem_cost: Gas::sa(1), + regular_op_cost: Gas::sa(1), + return_data_per_byte_cost: Gas::sa(1), + sandbox_data_read_cost: Gas::sa(1), + sandbox_data_write_cost: Gas::sa(1), + max_stack_height: 64 * 1024, + max_memory_pages: 16, + } + } } diff --git a/srml/contract/src/tests.rs b/srml/contract/src/tests.rs index 1c18329bf2..c02a6f5670 100644 --- a/srml/contract/src/tests.rs +++ b/srml/contract/src/tests.rs @@ -19,91 +19,92 @@ #![allow(unused)] +use crate::{ + AccountInfo, AccountInfoOf, ComputeDispatchFee, ContractAddressFor, GenesisConfig, Module, + RawEvent, Trait, TrieId, TrieIdGenerator, +}; +use assert_matches::assert_matches; +use hex_literal::*; +use parity_codec::{Decode, Encode, KeyedVec}; +use runtime_io; use runtime_io::with_externalities; -use runtime_primitives::testing::{Digest, DigestItem, H256, Header, UintAuthorityId}; +use runtime_primitives::testing::{Digest, DigestItem, Header, UintAuthorityId, H256}; use runtime_primitives::traits::{BlakeTwo256, IdentityLookup}; use runtime_primitives::BuildStorage; -use runtime_io; -use srml_support::{storage::child, StorageMap, assert_ok, impl_outer_event, impl_outer_dispatch, - impl_outer_origin, traits::Currency}; -use substrate_primitives::Blake2Hasher; -use system::{self, Phase, EventRecord}; -use {wabt, balances, consensus}; -use hex_literal::*; -use assert_matches::assert_matches; -use crate::{ - ContractAddressFor, GenesisConfig, Module, RawEvent, - Trait, ComputeDispatchFee, TrieIdGenerator, TrieId, - AccountInfo, AccountInfoOf, +use srml_support::{ + assert_ok, impl_outer_dispatch, impl_outer_event, impl_outer_origin, storage::child, + traits::Currency, StorageMap, }; -use substrate_primitives::storage::well_known_keys; -use parity_codec::{Encode, Decode, KeyedVec}; use std::sync::atomic::{AtomicUsize, Ordering}; +use substrate_primitives::storage::well_known_keys; +use substrate_primitives::Blake2Hasher; +use system::{self, EventRecord, Phase}; +use {balances, consensus, wabt}; mod contract { - // Re-export contents of the root. This basically - // needs to give a name for the current crate. - // This hack is required for `impl_outer_event!`. - pub use super::super::*; - use srml_support::impl_outer_event; + // Re-export contents of the root. This basically + // needs to give a name for the current crate. + // This hack is required for `impl_outer_event!`. + pub use super::super::*; + use srml_support::impl_outer_event; } impl_outer_event! { - pub enum MetaEvent for Test { - balances, contract, - } + pub enum MetaEvent for Test { + balances, contract, + } } impl_outer_origin! { - pub enum Origin for Test { } + pub enum Origin for Test { } } impl_outer_dispatch! 
{ - pub enum Call for Test where origin: Origin { - balances::Balances, - contract::Contract, - } + pub enum Call for Test where origin: Origin { + balances::Balances, + contract::Contract, + } } #[derive(Clone, Eq, PartialEq)] pub struct Test; impl system::Trait for Test { - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type Digest = Digest; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = MetaEvent; - type Log = DigestItem; + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type Digest = Digest; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = MetaEvent; + type Log = DigestItem; } impl balances::Trait for Test { - type Balance = u64; - type OnFreeBalanceZero = Contract; - type OnNewAccount = (); - type Event = MetaEvent; - type TransactionPayment = (); - type DustRemoval = (); - type TransferPayment = (); + type Balance = u64; + type OnFreeBalanceZero = Contract; + type OnNewAccount = (); + type Event = MetaEvent; + type TransactionPayment = (); + type DustRemoval = (); + type TransferPayment = (); } impl timestamp::Trait for Test { - type Moment = u64; - type OnTimestampSet = (); + type Moment = u64; + type OnTimestampSet = (); } impl consensus::Trait for Test { - type Log = DigestItem; - type SessionKey = UintAuthorityId; - type InherentOfflineReport = (); + type Log = DigestItem; + type SessionKey = UintAuthorityId; + type InherentOfflineReport = (); } impl Trait for Test { - type Currency = Balances; - type Call = Call; - type Gas = u64; - type DetermineContractAddress = DummyContractAddressFor; - type Event = MetaEvent; - type ComputeDispatchFee = DummyComputeDispatchFee; - type TrieIdGenerator = DummyTrieIdGenerator; - type GasPayment = (); + type Currency = Balances; + type Call = Call; + type Gas = u64; + type DetermineContractAddress = DummyContractAddressFor; + type Event = MetaEvent; + type ComputeDispatchFee = DummyComputeDispatchFee; + type TrieIdGenerator = DummyTrieIdGenerator; + type GasPayment = (); } type Balances = balances::Module; @@ -112,27 +113,30 @@ type System = system::Module; pub struct DummyContractAddressFor; impl ContractAddressFor for DummyContractAddressFor { - fn contract_address_for(_code_hash: &H256, _data: &[u8], origin: &u64) -> u64 { - *origin + 1 - } + fn contract_address_for(_code_hash: &H256, _data: &[u8], origin: &u64) -> u64 { + *origin + 1 + } } static KEY_COUNTER: AtomicUsize = AtomicUsize::new(0); pub struct DummyTrieIdGenerator; impl TrieIdGenerator for DummyTrieIdGenerator { - fn trie_id(account_id: &u64) -> TrieId { - let mut res = KEY_COUNTER.fetch_add(1, Ordering::Relaxed).to_le_bytes().to_vec(); - res.extend_from_slice(&account_id.to_le_bytes()); - res - } + fn trie_id(account_id: &u64) -> TrieId { + let mut res = KEY_COUNTER + .fetch_add(1, Ordering::Relaxed) + .to_le_bytes() + .to_vec(); + res.extend_from_slice(&account_id.to_le_bytes()); + res + } } pub struct DummyComputeDispatchFee; impl ComputeDispatchFee for DummyComputeDispatchFee { - fn compute_dispatch_fee(call: &Call) -> u64 { - 69 - } + fn compute_dispatch_fee(call: &Call) -> u64 { + 69 + } } const ALICE: u64 = 1; @@ -140,152 +144,155 @@ const BOB: u64 = 2; const CHARLIE: u64 = 3; pub struct ExtBuilder { - existential_deposit: u64, - gas_price: u64, - block_gas_limit: u64, - transfer_fee: u64, - creation_fee: u64, + existential_deposit: u64, 
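`DummyContractAddressFor` above is what makes the account numbering in these tests line up: a contract created by `origin` lands at `origin + 1`, so a contract created by ALICE (1) is BOB (2), which is why later tests can address the new contract simply as BOB. The arithmetic in isolation:

fn dummy_contract_address(origin: u64) -> u64 {
    origin + 1 // ALICE (1) deploys at BOB (2); BOB would deploy at CHARLIE (3)
}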
+ gas_price: u64, + block_gas_limit: u64, + transfer_fee: u64, + creation_fee: u64, } impl Default for ExtBuilder { - fn default() -> Self { - Self { - existential_deposit: 0, - gas_price: 2, - block_gas_limit: 100_000_000, - transfer_fee: 0, - creation_fee: 0, - } - } + fn default() -> Self { + Self { + existential_deposit: 0, + gas_price: 2, + block_gas_limit: 100_000_000, + transfer_fee: 0, + creation_fee: 0, + } + } } impl ExtBuilder { - pub fn existential_deposit(mut self, existential_deposit: u64) -> Self { - self.existential_deposit = existential_deposit; - self - } - pub fn gas_price(mut self, gas_price: u64) -> Self { - self.gas_price = gas_price; - self - } - pub fn block_gas_limit(mut self, block_gas_limit: u64) -> Self { - self.block_gas_limit = block_gas_limit; - self - } - pub fn transfer_fee(mut self, transfer_fee: u64) -> Self { - self.transfer_fee = transfer_fee; - self - } - pub fn creation_fee(mut self, creation_fee: u64) -> Self { - self.creation_fee = creation_fee; - self - } - pub fn build(self) -> runtime_io::TestExternalities { - let mut t = system::GenesisConfig::::default() - .build_storage() - .unwrap() - .0; - t.extend( - balances::GenesisConfig:: { - transaction_base_fee: 0, - transaction_byte_fee: 0, - balances: vec![], - existential_deposit: self.existential_deposit, - transfer_fee: self.transfer_fee, - creation_fee: self.creation_fee, - vesting: vec![], - } - .build_storage() - .unwrap() - .0, - ); - t.extend( - GenesisConfig:: { - transaction_base_fee: 0, - transaction_byte_fee: 0, - transfer_fee: self.transfer_fee, - creation_fee: self.creation_fee, - contract_fee: 21, - call_base_fee: 135, - create_base_fee: 175, - gas_price: self.gas_price, - max_depth: 100, - block_gas_limit: self.block_gas_limit, - current_schedule: Default::default(), - } - .build_storage() - .unwrap() - .0, - ); - runtime_io::TestExternalities::new(t) - } + pub fn existential_deposit(mut self, existential_deposit: u64) -> Self { + self.existential_deposit = existential_deposit; + self + } + pub fn gas_price(mut self, gas_price: u64) -> Self { + self.gas_price = gas_price; + self + } + pub fn block_gas_limit(mut self, block_gas_limit: u64) -> Self { + self.block_gas_limit = block_gas_limit; + self + } + pub fn transfer_fee(mut self, transfer_fee: u64) -> Self { + self.transfer_fee = transfer_fee; + self + } + pub fn creation_fee(mut self, creation_fee: u64) -> Self { + self.creation_fee = creation_fee; + self + } + pub fn build(self) -> runtime_io::TestExternalities { + let mut t = system::GenesisConfig::::default() + .build_storage() + .unwrap() + .0; + t.extend( + balances::GenesisConfig:: { + transaction_base_fee: 0, + transaction_byte_fee: 0, + balances: vec![], + existential_deposit: self.existential_deposit, + transfer_fee: self.transfer_fee, + creation_fee: self.creation_fee, + vesting: vec![], + } + .build_storage() + .unwrap() + .0, + ); + t.extend( + GenesisConfig:: { + transaction_base_fee: 0, + transaction_byte_fee: 0, + transfer_fee: self.transfer_fee, + creation_fee: self.creation_fee, + contract_fee: 21, + call_base_fee: 135, + create_base_fee: 175, + gas_price: self.gas_price, + max_depth: 100, + block_gas_limit: self.block_gas_limit, + current_schedule: Default::default(), + } + .build_storage() + .unwrap() + .0, + ); + runtime_io::TestExternalities::new(t) + } } #[test] fn refunds_unused_gas() { - with_externalities(&mut ExtBuilder::default().build(), || { - Balances::deposit_creating(&0, 100_000_000); - - assert_ok!(Contract::call( - Origin::signed(0), - 1, - 0, - 
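The `refunds_unused_gas` assertion below follows from the fixture defaults above: `ExtBuilder` sets `gas_price` to 2 and the genesis config sets `call_base_fee` to 135, so 100_000 gas is bought upfront and everything but the 135 base-fee units is refunded. A sketch of that arithmetic, assuming this simple linear gas model:

fn net_call_cost(gas_limit: u64, gas_used: u64, gas_price: u64) -> u64 {
    let bought = gas_limit * gas_price; // paid when the call is dispatched
    let refunded = (gas_limit - gas_used) * gas_price; // returned afterwards
    bought - refunded // with gas_used = 135 and gas_price = 2: 270
}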
100_000, - Vec::new() - )); - - assert_eq!(Balances::free_balance(&0), 100_000_000 - (2 * 135)); - }); + with_externalities(&mut ExtBuilder::default().build(), || { + Balances::deposit_creating(&0, 100_000_000); + + assert_ok!(Contract::call(Origin::signed(0), 1, 0, 100_000, Vec::new())); + + assert_eq!(Balances::free_balance(&0), 100_000_000 - (2 * 135)); + }); } #[test] fn account_removal_removes_storage() { - let unique_id1 = b"unique_id1"; - let unique_id2 = b"unique_id2"; - with_externalities( - &mut ExtBuilder::default().existential_deposit(100).build(), - || { - // Set up two accounts with free balance above the existential threshold. - { - Balances::deposit_creating(&1, 110); - AccountInfoOf::::insert(1, &AccountInfo { - trie_id: unique_id1.to_vec(), - current_mem_stored: 0, - }); - child::put(&unique_id1[..], &b"foo".to_vec(), &b"1".to_vec()); - assert_eq!(child::get(&unique_id1[..], &b"foo".to_vec()), Some(b"1".to_vec())); - child::put(&unique_id1[..], &b"bar".to_vec(), &b"2".to_vec()); - - Balances::deposit_creating(&2, 110); - AccountInfoOf::::insert(2, &AccountInfo { - trie_id: unique_id2.to_vec(), - current_mem_stored: 0, - }); - child::put(&unique_id2[..], &b"hello".to_vec(), &b"3".to_vec()); - child::put(&unique_id2[..], &b"world".to_vec(), &b"4".to_vec()); - } - - // Transfer funds from account 1 of such amount that after this transfer - // the balance of account 1 will be below the existential threshold. - // - // This should lead to the removal of all storage associated with this account. - assert_ok!(Balances::transfer(Origin::signed(1), 2, 20)); - - // Verify that all entries from account 1 is removed, while - // entries from account 2 is in place. - { - assert_eq!(child::get_raw(&unique_id1[..], &b"foo".to_vec()), None); - assert_eq!(child::get_raw(&unique_id1[..], &b"bar".to_vec()), None); - - assert_eq!( - child::get(&unique_id2[..], &b"hello".to_vec()), - Some(b"3".to_vec()) - ); - assert_eq!( - child::get(&unique_id2[..], &b"world".to_vec()), - Some(b"4".to_vec()) - ); - } - }, - ); + let unique_id1 = b"unique_id1"; + let unique_id2 = b"unique_id2"; + with_externalities( + &mut ExtBuilder::default().existential_deposit(100).build(), + || { + // Set up two accounts with free balance above the existential threshold. + { + Balances::deposit_creating(&1, 110); + AccountInfoOf::::insert( + 1, + &AccountInfo { + trie_id: unique_id1.to_vec(), + current_mem_stored: 0, + }, + ); + child::put(&unique_id1[..], &b"foo".to_vec(), &b"1".to_vec()); + assert_eq!( + child::get(&unique_id1[..], &b"foo".to_vec()), + Some(b"1".to_vec()) + ); + child::put(&unique_id1[..], &b"bar".to_vec(), &b"2".to_vec()); + + Balances::deposit_creating(&2, 110); + AccountInfoOf::::insert( + 2, + &AccountInfo { + trie_id: unique_id2.to_vec(), + current_mem_stored: 0, + }, + ); + child::put(&unique_id2[..], &b"hello".to_vec(), &b"3".to_vec()); + child::put(&unique_id2[..], &b"world".to_vec(), &b"4".to_vec()); + } + + // Transfer enough funds from account 1 that after this transfer + // the balance of account 1 will be below the existential threshold. + // + // This should lead to the removal of all storage associated with this account. + assert_ok!(Balances::transfer(Origin::signed(1), 2, 20)); + + // Verify that all entries from account 1 are removed, while + // entries from account 2 are in place.
+ { + assert_eq!(child::get_raw(&unique_id1[..], &b"foo".to_vec()), None); + assert_eq!(child::get_raw(&unique_id1[..], &b"bar".to_vec()), None); + + assert_eq!( + child::get(&unique_id2[..], &b"hello".to_vec()), + Some(b"3".to_vec()) + ); + assert_eq!( + child::get(&unique_id2[..], &b"world".to_vec()), + Some(b"4".to_vec()) + ); + } + }, + ); } const CODE_RETURN_FROM_START_FN: &str = r#" @@ -310,57 +317,57 @@ const CODE_RETURN_FROM_START_FN: &str = r#" (data (i32.const 8) "\01\02\03\04") ) "#; -const HASH_RETURN_FROM_START_FN: [u8; 32] = hex!("e6411d12daa2a19e4e9c7d8306c31c7d53a352cb8ed84385c8a1d48fc232e708"); +const HASH_RETURN_FROM_START_FN: [u8; 32] = + hex!("e6411d12daa2a19e4e9c7d8306c31c7d53a352cb8ed84385c8a1d48fc232e708"); #[test] fn instantiate_and_call() { - let wasm = wabt::wat2wasm(CODE_RETURN_FROM_START_FN).unwrap(); - - with_externalities( - &mut ExtBuilder::default().existential_deposit(100).build(), - || { - Balances::deposit_creating(&ALICE, 1_000_000); - - assert_ok!(Contract::put_code( - Origin::signed(ALICE), - 100_000, - wasm, - )); - - assert_ok!(Contract::create( - Origin::signed(ALICE), - 100, - 100_000, - HASH_RETURN_FROM_START_FN.into(), - vec![], - )); - - assert_eq!(System::events(), vec![ - EventRecord { - phase: Phase::ApplyExtrinsic(0), - event: MetaEvent::balances(balances::RawEvent::NewAccount(1, 1_000_000)), - }, - EventRecord { - phase: Phase::ApplyExtrinsic(0), - event: MetaEvent::contract(RawEvent::CodeStored(HASH_RETURN_FROM_START_FN.into())), - }, - EventRecord { - phase: Phase::ApplyExtrinsic(0), - event: MetaEvent::balances( - balances::RawEvent::NewAccount(BOB, 100) - ) - }, - EventRecord { - phase: Phase::ApplyExtrinsic(0), - event: MetaEvent::contract(RawEvent::Transfer(ALICE, BOB, 100)) - }, - EventRecord { - phase: Phase::ApplyExtrinsic(0), - event: MetaEvent::contract(RawEvent::Instantiated(ALICE, BOB)) - } - ]); - }, - ); + let wasm = wabt::wat2wasm(CODE_RETURN_FROM_START_FN).unwrap(); + + with_externalities( + &mut ExtBuilder::default().existential_deposit(100).build(), + || { + Balances::deposit_creating(&ALICE, 1_000_000); + + assert_ok!(Contract::put_code(Origin::signed(ALICE), 100_000, wasm,)); + + assert_ok!(Contract::create( + Origin::signed(ALICE), + 100, + 100_000, + HASH_RETURN_FROM_START_FN.into(), + vec![], + )); + + assert_eq!( + System::events(), + vec![ + EventRecord { + phase: Phase::ApplyExtrinsic(0), + event: MetaEvent::balances(balances::RawEvent::NewAccount(1, 1_000_000)), + }, + EventRecord { + phase: Phase::ApplyExtrinsic(0), + event: MetaEvent::contract(RawEvent::CodeStored( + HASH_RETURN_FROM_START_FN.into() + )), + }, + EventRecord { + phase: Phase::ApplyExtrinsic(0), + event: MetaEvent::balances(balances::RawEvent::NewAccount(BOB, 100)) + }, + EventRecord { + phase: Phase::ApplyExtrinsic(0), + event: MetaEvent::contract(RawEvent::Transfer(ALICE, BOB, 100)) + }, + EventRecord { + phase: Phase::ApplyExtrinsic(0), + event: MetaEvent::contract(RawEvent::Instantiated(ALICE, BOB)) + } + ] + ); + }, + ); } const CODE_DISPATCH_CALL: &str = r#" @@ -379,101 +386,99 @@ const CODE_DISPATCH_CALL: &str = r#" (data (i32.const 8) "\00\00\03\00\00\00\00\00\00\00\C8") ) "#; -const HASH_DISPATCH_CALL: [u8; 32] = hex!("49dfdcaf9c1553be10634467e95b8e71a3bc15a4f8bf5563c0312b0902e0afb9"); +const HASH_DISPATCH_CALL: [u8; 32] = + hex!("49dfdcaf9c1553be10634467e95b8e71a3bc15a4f8bf5563c0312b0902e0afb9"); #[test] fn dispatch_call() { - // This test can fail due to the encoding changes. 
In case it becomes too annoying - let's rewrite so as we use this module controlled call or we serialize it in runtime. - let encoded = parity_codec::Encode::encode(&Call::Balances(balances::Call::transfer(CHARLIE, 50))); - assert_eq!(&encoded[..], &hex!("00000300000000000000C8")[..]); - - let wasm = wabt::wat2wasm(CODE_DISPATCH_CALL).unwrap(); - - with_externalities( - &mut ExtBuilder::default().existential_deposit(50).build(), - || { - Balances::deposit_creating(&ALICE, 1_000_000); - - assert_ok!(Contract::put_code( - Origin::signed(ALICE), - 100_000, - wasm, - )); - - // Let's keep this assert even though it's redundant. If you ever need to update the - // wasm source this test will fail and will show you the actual hash. - assert_eq!(System::events(), vec![ - EventRecord { - phase: Phase::ApplyExtrinsic(0), - event: MetaEvent::balances(balances::RawEvent::NewAccount(1, 1_000_000)), - }, - EventRecord { - phase: Phase::ApplyExtrinsic(0), - event: MetaEvent::contract(RawEvent::CodeStored(HASH_DISPATCH_CALL.into())), - }, - ]); - - assert_ok!(Contract::create( - Origin::signed(ALICE), - 100, - 100_000, - HASH_DISPATCH_CALL.into(), - vec![], - )); - - assert_ok!(Contract::call( - Origin::signed(ALICE), - BOB, // newly created account - 0, - 100_000, - vec![], - )); - - assert_eq!(System::events(), vec![ - EventRecord { - phase: Phase::ApplyExtrinsic(0), - event: MetaEvent::balances(balances::RawEvent::NewAccount(1, 1_000_000)), - }, - EventRecord { - phase: Phase::ApplyExtrinsic(0), - event: MetaEvent::contract(RawEvent::CodeStored(HASH_DISPATCH_CALL.into())), - }, - EventRecord { - phase: Phase::ApplyExtrinsic(0), - event: MetaEvent::balances( - balances::RawEvent::NewAccount(BOB, 100) - ) - }, - EventRecord { - phase: Phase::ApplyExtrinsic(0), - event: MetaEvent::contract(RawEvent::Transfer(ALICE, BOB, 100)) - }, - EventRecord { - phase: Phase::ApplyExtrinsic(0), - event: MetaEvent::contract(RawEvent::Instantiated(ALICE, BOB)) - }, - - // Dispatching the call. - EventRecord { - phase: Phase::ApplyExtrinsic(0), - event: MetaEvent::balances( - balances::RawEvent::NewAccount(CHARLIE, 50) - ) - }, - EventRecord { - phase: Phase::ApplyExtrinsic(0), - event: MetaEvent::balances( - balances::RawEvent::Transfer(BOB, CHARLIE, 50, 0) - ) - }, - - // Event emited as a result of dispatch. - EventRecord { - phase: Phase::ApplyExtrinsic(0), - event: MetaEvent::contract(RawEvent::Dispatched(BOB, true)) - } - ]); - }, - ); + // This test can fail due to encoding changes. If it becomes too annoying, + // let's rewrite it to use a call controlled by this module, or serialize the call in the runtime. + let encoded = + parity_codec::Encode::encode(&Call::Balances(balances::Call::transfer(CHARLIE, 50))); + assert_eq!(&encoded[..], &hex!("00000300000000000000C8")[..]); + + let wasm = wabt::wat2wasm(CODE_DISPATCH_CALL).unwrap(); + + with_externalities( + &mut ExtBuilder::default().existential_deposit(50).build(), + || { + Balances::deposit_creating(&ALICE, 1_000_000); + + assert_ok!(Contract::put_code(Origin::signed(ALICE), 100_000, wasm,)); + + // Let's keep this assert even though it's redundant. If you ever need to update the + // wasm source this test will fail and will show you the actual hash.
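As the comment above notes, a changed wat source shows up as a changed hash: the `HASH_*` constants are hashes of the original, pre-instrumentation wasm blob, computed in `code_cache.rs` below as `T::Hashing::hash(&original_code)`, i.e. `BlakeTwo256` in this test runtime. A sketch of regenerating such a constant, assuming the `wabt` and `substrate_primitives` dependencies these tests already use:

fn code_hash_of(wat: &str) -> [u8; 32] {
    let wasm = wabt::wat2wasm(wat).expect("valid wat source");
    substrate_primitives::blake2_256(&wasm) // BlakeTwo256 over the raw code bytes
}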
+ assert_eq!( + System::events(), + vec![ + EventRecord { + phase: Phase::ApplyExtrinsic(0), + event: MetaEvent::balances(balances::RawEvent::NewAccount(1, 1_000_000)), + }, + EventRecord { + phase: Phase::ApplyExtrinsic(0), + event: MetaEvent::contract(RawEvent::CodeStored(HASH_DISPATCH_CALL.into())), + }, + ] + ); + + assert_ok!(Contract::create( + Origin::signed(ALICE), + 100, + 100_000, + HASH_DISPATCH_CALL.into(), + vec![], + )); + + assert_ok!(Contract::call( + Origin::signed(ALICE), + BOB, // newly created account + 0, + 100_000, + vec![], + )); + + assert_eq!( + System::events(), + vec![ + EventRecord { + phase: Phase::ApplyExtrinsic(0), + event: MetaEvent::balances(balances::RawEvent::NewAccount(1, 1_000_000)), + }, + EventRecord { + phase: Phase::ApplyExtrinsic(0), + event: MetaEvent::contract(RawEvent::CodeStored(HASH_DISPATCH_CALL.into())), + }, + EventRecord { + phase: Phase::ApplyExtrinsic(0), + event: MetaEvent::balances(balances::RawEvent::NewAccount(BOB, 100)) + }, + EventRecord { + phase: Phase::ApplyExtrinsic(0), + event: MetaEvent::contract(RawEvent::Transfer(ALICE, BOB, 100)) + }, + EventRecord { + phase: Phase::ApplyExtrinsic(0), + event: MetaEvent::contract(RawEvent::Instantiated(ALICE, BOB)) + }, + // Dispatching the call. + EventRecord { + phase: Phase::ApplyExtrinsic(0), + event: MetaEvent::balances(balances::RawEvent::NewAccount(CHARLIE, 50)) + }, + EventRecord { + phase: Phase::ApplyExtrinsic(0), + event: MetaEvent::balances(balances::RawEvent::Transfer( + BOB, CHARLIE, 50, 0 + )) + }, + // Event emitted as a result of dispatch. + EventRecord { + phase: Phase::ApplyExtrinsic(0), + event: MetaEvent::contract(RawEvent::Dispatched(BOB, true)) + } + ] + ); + }, + ); } diff --git a/srml/contract/src/wasm/code_cache.rs b/srml/contract/src/wasm/code_cache.rs index dab8c4bfa4..6a2d62a8f1 100644 --- a/srml/contract/src/wasm/code_cache.rs +++ b/srml/contract/src/wasm/code_cache.rs @@ -29,7 +29,7 @@ use crate::gas::{GasMeter, Token}; use crate::wasm::{prepare, runtime::Env, PrefabWasmModule}; use crate::{CodeHash, CodeStorage, PristineCode, Schedule, Trait}; use rstd::prelude::*; -use runtime_primitives::traits::{As, CheckedMul, Hash, Bounded}; +use runtime_primitives::traits::{As, Bounded, CheckedMul, Hash}; use srml_support::StorageMap; /// Gas metering token that is used for charging the storing of code into the code storage. @@ -40,15 +40,15 @@ use srml_support::StorageMap; pub struct PutCodeToken(u64); impl Token for PutCodeToken { - type Metadata = Schedule; + type Metadata = Schedule; - fn calculate_amount(&self, metadata: &Schedule) -> T::Gas { - let code_len_in_gas = >::sa(self.0); - metadata - .put_code_per_byte_cost - .checked_mul(&code_len_in_gas) - .unwrap_or_else(|| Bounded::max_value()) - } + fn calculate_amount(&self, metadata: &Schedule) -> T::Gas { + let code_len_in_gas = >::sa(self.0); + metadata + .put_code_per_byte_cost + .checked_mul(&code_len_in_gas) + .unwrap_or_else(|| Bounded::max_value()) + } } /// Put code in the storage. The hash of the code is used as a key and is returned @@ -56,28 +56,28 @@ impl Token for PutCodeToken { /// /// This function instruments the given code and caches it in the storage. pub fn save( - original_code: Vec, - gas_meter: &mut GasMeter, - schedule: &Schedule, + original_code: Vec, + gas_meter: &mut GasMeter, + schedule: &Schedule, ) -> Result, &'static str> { - // The first time instrumentation is on the user. However, consequent reinstrumentation - // due to the schedule changes is on governance system.
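`PutCodeToken::calculate_amount` above prices `put_code` linearly in the code size and saturates on overflow, so an overflowing multiplication charges the maximum (and therefore runs out of gas) instead of under-charging. The same policy in plain `u64` terms, as a sketch:

fn put_code_fee(code_len: u64, per_byte_cost: u64) -> u64 {
    code_len
        .checked_mul(per_byte_cost)
        .unwrap_or(u64::MAX) // saturate: an overflow must never make storage cheap
}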
- if gas_meter - .charge(schedule, PutCodeToken(original_code.len() as u64)) - .is_out_of_gas() - { - return Err("there is not enough gas for storing the code"); - } + // The first instrumentation is paid for by the user. However, subsequent re-instrumentation + // due to schedule changes is paid for by the governance system. + if gas_meter + .charge(schedule, PutCodeToken(original_code.len() as u64)) + .is_out_of_gas() + { + return Err("there is not enough gas for storing the code"); + } - let prefab_module = prepare::prepare_contract::(&original_code, schedule)?; - let code_hash = T::Hashing::hash(&original_code); + let prefab_module = prepare::prepare_contract::(&original_code, schedule)?; + let code_hash = T::Hashing::hash(&original_code); - // TODO: #1416 validate the code. If the code is not valid, then don't store it. + // TODO: #1416 validate the code. If the code is not valid, then don't store it. - >::insert(code_hash, prefab_module); - >::insert(code_hash, original_code); + >::insert(code_hash, prefab_module); + >::insert(code_hash, original_code); - Ok(code_hash) + Ok(code_hash) } /// Load code with the given code hash. @@ -86,21 +86,20 @@ pub fn save( /// the current one given as an argument, then this function will perform /// re-instrumentation and update the cache in the storage. pub fn load( - code_hash: &CodeHash, - schedule: &Schedule, + code_hash: &CodeHash, + schedule: &Schedule, ) -> Result { - let mut prefab_module = - >::get(code_hash).ok_or_else(|| "code is not found")?; + let mut prefab_module = >::get(code_hash).ok_or_else(|| "code is not found")?; - if prefab_module.schedule_version < schedule.version { - // The current schedule version is greater than the version of the one cached - // in the storage. - // - // We need to re-instrument the code with the latest schedule here. - let original_code = - >::get(code_hash).ok_or_else(|| "pristine code is not found")?; - prefab_module = prepare::prepare_contract::(&original_code, schedule)?; - >::insert(code_hash, prefab_module.clone()); - } - Ok(prefab_module) + if prefab_module.schedule_version < schedule.version { + // The current schedule version is greater than the version of the one cached + // in the storage. + // + // We need to re-instrument the code with the latest schedule here. + let original_code = + >::get(code_hash).ok_or_else(|| "pristine code is not found")?; + prefab_module = prepare::prepare_contract::(&original_code, schedule)?; + >::insert(code_hash, prefab_module.clone()); + } + Ok(prefab_module) } diff --git a/srml/contract/src/wasm/env_def/macros.rs b/srml/contract/src/wasm/env_def/macros.rs index 0b112a8258..04b90d4563 100644 --- a/srml/contract/src/wasm/env_def/macros.rs +++ b/srml/contract/src/wasm/env_def/macros.rs @@ -96,9 +96,9 @@ macro_rules! unmarshall_then_body { #[inline(always)] pub fn constrain_closure(f: F) -> F where - F: FnOnce() -> Result, + F: FnOnce() -> Result, { - f + f } #[macro_export] @@ -193,131 +193,139 @@ macro_rules!
define_env { #[cfg(test)] mod tests { - use parity_wasm::elements::FunctionType; - use parity_wasm::elements::ValueType; - use runtime_primitives::traits::{As, Zero}; - use sandbox::{self, ReturnValue, TypedValue}; - use crate::wasm::tests::MockExt; - use crate::wasm::Runtime; - use crate::exec::Ext; - use crate::Trait; - - #[test] - fn macro_unmarshall_then_body_then_marshall_value_or_trap() { - fn test_value( - _ctx: &mut u32, - args: &[sandbox::TypedValue], - ) -> Result { - let mut args = args.iter(); - unmarshall_then_body_then_marshall!( - args, - _ctx, - (a: u32, b: u32) -> u32 => { - if b == 0 { - Err(sandbox::HostError) - } else { - Ok(a / b) - } - } - ) - } - - let ctx = &mut 0; - assert_eq!( - test_value(ctx, &[TypedValue::I32(15), TypedValue::I32(3)]).unwrap(), - ReturnValue::Value(TypedValue::I32(5)), - ); - assert!(test_value(ctx, &[TypedValue::I32(15), TypedValue::I32(0)]).is_err()); - } - - #[test] - fn macro_unmarshall_then_body_then_marshall_unit() { - fn test_unit( - ctx: &mut u32, - args: &[sandbox::TypedValue], - ) -> Result { - let mut args = args.iter(); - unmarshall_then_body_then_marshall!( - args, - ctx, - (a: u32, b: u32) => { - *ctx = a + b; - Ok(()) - } - ) - } - - let ctx = &mut 0; - let result = test_unit(ctx, &[TypedValue::I32(2), TypedValue::I32(3)]).unwrap(); - assert_eq!(result, ReturnValue::Unit); - assert_eq!(*ctx, 5); - } - - #[test] - fn macro_define_func() { - define_func!( ext_gas (_ctx, amount: u32) => { - let amount = <::Gas as As>::sa(amount); - if !amount.is_zero() { - Ok(()) - } else { - Err(sandbox::HostError) - } - }); - let _f: fn(&mut Runtime, &[sandbox::TypedValue]) - -> Result = ext_gas::; - } - - #[test] - fn macro_gen_signature() { - assert_eq!( - gen_signature!((i32)), - FunctionType::new(vec![ValueType::I32], None), - ); - - assert_eq!( - gen_signature!( (i32, u32) -> u32 ), - FunctionType::new(vec![ValueType::I32, ValueType::I32], Some(ValueType::I32)), - ); - } - - #[test] - fn macro_unmarshall_then_body() { - let args = vec![TypedValue::I32(5), TypedValue::I32(3)]; - let mut args = args.iter(); - - let ctx: &mut u32 = &mut 0; - - let r = unmarshall_then_body!( - { - *ctx = a + b; - a * b - }, - ctx, - args, - a: u32, - b: u32 - ); - - assert_eq!(*ctx, 8); - assert_eq!(r, 15); - } - - #[test] - fn macro_define_env() { - use crate::wasm::env_def::ImportSatisfyCheck; - - define_env!(Env, , - ext_gas( _ctx, amount: u32 ) => { - let amount = <::Gas as As>::sa(amount); - if !amount.is_zero() { - Ok(()) - } else { - Err(sandbox::HostError) - } - }, - ); - - assert!(Env::can_satisfy(b"ext_gas", &FunctionType::new(vec![ValueType::I32], None))); - assert!(!Env::can_satisfy(b"not_exists", &FunctionType::new(vec![], None))); - } + use crate::exec::Ext; + use crate::wasm::tests::MockExt; + use crate::wasm::Runtime; + use crate::Trait; + use parity_wasm::elements::FunctionType; + use parity_wasm::elements::ValueType; + use runtime_primitives::traits::{As, Zero}; + use sandbox::{self, ReturnValue, TypedValue}; + + #[test] + fn macro_unmarshall_then_body_then_marshall_value_or_trap() { + fn test_value( + _ctx: &mut u32, + args: &[sandbox::TypedValue], + ) -> Result { + let mut args = args.iter(); + unmarshall_then_body_then_marshall!( + args, + _ctx, + (a: u32, b: u32) -> u32 => { + if b == 0 { + Err(sandbox::HostError) + } else { + Ok(a / b) + } + } + ) + } + + let ctx = &mut 0; + assert_eq!( + test_value(ctx, &[TypedValue::I32(15), TypedValue::I32(3)]).unwrap(), + ReturnValue::Value(TypedValue::I32(5)), + ); + assert!(test_value(ctx, 
&[TypedValue::I32(15), TypedValue::I32(0)]).is_err()); + } + + #[test] + fn macro_unmarshall_then_body_then_marshall_unit() { + fn test_unit( + ctx: &mut u32, + args: &[sandbox::TypedValue], + ) -> Result { + let mut args = args.iter(); + unmarshall_then_body_then_marshall!( + args, + ctx, + (a: u32, b: u32) => { + *ctx = a + b; + Ok(()) + } + ) + } + + let ctx = &mut 0; + let result = test_unit(ctx, &[TypedValue::I32(2), TypedValue::I32(3)]).unwrap(); + assert_eq!(result, ReturnValue::Unit); + assert_eq!(*ctx, 5); + } + + #[test] + fn macro_define_func() { + define_func!( ext_gas (_ctx, amount: u32) => { + let amount = <::Gas as As>::sa(amount); + if !amount.is_zero() { + Ok(()) + } else { + Err(sandbox::HostError) + } + }); + let _f: fn( + &mut Runtime, + &[sandbox::TypedValue], + ) -> Result = ext_gas::; + } + + #[test] + fn macro_gen_signature() { + assert_eq!( + gen_signature!((i32)), + FunctionType::new(vec![ValueType::I32], None), + ); + + assert_eq!( + gen_signature!( (i32, u32) -> u32 ), + FunctionType::new(vec![ValueType::I32, ValueType::I32], Some(ValueType::I32)), + ); + } + + #[test] + fn macro_unmarshall_then_body() { + let args = vec![TypedValue::I32(5), TypedValue::I32(3)]; + let mut args = args.iter(); + + let ctx: &mut u32 = &mut 0; + + let r = unmarshall_then_body!( + { + *ctx = a + b; + a * b + }, + ctx, + args, + a: u32, + b: u32 + ); + + assert_eq!(*ctx, 8); + assert_eq!(r, 15); + } + + #[test] + fn macro_define_env() { + use crate::wasm::env_def::ImportSatisfyCheck; + + define_env!(Env, , + ext_gas( _ctx, amount: u32 ) => { + let amount = <::Gas as As>::sa(amount); + if !amount.is_zero() { + Ok(()) + } else { + Err(sandbox::HostError) + } + }, + ); + + assert!(Env::can_satisfy( + b"ext_gas", + &FunctionType::new(vec![ValueType::I32], None) + )); + assert!(!Env::can_satisfy( + b"not_exists", + &FunctionType::new(vec![], None) + )); + } } diff --git a/srml/contract/src/wasm/env_def/mod.rs b/srml/contract/src/wasm/env_def/mod.rs index d51a157910..4a0b53e7f6 100644 --- a/srml/contract/src/wasm/env_def/mod.rs +++ b/srml/contract/src/wasm/env_def/mod.rs @@ -17,70 +17,67 @@ use super::Runtime; use crate::exec::Ext; -use sandbox::{self, TypedValue}; use parity_wasm::elements::{FunctionType, ValueType}; +use sandbox::{self, TypedValue}; #[macro_use] pub(crate) mod macros; pub trait ConvertibleToWasm: Sized { - const VALUE_TYPE: ValueType; - type NativeType; - fn to_typed_value(self) -> TypedValue; - fn from_typed_value(_: TypedValue) -> Option; + const VALUE_TYPE: ValueType; + type NativeType; + fn to_typed_value(self) -> TypedValue; + fn from_typed_value(_: TypedValue) -> Option; } impl ConvertibleToWasm for i32 { - type NativeType = i32; - const VALUE_TYPE: ValueType = ValueType::I32; - fn to_typed_value(self) -> TypedValue { - TypedValue::I32(self) - } - fn from_typed_value(v: TypedValue) -> Option { - v.as_i32() - } + type NativeType = i32; + const VALUE_TYPE: ValueType = ValueType::I32; + fn to_typed_value(self) -> TypedValue { + TypedValue::I32(self) + } + fn from_typed_value(v: TypedValue) -> Option { + v.as_i32() + } } impl ConvertibleToWasm for u32 { - type NativeType = u32; - const VALUE_TYPE: ValueType = ValueType::I32; - fn to_typed_value(self) -> TypedValue { - TypedValue::I32(self as i32) - } - fn from_typed_value(v: TypedValue) -> Option { - match v { - TypedValue::I32(v) => Some(v as u32), - _ => None, - } - } + type NativeType = u32; + const VALUE_TYPE: ValueType = ValueType::I32; + fn to_typed_value(self) -> TypedValue { + TypedValue::I32(self as i32) + } + 
fn from_typed_value(v: TypedValue) -> Option { + match v { + TypedValue::I32(v) => Some(v as u32), + _ => None, + } + } } impl ConvertibleToWasm for u64 { - type NativeType = u64; - const VALUE_TYPE: ValueType = ValueType::I64; - fn to_typed_value(self) -> TypedValue { - TypedValue::I64(self as i64) - } - fn from_typed_value(v: TypedValue) -> Option { - match v { - TypedValue::I64(v) => Some(v as u64), - _ => None, - } - } + type NativeType = u64; + const VALUE_TYPE: ValueType = ValueType::I64; + fn to_typed_value(self) -> TypedValue { + TypedValue::I64(self as i64) + } + fn from_typed_value(v: TypedValue) -> Option { + match v { + TypedValue::I64(v) => Some(v as u64), + _ => None, + } + } } pub(crate) type HostFunc = - fn( - &mut Runtime, - &[sandbox::TypedValue] - ) -> Result; + fn(&mut Runtime, &[sandbox::TypedValue]) -> Result; pub(crate) trait FunctionImplProvider { - fn impls)>(f: &mut F); + fn impls)>(f: &mut F); } /// This trait can be used to check whether the host environment can satisfy /// a requested function import. pub trait ImportSatisfyCheck { - /// Returns `true` if the host environment contains a function with - /// the specified name and its type matches to the given type, or `false` - /// otherwise. - fn can_satisfy(name: &[u8], func_type: &FunctionType) -> bool; + /// Returns `true` if the host environment contains a function with - /// the specified name and its type matches the given type, or `false` + /// otherwise. + fn can_satisfy(name: &[u8], func_type: &FunctionType) -> bool; } diff --git a/srml/contract/src/wasm/mod.rs b/srml/contract/src/wasm/mod.rs index 04428280d0..eed0ee5c36 100644 --- a/srml/contract/src/wasm/mod.rs +++ b/srml/contract/src/wasm/mod.rs @@ -17,13 +17,13 @@ //! This module provides a means for executing contracts //! represented in wasm. -use crate::{CodeHash, Schedule, Trait}; -use crate::wasm::env_def::FunctionImplProvider; -use crate::exec::{Ext, EmptyOutputBuf, VmExecResult}; +use crate::exec::{EmptyOutputBuf, Ext, VmExecResult}; use crate::gas::GasMeter; +use crate::wasm::env_def::FunctionImplProvider; +use crate::{CodeHash, Schedule, Trait}; +use parity_codec::{Decode, Encode}; use rstd::prelude::*; -use parity_codec::{Encode, Decode}; use sandbox; #[macro_use] @@ -32,283 +32,283 @@ mod code_cache; mod prepare; mod runtime; -use self::runtime::{to_execution_result, Runtime}; use self::code_cache::load as load_code; +use self::runtime::{to_execution_result, Runtime}; pub use self::code_cache::save as save_code; /// A prepared wasm module ready for execution. #[derive(Clone, Encode, Decode)] pub struct PrefabWasmModule { - /// Version of the schedule with which the code was instrumented. - #[codec(compact)] - schedule_version: u32, - #[codec(compact)] - initial: u32, - #[codec(compact)] - maximum: u32, - /// This field is reserved for future evolution of format. - /// - /// Basically, for now this field will be serialized as `None`. In the future - we would be able to extend this structure with. - _reserved: Option<()>, - /// Code instrumented with the latest schedule. - code: Vec, + /// Version of the schedule with which the code was instrumented. + #[codec(compact)] + schedule_version: u32, + #[codec(compact)] + initial: u32, + #[codec(compact)] + maximum: u32, + /// This field is reserved for future evolution of the format. + /// + /// Basically, for now this field will be serialized as `None`. In the future + /// we would be able to extend this structure.
+ _reserved: Option<()>, + /// Code instrumented with the latest schedule. + code: Vec, } /// Wasm executable loaded by `WasmLoader` and executed by `WasmVm`. pub struct WasmExecutable { - entrypoint_name: &'static [u8], - prefab_module: PrefabWasmModule, + entrypoint_name: &'static [u8], + prefab_module: PrefabWasmModule, } /// Loader which fetches `WasmExecutable` from the code cache. pub struct WasmLoader<'a, T: Trait> { - schedule: &'a Schedule, + schedule: &'a Schedule, } impl<'a, T: Trait> WasmLoader<'a, T> { - pub fn new(schedule: &'a Schedule) -> Self { - WasmLoader { schedule } - } + pub fn new(schedule: &'a Schedule) -> Self { + WasmLoader { schedule } + } } impl<'a, T: Trait> crate::exec::Loader for WasmLoader<'a, T> { - type Executable = WasmExecutable; - - fn load_init(&self, code_hash: &CodeHash) -> Result { - let prefab_module = load_code::(code_hash, self.schedule)?; - Ok(WasmExecutable { - entrypoint_name: b"deploy", - prefab_module, - }) - } - fn load_main(&self, code_hash: &CodeHash) -> Result { - let prefab_module = load_code::(code_hash, self.schedule)?; - Ok(WasmExecutable { - entrypoint_name: b"call", - prefab_module, - }) - } + type Executable = WasmExecutable; + + fn load_init(&self, code_hash: &CodeHash) -> Result { + let prefab_module = load_code::(code_hash, self.schedule)?; + Ok(WasmExecutable { + entrypoint_name: b"deploy", + prefab_module, + }) + } + fn load_main(&self, code_hash: &CodeHash) -> Result { + let prefab_module = load_code::(code_hash, self.schedule)?; + Ok(WasmExecutable { + entrypoint_name: b"call", + prefab_module, + }) + } } /// Implementation of `Vm` that takes `WasmExecutable` and executes it. pub struct WasmVm<'a, T: Trait> { - schedule: &'a Schedule, + schedule: &'a Schedule, } impl<'a, T: Trait> WasmVm<'a, T> { - pub fn new(schedule: &'a Schedule) -> Self { - WasmVm { schedule } - } + pub fn new(schedule: &'a Schedule) -> Self { + WasmVm { schedule } + } } impl<'a, T: Trait> crate::exec::Vm for WasmVm<'a, T> { - type Executable = WasmExecutable; - - fn execute>( - &self, - exec: &WasmExecutable, - ext: &mut E, - input_data: &[u8], - empty_output_buf: EmptyOutputBuf, - gas_meter: &mut GasMeter, - ) -> VmExecResult { - let memory = - sandbox::Memory::new(exec.prefab_module.initial, Some(exec.prefab_module.maximum)) - .unwrap_or_else(|_| { - // unlike `.expect`, explicit panic preserves the source location. - // Needed as we can't use `RUST_BACKTRACE` in here. - panic!( + type Executable = WasmExecutable; + + fn execute>( + &self, + exec: &WasmExecutable, + ext: &mut E, + input_data: &[u8], + empty_output_buf: EmptyOutputBuf, + gas_meter: &mut GasMeter, + ) -> VmExecResult { + let memory = + sandbox::Memory::new(exec.prefab_module.initial, Some(exec.prefab_module.maximum)) + .unwrap_or_else(|_| { + // unlike `.expect`, explicit panic preserves the source location. + // Needed as we can't use `RUST_BACKTRACE` in here. + panic!( "exec.prefab_module.initial can't be greater than exec.prefab_module.maximum; thus Memory::new must not fail; qed" ) - }); - - let mut imports = sandbox::EnvironmentDefinitionBuilder::new(); - imports.add_memory("env", "memory", memory.clone()); - runtime::Env::impls(&mut |name, func_ptr| { - imports.add_host_func("env", name, func_ptr); - }); - - let mut runtime = Runtime::new( - ext, - input_data, - empty_output_buf, - &self.schedule, - memory, - gas_meter, - ); - - // Instantiate the instance from the instrumented module code. 
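The `schedule_version` field above is what drives the lazy re-instrumentation in `code_cache::load` earlier: cached code is reused while its version is current and re-prepared from the pristine code once the schedule has moved on. A self-contained sketch of that policy, with `prepare` standing in for `prepare_contract`:

struct Cached {
    schedule_version: u32,
    code: Vec<u8>,
}

fn load_cached<'a>(
    cached: &'a mut Cached,
    pristine: &[u8],
    current_version: u32,
    prepare: impl Fn(&[u8]) -> Vec<u8>,
) -> &'a [u8] {
    if cached.schedule_version < current_version {
        cached.code = prepare(pristine); // re-instrument under the new schedule
        cached.schedule_version = current_version; // and remember which version it matches
    }
    &cached.code
}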
- match sandbox::Instance::new(&exec.prefab_module.code, &imports, &mut runtime) { - // No errors or traps were generated on instantiation! That - // means we can now invoke the contract entrypoint. - Ok(mut instance) => { - let err = instance - .invoke(exec.entrypoint_name, &[], &mut runtime) - .err(); - to_execution_result(runtime, err) - } - // `start` function trapped. Treat it in the same manner as an execution error. - Err(err @ sandbox::Error::Execution) => to_execution_result(runtime, Some(err)), - Err(_err @ sandbox::Error::Module) => { - // `Error::Module` is returned only if instantiation or linking failed (i.e. - // wasm bianry tried to import a function that is not provided by the host). - // This shouldn't happen because validation proccess ought to reject such binaries. - // - // Because panics are really undesirable in the runtime code, we treat this as - // a trap for now. Eventually, we might want to revisit this. - return VmExecResult::Trap("validation error"); - } - // Other instantiation errors. - // Return without executing anything. - Err(_) => return VmExecResult::Trap("during start function"), - } - } + }); + + let mut imports = sandbox::EnvironmentDefinitionBuilder::new(); + imports.add_memory("env", "memory", memory.clone()); + runtime::Env::impls(&mut |name, func_ptr| { + imports.add_host_func("env", name, func_ptr); + }); + + let mut runtime = Runtime::new( + ext, + input_data, + empty_output_buf, + &self.schedule, + memory, + gas_meter, + ); + + // Instantiate the instance from the instrumented module code. + match sandbox::Instance::new(&exec.prefab_module.code, &imports, &mut runtime) { + // No errors or traps were generated on instantiation! That + // means we can now invoke the contract entrypoint. + Ok(mut instance) => { + let err = instance + .invoke(exec.entrypoint_name, &[], &mut runtime) + .err(); + to_execution_result(runtime, err) + } + // `start` function trapped. Treat it in the same manner as an execution error. + Err(err @ sandbox::Error::Execution) => to_execution_result(runtime, Some(err)), + Err(_err @ sandbox::Error::Module) => { + // `Error::Module` is returned only if instantiation or linking failed (i.e. + // the wasm binary tried to import a function that is not provided by the host). + // This shouldn't happen because the validation process ought to reject such binaries. + // + // Because panics are really undesirable in the runtime code, we treat this as + // a trap for now. Eventually, we might want to revisit this. + return VmExecResult::Trap("validation error"); + } + // Other instantiation errors. + // Return without executing anything.
+ Err(_) => return VmExecResult::Trap("during start function"), + } + } } #[cfg(test)] mod tests { - use super::*; - use std::collections::HashMap; - use substrate_primitives::H256; - use crate::exec::{CallReceipt, Ext, InstantiateReceipt, EmptyOutputBuf}; - use crate::gas::GasMeter; - use crate::tests::{Test, Call}; - use wabt; - use crate::wasm::prepare::prepare_contract; - use crate::CodeHash; - - #[derive(Debug, PartialEq, Eq)] - struct DispatchEntry(Call); - #[derive(Debug, PartialEq, Eq)] - struct CreateEntry { - code_hash: H256, - endowment: u64, - data: Vec, - gas_left: u64, - } - #[derive(Debug, PartialEq, Eq)] - struct TransferEntry { - to: u64, - value: u64, - data: Vec, - gas_left: u64, - } - #[derive(Default)] - pub struct MockExt { - storage: HashMap, Vec>, - creates: Vec, - transfers: Vec, - dispatches: Vec, - next_account_id: u64, - random_seed: H256, - } - impl Ext for MockExt { - type T = Test; - - fn get_storage(&self, key: &[u8]) -> Option> { - self.storage.get(key).cloned() - } - fn set_storage(&mut self, key: &[u8], value: Option>) { - *self.storage.entry(key.to_vec()).or_insert(Vec::new()) = value.unwrap_or(Vec::new()); - } - fn instantiate( - &mut self, - code_hash: &CodeHash, - endowment: u64, - gas_meter: &mut GasMeter, - data: &[u8], - ) -> Result, &'static str> { - self.creates.push(CreateEntry { - code_hash: code_hash.clone(), - endowment, - data: data.to_vec(), - gas_left: gas_meter.gas_left(), - }); - let address = self.next_account_id; - self.next_account_id += 1; - - Ok(InstantiateReceipt { address }) - } - fn call( - &mut self, - to: &u64, - value: u64, - gas_meter: &mut GasMeter, - data: &[u8], - _output_data: EmptyOutputBuf, - ) -> Result { - self.transfers.push(TransferEntry { - to: *to, - value, - data: data.to_vec(), - gas_left: gas_meter.gas_left(), - }); - // Assume for now that it was just a plain transfer. - // TODO: Add tests for different call outcomes. - Ok(CallReceipt { - output_data: Vec::new(), - }) - } - fn note_dispatch_call(&mut self, call: Call) { - self.dispatches.push(DispatchEntry(call)); - } - fn caller(&self) -> &u64 { - &42 - } - fn address(&self) -> &u64 { - &69 - } - fn balance(&self) -> u64 { - 228 - } - fn value_transferred(&self) -> u64 { - 1337 - } - - fn now(&self) -> &u64 { - &1111 - } - - fn random_seed(&self) -> &H256{ - &self.random_seed - } - } - - fn execute( - wat: &str, - input_data: &[u8], - output_data: &mut Vec, - ext: &mut E, - gas_meter: &mut GasMeter, - ) -> Result<(), &'static str> { - use crate::exec::Vm; - - let wasm = wabt::wat2wasm(wat).unwrap(); - let schedule = crate::Schedule::::default(); - let prefab_module = - prepare_contract::(&wasm, &schedule).unwrap(); - - let exec = WasmExecutable { - // Use a "call" convention. 
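`MockExt` above is a plain record-and-assert test double: every host-side effect is pushed onto a `Vec`, and the tests assert on those vectors afterwards. The pattern in miniature, with a hypothetical one-method trait:

trait HostExt {
    fn transfer(&mut self, to: u64, value: u64);
}

#[derive(Default)]
struct RecordingExt {
    transfers: Vec<(u64, u64)>,
}

impl HostExt for RecordingExt {
    fn transfer(&mut self, to: u64, value: u64) {
        self.transfers.push((to, value)); // record the effect instead of performing it
    }
}

A test drives the code under test with a `RecordingExt`, then asserts on `transfers`, exactly as `contract_transfer` below asserts on `mock_ext.transfers`.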
- entrypoint_name: b"call", - prefab_module, - }; - - let cfg = Default::default(); - let vm = WasmVm::new(&cfg); - - *output_data = vm - .execute(&exec, ext, input_data, EmptyOutputBuf::new(), gas_meter) - .into_result()?; - - Ok(()) - } - - const CODE_TRANSFER: &str = r#" + use super::*; + use crate::exec::{CallReceipt, EmptyOutputBuf, Ext, InstantiateReceipt}; + use crate::gas::GasMeter; + use crate::tests::{Call, Test}; + use crate::wasm::prepare::prepare_contract; + use crate::CodeHash; + use std::collections::HashMap; + use substrate_primitives::H256; + use wabt; + + #[derive(Debug, PartialEq, Eq)] + struct DispatchEntry(Call); + #[derive(Debug, PartialEq, Eq)] + struct CreateEntry { + code_hash: H256, + endowment: u64, + data: Vec, + gas_left: u64, + } + #[derive(Debug, PartialEq, Eq)] + struct TransferEntry { + to: u64, + value: u64, + data: Vec, + gas_left: u64, + } + #[derive(Default)] + pub struct MockExt { + storage: HashMap, Vec>, + creates: Vec, + transfers: Vec, + dispatches: Vec, + next_account_id: u64, + random_seed: H256, + } + impl Ext for MockExt { + type T = Test; + + fn get_storage(&self, key: &[u8]) -> Option> { + self.storage.get(key).cloned() + } + fn set_storage(&mut self, key: &[u8], value: Option>) { + *self.storage.entry(key.to_vec()).or_insert(Vec::new()) = value.unwrap_or(Vec::new()); + } + fn instantiate( + &mut self, + code_hash: &CodeHash, + endowment: u64, + gas_meter: &mut GasMeter, + data: &[u8], + ) -> Result, &'static str> { + self.creates.push(CreateEntry { + code_hash: code_hash.clone(), + endowment, + data: data.to_vec(), + gas_left: gas_meter.gas_left(), + }); + let address = self.next_account_id; + self.next_account_id += 1; + + Ok(InstantiateReceipt { address }) + } + fn call( + &mut self, + to: &u64, + value: u64, + gas_meter: &mut GasMeter, + data: &[u8], + _output_data: EmptyOutputBuf, + ) -> Result { + self.transfers.push(TransferEntry { + to: *to, + value, + data: data.to_vec(), + gas_left: gas_meter.gas_left(), + }); + // Assume for now that it was just a plain transfer. + // TODO: Add tests for different call outcomes. + Ok(CallReceipt { + output_data: Vec::new(), + }) + } + fn note_dispatch_call(&mut self, call: Call) { + self.dispatches.push(DispatchEntry(call)); + } + fn caller(&self) -> &u64 { + &42 + } + fn address(&self) -> &u64 { + &69 + } + fn balance(&self) -> u64 { + 228 + } + fn value_transferred(&self) -> u64 { + 1337 + } + + fn now(&self) -> &u64 { + &1111 + } + + fn random_seed(&self) -> &H256 { + &self.random_seed + } + } + + fn execute( + wat: &str, + input_data: &[u8], + output_data: &mut Vec, + ext: &mut E, + gas_meter: &mut GasMeter, + ) -> Result<(), &'static str> { + use crate::exec::Vm; + + let wasm = wabt::wat2wasm(wat).unwrap(); + let schedule = crate::Schedule::::default(); + let prefab_module = + prepare_contract::(&wasm, &schedule).unwrap(); + + let exec = WasmExecutable { + // Use a "call" convention. 
+ entrypoint_name: b"call", + prefab_module, + }; + + let cfg = Default::default(); + let vm = WasmVm::new(&cfg); + + *output_data = vm + .execute(&exec, ext, input_data, EmptyOutputBuf::new(), gas_meter) + .into_result()?; + + Ok(()) + } + + const CODE_TRANSFER: &str = r#" (module ;; ext_call( ;; callee_ptr: u32, @@ -347,30 +347,30 @@ mod tests { ) "#; - #[test] - fn contract_transfer() { - let mut mock_ext = MockExt::default(); - execute( - CODE_TRANSFER, - &[], - &mut Vec::new(), - &mut mock_ext, - &mut GasMeter::with_limit(50_000, 1), - ) - .unwrap(); - - assert_eq!( - &mock_ext.transfers, - &[TransferEntry { - to: 9, - value: 6, - data: vec![1, 2, 3, 4], - gas_left: 49970, - }] - ); - } - - const CODE_CREATE: &str = r#" + #[test] + fn contract_transfer() { + let mut mock_ext = MockExt::default(); + execute( + CODE_TRANSFER, + &[], + &mut Vec::new(), + &mut mock_ext, + &mut GasMeter::with_limit(50_000, 1), + ) + .unwrap(); + + assert_eq!( + &mock_ext.transfers, + &[TransferEntry { + to: 9, + value: 6, + data: vec![1, 2, 3, 4], + gas_left: 49970, + }] + ); + } + + const CODE_CREATE: &str = r#" (module ;; ext_create( ;; code_ptr: u32, @@ -408,30 +408,30 @@ mod tests { ) "#; - #[test] - fn contract_create() { - let mut mock_ext = MockExt::default(); - execute( - CODE_CREATE, - &[], - &mut Vec::new(), - &mut mock_ext, - &mut GasMeter::with_limit(50_000, 1), - ) - .unwrap(); - - assert_eq!( - &mock_ext.creates, - &[CreateEntry { - code_hash: [0x11; 32].into(), - endowment: 3, - data: vec![1, 2, 3, 4], - gas_left: 49946, - }] - ); - } - - const CODE_TRANSFER_LIMITED_GAS: &str = r#" + #[test] + fn contract_create() { + let mut mock_ext = MockExt::default(); + execute( + CODE_CREATE, + &[], + &mut Vec::new(), + &mut mock_ext, + &mut GasMeter::with_limit(50_000, 1), + ) + .unwrap(); + + assert_eq!( + &mock_ext.creates, + &[CreateEntry { + code_hash: [0x11; 32].into(), + endowment: 3, + data: vec![1, 2, 3, 4], + gas_left: 49946, + }] + ); + } + + const CODE_TRANSFER_LIMITED_GAS: &str = r#" (module ;; ext_call( ;; callee_ptr: u32, @@ -470,30 +470,30 @@ mod tests { ) "#; - #[test] - fn contract_call_limited_gas() { - let mut mock_ext = MockExt::default(); - execute( - &CODE_TRANSFER_LIMITED_GAS, - &[], - &mut Vec::new(), - &mut mock_ext, - &mut GasMeter::with_limit(50_000, 1), - ) - .unwrap(); - - assert_eq!( - &mock_ext.transfers, - &[TransferEntry { - to: 9, - value: 6, - data: vec![1, 2, 3, 4], - gas_left: 228, - }] - ); - } - - const CODE_GET_STORAGE: &str = r#" + #[test] + fn contract_call_limited_gas() { + let mut mock_ext = MockExt::default(); + execute( + &CODE_TRANSFER_LIMITED_GAS, + &[], + &mut Vec::new(), + &mut mock_ext, + &mut GasMeter::with_limit(50_000, 1), + ) + .unwrap(); + + assert_eq!( + &mock_ext.transfers, + &[TransferEntry { + to: 9, + value: 6, + data: vec![1, 2, 3, 4], + gas_left: 228, + }] + ); + } + + const CODE_GET_STORAGE: &str = r#" (module (import "env" "ext_get_storage" (func $ext_get_storage (param i32) (result i32))) (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) @@ -558,29 +558,29 @@ mod tests { ) "#; - #[test] - fn get_storage_puts_data_into_scratch_buf() { - let mut mock_ext = MockExt::default(); - mock_ext - .storage - .insert([0x11; 32].to_vec(), [0x22; 32].to_vec()); - - let mut return_buf = Vec::new(); - execute( - CODE_GET_STORAGE, - &[], - &mut return_buf, - &mut mock_ext, - &mut GasMeter::with_limit(50_000, 1), - ) - .unwrap(); - - assert_eq!(return_buf, [0x22; 32].to_vec()); - } - - /// calls `ext_caller`, loads the address from 
the scratch buffer and - /// compares it with the constant 42. - const CODE_CALLER: &str = r#" + #[test] + fn get_storage_puts_data_into_scratch_buf() { + let mut mock_ext = MockExt::default(); + mock_ext + .storage + .insert([0x11; 32].to_vec(), [0x22; 32].to_vec()); + + let mut return_buf = Vec::new(); + execute( + CODE_GET_STORAGE, + &[], + &mut return_buf, + &mut mock_ext, + &mut GasMeter::with_limit(50_000, 1), + ) + .unwrap(); + + assert_eq!(return_buf, [0x22; 32].to_vec()); + } + + /// calls `ext_caller`, loads the address from the scratch buffer and + /// compares it with the constant 42. + const CODE_CALLER: &str = r#" (module (import "env" "ext_caller" (func $ext_caller)) (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) @@ -630,22 +630,22 @@ mod tests { ) "#; - #[test] - fn caller() { - let mut mock_ext = MockExt::default(); - execute( - CODE_CALLER, - &[], - &mut Vec::new(), - &mut mock_ext, - &mut GasMeter::with_limit(50_000, 1), - ) - .unwrap(); - } - - /// calls `ext_address`, loads the address from the scratch buffer and - /// compares it with the constant 69. - const CODE_ADDRESS: &str = r#" + #[test] + fn caller() { + let mut mock_ext = MockExt::default(); + execute( + CODE_CALLER, + &[], + &mut Vec::new(), + &mut mock_ext, + &mut GasMeter::with_limit(50_000, 1), + ) + .unwrap(); + } + + /// calls `ext_address`, loads the address from the scratch buffer and + /// compares it with the constant 69. + const CODE_ADDRESS: &str = r#" (module (import "env" "ext_address" (func $ext_address)) (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) @@ -695,20 +695,20 @@ mod tests { ) "#; - #[test] - fn address() { - let mut mock_ext = MockExt::default(); - execute( - CODE_ADDRESS, - &[], - &mut Vec::new(), - &mut mock_ext, - &mut GasMeter::with_limit(50_000, 1), - ) - .unwrap(); - } - - const CODE_BALANCE: &str = r#" + #[test] + fn address() { + let mut mock_ext = MockExt::default(); + execute( + CODE_ADDRESS, + &[], + &mut Vec::new(), + &mut mock_ext, + &mut GasMeter::with_limit(50_000, 1), + ) + .unwrap(); + } + + const CODE_BALANCE: &str = r#" (module (import "env" "ext_balance" (func $ext_balance)) (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) @@ -757,21 +757,21 @@ mod tests { ) "#; - #[test] - fn balance() { - let mut mock_ext = MockExt::default(); - let mut gas_meter = GasMeter::with_limit(50_000, 1); - execute( - CODE_BALANCE, - &[], - &mut Vec::new(), - &mut mock_ext, - &mut gas_meter, - ) - .unwrap(); - } - - const CODE_GAS_PRICE: &str = r#" + #[test] + fn balance() { + let mut mock_ext = MockExt::default(); + let mut gas_meter = GasMeter::with_limit(50_000, 1); + execute( + CODE_BALANCE, + &[], + &mut Vec::new(), + &mut mock_ext, + &mut gas_meter, + ) + .unwrap(); + } + + const CODE_GAS_PRICE: &str = r#" (module (import "env" "ext_gas_price" (func $ext_gas_price)) (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) @@ -820,21 +820,21 @@ mod tests { ) "#; - #[test] - fn gas_price() { - let mut mock_ext = MockExt::default(); - let mut gas_meter = GasMeter::with_limit(50_000, 1312); - execute( - CODE_GAS_PRICE, - &[], - &mut Vec::new(), - &mut mock_ext, - &mut gas_meter, - ) - .unwrap(); - } - - const CODE_GAS_LEFT: &str = r#" + #[test] + fn gas_price() { + let mut mock_ext = MockExt::default(); + let mut gas_meter = GasMeter::with_limit(50_000, 1312); + execute( + CODE_GAS_PRICE, + &[], + &mut Vec::new(), + &mut mock_ext, + &mut gas_meter, + ) + .unwrap(); + } + + const CODE_GAS_LEFT: &str 
= r#" (module (import "env" "ext_gas_left" (func $ext_gas_left)) (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) @@ -881,21 +881,21 @@ mod tests { ) "#; - #[test] - fn gas_left() { - let mut mock_ext = MockExt::default(); - let mut gas_meter = GasMeter::with_limit(50_000, 1312); - execute( - CODE_GAS_LEFT, - &[], - &mut Vec::new(), - &mut mock_ext, - &mut gas_meter, - ) - .unwrap(); - } - - const CODE_VALUE_TRANSFERRED: &str = r#" + #[test] + fn gas_left() { + let mut mock_ext = MockExt::default(); + let mut gas_meter = GasMeter::with_limit(50_000, 1312); + execute( + CODE_GAS_LEFT, + &[], + &mut Vec::new(), + &mut mock_ext, + &mut gas_meter, + ) + .unwrap(); + } + + const CODE_VALUE_TRANSFERRED: &str = r#" (module (import "env" "ext_value_transferred" (func $ext_value_transferred)) (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) @@ -944,21 +944,21 @@ mod tests { ) "#; - #[test] - fn value_transferred() { - let mut mock_ext = MockExt::default(); - let mut gas_meter = GasMeter::with_limit(50_000, 1); - execute( - CODE_VALUE_TRANSFERRED, - &[], - &mut Vec::new(), - &mut mock_ext, - &mut gas_meter, - ) - .unwrap(); - } - - const CODE_DISPATCH_CALL: &str = r#" + #[test] + fn value_transferred() { + let mut mock_ext = MockExt::default(); + let mut gas_meter = GasMeter::with_limit(50_000, 1); + execute( + CODE_VALUE_TRANSFERRED, + &[], + &mut Vec::new(), + &mut mock_ext, + &mut gas_meter, + ) + .unwrap(); + } + + const CODE_DISPATCH_CALL: &str = r#" (module (import "env" "ext_dispatch_call" (func $ext_dispatch_call (param i32 i32))) (import "env" "memory" (memory 1 1)) @@ -975,30 +975,30 @@ mod tests { ) "#; - #[test] - fn dispatch_call() { - // This test can fail due to the encoding changes. In case it becomes too annoying - // let's rewrite so as we use this module controlled call or we serialize it in runtime. - - let mut mock_ext = MockExt::default(); - execute( - CODE_DISPATCH_CALL, - &[], - &mut Vec::new(), - &mut mock_ext, - &mut GasMeter::with_limit(50_000, 1), - ) - .unwrap(); - - assert_eq!( - &mock_ext.dispatches, - &[DispatchEntry( - Call::Balances(balances::Call::set_balance(42, 1337, 0)), - )] - ); - } - - const CODE_RETURN_FROM_START_FN: &str = r#" + #[test] + fn dispatch_call() { + // This test can fail due to the encoding changes. In case it becomes too annoying + // let's rewrite so as we use this module controlled call or we serialize it in runtime. 
+ + let mut mock_ext = MockExt::default(); + execute( + CODE_DISPATCH_CALL, + &[], + &mut Vec::new(), + &mut mock_ext, + &mut GasMeter::with_limit(50_000, 1), + ) + .unwrap(); + + assert_eq!( + &mock_ext.dispatches, + &[DispatchEntry(Call::Balances(balances::Call::set_balance( + 42, 1337, 0 + )),)] + ); + } + + const CODE_RETURN_FROM_START_FN: &str = r#" (module (import "env" "ext_return" (func $ext_return (param i32 i32))) (import "env" "memory" (memory 1 1)) @@ -1021,23 +1021,23 @@ mod tests { ) "#; - #[test] - fn return_from_start_fn() { - let mut mock_ext = MockExt::default(); - let mut output_data = Vec::new(); - execute( - CODE_RETURN_FROM_START_FN, - &[], - &mut output_data, - &mut mock_ext, - &mut GasMeter::with_limit(50_000, 1), - ) - .unwrap(); - - assert_eq!(output_data, vec![1, 2, 3, 4]); - } - - const CODE_TIMESTAMP_NOW: &str = r#" + #[test] + fn return_from_start_fn() { + let mut mock_ext = MockExt::default(); + let mut output_data = Vec::new(); + execute( + CODE_RETURN_FROM_START_FN, + &[], + &mut output_data, + &mut mock_ext, + &mut GasMeter::with_limit(50_000, 1), + ) + .unwrap(); + + assert_eq!(output_data, vec![1, 2, 3, 4]); + } + + const CODE_TIMESTAMP_NOW: &str = r#" (module (import "env" "ext_now" (func $ext_now)) (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) @@ -1086,21 +1086,21 @@ mod tests { ) "#; - #[test] - fn now() { - let mut mock_ext = MockExt::default(); - let mut gas_meter = GasMeter::with_limit(50_000, 1); - execute( - CODE_TIMESTAMP_NOW, - &[], - &mut Vec::new(), - &mut mock_ext, - &mut gas_meter, - ) - .unwrap(); - } - - const CODE_RANDOM_SEED: &str = r#" + #[test] + fn now() { + let mut mock_ext = MockExt::default(); + let mut gas_meter = GasMeter::with_limit(50_000, 1); + execute( + CODE_TIMESTAMP_NOW, + &[], + &mut Vec::new(), + &mut mock_ext, + &mut gas_meter, + ) + .unwrap(); + } + + const CODE_RANDOM_SEED: &str = r#" (module (import "env" "ext_random_seed" (func $ext_random_seed)) (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) @@ -1145,25 +1145,23 @@ mod tests { ) "#; - #[test] - fn random_seed() { - let mut mock_ext = MockExt::default(); - let seed: [u8; 32] = [ - 1,0,0,0,0,0,0,0, - 2,0,0,0,0,0,0,0, - 3,0,0,0,0,0,0,0, - 4,0,0,0,0,0,0,0, - ]; - mock_ext.random_seed = H256::from_slice(&seed); - let mut gas_meter = GasMeter::with_limit(50_000, 1); - execute( - CODE_RANDOM_SEED, - &[], - &mut Vec::new(), - &mut mock_ext, - &mut gas_meter, - ) - .unwrap(); - } + #[test] + fn random_seed() { + let mut mock_ext = MockExt::default(); + let seed: [u8; 32] = [ + 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, + 0, 0, 0, + ]; + mock_ext.random_seed = H256::from_slice(&seed); + let mut gas_meter = GasMeter::with_limit(50_000, 1); + execute( + CODE_RANDOM_SEED, + &[], + &mut Vec::new(), + &mut mock_ext, + &mut gas_meter, + ) + .unwrap(); + } } diff --git a/srml/contract/src/wasm/prepare.rs b/srml/contract/src/wasm/prepare.rs index 52f1580aa9..79992ada56 100644 --- a/srml/contract/src/wasm/prepare.rs +++ b/srml/contract/src/wasm/prepare.rs @@ -22,232 +22,230 @@ use crate::wasm::env_def::ImportSatisfyCheck; use crate::wasm::PrefabWasmModule; use crate::{Schedule, Trait}; -use parity_wasm::elements::{self, Internal, External, MemoryType, Type}; +use parity_wasm::elements::{self, External, Internal, MemoryType, Type}; use pwasm_utils; use pwasm_utils::rules; use rstd::prelude::*; use runtime_primitives::traits::As; struct ContractModule<'a, Gas: 'a> { - // An `Option` is used 
here for loaning (`take()`-ing) the module. - // Invariant: Can't be `None` (i.e. on enter and on exit from the function - // the value *must* be `Some`). - module: Option, - schedule: &'a Schedule, + // An `Option` is used here for loaning (`take()`-ing) the module. + // Invariant: Can't be `None` (i.e. on enter and on exit from the function + // the value *must* be `Some`). + module: Option, + schedule: &'a Schedule, } impl<'a, Gas: 'a + As + Clone> ContractModule<'a, Gas> { - fn new( - original_code: &[u8], - schedule: &'a Schedule, - ) -> Result, &'static str> { - let module = - elements::deserialize_buffer(original_code).map_err(|_| "can't decode wasm code")?; - Ok(ContractModule { - module: Some(module), - schedule, - }) - } - - /// Ensures that module doesn't declare internal memories. - /// - /// In this runtime we only allow wasm module to import memory from the environment. - /// Memory section contains declarations of internal linear memories, so if we find one - /// we reject such a module. - fn ensure_no_internal_memory(&self) -> Result<(), &'static str> { - let module = self - .module - .as_ref() - .expect("On entry to the function `module` can't be None; qed"); - if module - .memory_section() - .map_or(false, |ms| ms.entries().len() > 0) - { - return Err("module declares internal memory"); - } - Ok(()) - } - - fn inject_gas_metering(&mut self) -> Result<(), &'static str> { - let gas_rules = - rules::Set::new( - self.schedule.regular_op_cost.clone().as_(), - Default::default(), - ) - .with_grow_cost(self.schedule.grow_mem_cost.clone().as_()) - .with_forbidden_floats(); - - let module = self - .module - .take() - .expect("On entry to the function `module` can't be `None`; qed"); - - let contract_module = pwasm_utils::inject_gas_counter(module, &gas_rules) - .map_err(|_| "gas instrumentation failed")?; - - self.module = Some(contract_module); - Ok(()) - } - - fn inject_stack_height_metering(&mut self) -> Result<(), &'static str> { - let module = self - .module - .take() - .expect("On entry to the function `module` can't be `None`; qed"); - - let contract_module = - pwasm_utils::stack_height::inject_limiter(module, self.schedule.max_stack_height) - .map_err(|_| "stack height instrumentation failed")?; - - self.module = Some(contract_module); - Ok(()) - } - - /// Check that the module has required exported functions. For now - /// these are just entrypoints: - /// - /// - 'call' - /// - 'deploy' - fn scan_exports(&self) -> Result<(), &'static str> { - let mut deploy_found = false; - let mut call_found = false; - - let module = self - .module - .as_ref() - .expect("On entry to the function `module` can't be `None`; qed"); - - let types = module.type_section().map(|ts| ts.types()).unwrap_or(&[]); - let export_entries = module - .export_section() - .map(|is| is.entries()) - .unwrap_or(&[]); - let func_entries = module - .function_section() - .map(|fs| fs.entries()) - .unwrap_or(&[]); - - // Function index space consists of imported function following by - // declared functions. Calculate the total number of imported functions so - // we can use it to convert indexes from function space to declared function space. 
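// A worked example of the index-space conversion described above (the counts
// are illustrative): with three imported functions, the wasm function index
// space is [0, 1, 2] for imports followed by [3, 4, ...] for declared
// functions, so an export pointing at index 3 refers to declared function 0,
// while one pointing at index 2 underflows and must be rejected.
//
//   let fn_space_offset: u32 = 3;
//   assert_eq!(3u32.checked_sub(fn_space_offset), Some(0)); // declared fn 0
//   assert_eq!(2u32.checked_sub(fn_space_offset), None);    // imported fn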
- let fn_space_offset = module - .import_section() - .map(|is| is.entries()) - .unwrap_or(&[]) - .iter() - .filter(|entry| { - match *entry.external() { - External::Function(_) => true, - _ => false, - } - }) - .count(); - - for export in export_entries { - match export.field() { - "call" => call_found = true, - "deploy" => deploy_found = true, - _ => continue, - } - - // Then check the export kind. "call" and "deploy" are - // functions. - let fn_idx = match export.internal() { - Internal::Function(ref fn_idx) => *fn_idx, - _ => return Err("expected a function"), - }; - - // convert index from function index space to declared index space. - let fn_idx = match fn_idx.checked_sub(fn_space_offset as u32) { - Some(fn_idx) => fn_idx, - None => { - // Underflow here means fn_idx points to imported function which we don't allow! - return Err("entry point points to an imported function"); - } - }; - - // Then check the signature. - // Both "call" and "deploy" has a () -> () function type. - let func_ty_idx = func_entries.get(fn_idx as usize) - .ok_or_else(|| "export refers to non-existent function")? - .type_ref(); - let Type::Function(ref func_ty) = types - .get(func_ty_idx as usize) - .ok_or_else(|| "function has a non-existent type")?; - if !(func_ty.params().is_empty() && func_ty.return_type().is_none()) { - return Err("entry point has wrong signature"); - } - } - - if !deploy_found { - return Err("deploy function isn't exported"); - } - if !call_found { - return Err("call function isn't exported"); - } - - Ok(()) - } - - /// Scan an import section if any. - /// - /// This accomplishes two tasks: - /// - /// - checks any imported function against defined host functions set, incl. - /// their signatures. - /// - if there is a memory import, returns it's descriptor - fn scan_imports(&self) -> Result, &'static str> { - let module = self - .module - .as_ref() - .expect("On entry to the function `module` can't be `None`; qed"); - - let types = module.type_section().map(|ts| ts.types()).unwrap_or(&[]); - let import_entries = module - .import_section() - .map(|is| is.entries()) - .unwrap_or(&[]); - - let mut imported_mem_type = None; - - for import in import_entries { - if import.module() != "env" { - // This import tries to import something from non-"env" module, - // but all imports are located in "env" at the moment. - return Err("module has imports from a non-'env' namespace"); - } - - let type_idx = match import.external() { - &External::Function(ref type_idx) => type_idx, - &External::Memory(ref memory_type) => { - imported_mem_type = Some(memory_type); - continue; - } - _ => continue, - }; - - let Type::Function(ref func_ty) = types - .get(*type_idx as usize) - .ok_or_else(|| "validation: import entry points to a non-existent type")?; - - // We disallow importing `gas` function here since it is treated as implementation detail. 
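// Background for the check below (a sketch of pwasm_utils' instrumentation,
// stated here as an assumption): gas metering rewrites the module so that it
// imports a host-provided "gas" function and calls it at the start of every
// metered block, conceptually:
//
//   (import "env" "gas" (func $gas (param i32)))
//   (func (export "call")
//     (call $gas (i32.const N)) ;; N = cost of the block that follows
//     ;; ... original body ...
//   )
//
// A contract importing "gas" itself could tamper with that accounting, hence
// the explicit rejection.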
- if import.field().as_bytes() == b"gas" - || !C::can_satisfy(import.field().as_bytes(), func_ty) - { - return Err("module imports a non-existent function"); - } - } - Ok(imported_mem_type) - } - - fn into_wasm_code(mut self) -> Result, &'static str> { - elements::serialize( - self.module - .take() - .expect("On entry to the function `module` can't be `None`; qed"), - ) - .map_err(|_| "error serializing instrumented module") - } + fn new( + original_code: &[u8], + schedule: &'a Schedule, + ) -> Result, &'static str> { + let module = + elements::deserialize_buffer(original_code).map_err(|_| "can't decode wasm code")?; + Ok(ContractModule { + module: Some(module), + schedule, + }) + } + + /// Ensures that module doesn't declare internal memories. + /// + /// In this runtime we only allow wasm module to import memory from the environment. + /// Memory section contains declarations of internal linear memories, so if we find one + /// we reject such a module. + fn ensure_no_internal_memory(&self) -> Result<(), &'static str> { + let module = self + .module + .as_ref() + .expect("On entry to the function `module` can't be None; qed"); + if module + .memory_section() + .map_or(false, |ms| ms.entries().len() > 0) + { + return Err("module declares internal memory"); + } + Ok(()) + } + + fn inject_gas_metering(&mut self) -> Result<(), &'static str> { + let gas_rules = rules::Set::new( + self.schedule.regular_op_cost.clone().as_(), + Default::default(), + ) + .with_grow_cost(self.schedule.grow_mem_cost.clone().as_()) + .with_forbidden_floats(); + + let module = self + .module + .take() + .expect("On entry to the function `module` can't be `None`; qed"); + + let contract_module = pwasm_utils::inject_gas_counter(module, &gas_rules) + .map_err(|_| "gas instrumentation failed")?; + + self.module = Some(contract_module); + Ok(()) + } + + fn inject_stack_height_metering(&mut self) -> Result<(), &'static str> { + let module = self + .module + .take() + .expect("On entry to the function `module` can't be `None`; qed"); + + let contract_module = + pwasm_utils::stack_height::inject_limiter(module, self.schedule.max_stack_height) + .map_err(|_| "stack height instrumentation failed")?; + + self.module = Some(contract_module); + Ok(()) + } + + /// Check that the module has required exported functions. For now + /// these are just entrypoints: + /// + /// - 'call' + /// - 'deploy' + fn scan_exports(&self) -> Result<(), &'static str> { + let mut deploy_found = false; + let mut call_found = false; + + let module = self + .module + .as_ref() + .expect("On entry to the function `module` can't be `None`; qed"); + + let types = module.type_section().map(|ts| ts.types()).unwrap_or(&[]); + let export_entries = module + .export_section() + .map(|is| is.entries()) + .unwrap_or(&[]); + let func_entries = module + .function_section() + .map(|fs| fs.entries()) + .unwrap_or(&[]); + + // Function index space consists of imported function following by + // declared functions. Calculate the total number of imported functions so + // we can use it to convert indexes from function space to declared function space. + let fn_space_offset = module + .import_section() + .map(|is| is.entries()) + .unwrap_or(&[]) + .iter() + .filter(|entry| match *entry.external() { + External::Function(_) => true, + _ => false, + }) + .count(); + + for export in export_entries { + match export.field() { + "call" => call_found = true, + "deploy" => deploy_found = true, + _ => continue, + } + + // Then check the export kind. 
"call" and "deploy" are + // functions. + let fn_idx = match export.internal() { + Internal::Function(ref fn_idx) => *fn_idx, + _ => return Err("expected a function"), + }; + + // convert index from function index space to declared index space. + let fn_idx = match fn_idx.checked_sub(fn_space_offset as u32) { + Some(fn_idx) => fn_idx, + None => { + // Underflow here means fn_idx points to imported function which we don't allow! + return Err("entry point points to an imported function"); + } + }; + + // Then check the signature. + // Both "call" and "deploy" has a () -> () function type. + let func_ty_idx = func_entries + .get(fn_idx as usize) + .ok_or_else(|| "export refers to non-existent function")? + .type_ref(); + let Type::Function(ref func_ty) = types + .get(func_ty_idx as usize) + .ok_or_else(|| "function has a non-existent type")?; + if !(func_ty.params().is_empty() && func_ty.return_type().is_none()) { + return Err("entry point has wrong signature"); + } + } + + if !deploy_found { + return Err("deploy function isn't exported"); + } + if !call_found { + return Err("call function isn't exported"); + } + + Ok(()) + } + + /// Scan an import section if any. + /// + /// This accomplishes two tasks: + /// + /// - checks any imported function against defined host functions set, incl. + /// their signatures. + /// - if there is a memory import, returns it's descriptor + fn scan_imports(&self) -> Result, &'static str> { + let module = self + .module + .as_ref() + .expect("On entry to the function `module` can't be `None`; qed"); + + let types = module.type_section().map(|ts| ts.types()).unwrap_or(&[]); + let import_entries = module + .import_section() + .map(|is| is.entries()) + .unwrap_or(&[]); + + let mut imported_mem_type = None; + + for import in import_entries { + if import.module() != "env" { + // This import tries to import something from non-"env" module, + // but all imports are located in "env" at the moment. + return Err("module has imports from a non-'env' namespace"); + } + + let type_idx = match import.external() { + &External::Function(ref type_idx) => type_idx, + &External::Memory(ref memory_type) => { + imported_mem_type = Some(memory_type); + continue; + } + _ => continue, + }; + + let Type::Function(ref func_ty) = types + .get(*type_idx as usize) + .ok_or_else(|| "validation: import entry points to a non-existent type")?; + + // We disallow importing `gas` function here since it is treated as implementation detail. + if import.field().as_bytes() == b"gas" + || !C::can_satisfy(import.field().as_bytes(), func_ty) + { + return Err("module imports a non-existent function"); + } + } + Ok(imported_mem_type) + } + + fn into_wasm_code(mut self) -> Result, &'static str> { + elements::serialize( + self.module + .take() + .expect("On entry to the function `module` can't be `None`; qed"), + ) + .map_err(|_| "error serializing instrumented module") + } } /// Loads the given module given in `original_code`, performs some checks on it and @@ -261,77 +259,77 @@ impl<'a, Gas: 'a + As + Clone> ContractModule<'a, Gas> { /// /// The preprocessing includes injecting code for gas metering and metering the height of stack. 
pub fn prepare_contract( - original_code: &[u8], - schedule: &Schedule, + original_code: &[u8], + schedule: &Schedule, ) -> Result { - let mut contract_module = ContractModule::new(original_code, schedule)?; - contract_module.scan_exports()?; - contract_module.ensure_no_internal_memory()?; - - struct MemoryDefinition { - initial: u32, - maximum: u32, - } - - let memory_def = if let Some(memory_type) = contract_module.scan_imports::()? { - // Inspect the module to extract the initial and maximum page count. - let limits = memory_type.limits(); - match (limits.initial(), limits.maximum()) { - (initial, Some(maximum)) if initial > maximum => { - return Err( - "Requested initial number of pages should not exceed the requested maximum", - ); - } - (_, Some(maximum)) if maximum > schedule.max_memory_pages => { - return Err("Maximum number of pages should not exceed the configured maximum."); - } - (initial, Some(maximum)) => MemoryDefinition { initial, maximum }, - (_, None) => { - // Maximum number of pages should be always declared. - // This isn't a hard requirement and can be treated as a maxiumum set - // to configured maximum. - return Err("Maximum number of pages should be always declared."); - } - } - } else { - // If none memory imported then just crate an empty placeholder. - // Any access to it will lead to out of bounds trap. - MemoryDefinition { - initial: 0, - maximum: 0, - } - }; - - contract_module.inject_gas_metering()?; - contract_module.inject_stack_height_metering()?; - - Ok(PrefabWasmModule { - schedule_version: schedule.version, - initial: memory_def.initial, - maximum: memory_def.maximum, - _reserved: None, - code: contract_module.into_wasm_code()?, - }) + let mut contract_module = ContractModule::new(original_code, schedule)?; + contract_module.scan_exports()?; + contract_module.ensure_no_internal_memory()?; + + struct MemoryDefinition { + initial: u32, + maximum: u32, + } + + let memory_def = if let Some(memory_type) = contract_module.scan_imports::()? { + // Inspect the module to extract the initial and maximum page count. + let limits = memory_type.limits(); + match (limits.initial(), limits.maximum()) { + (initial, Some(maximum)) if initial > maximum => { + return Err( + "Requested initial number of pages should not exceed the requested maximum", + ); + } + (_, Some(maximum)) if maximum > schedule.max_memory_pages => { + return Err("Maximum number of pages should not exceed the configured maximum."); + } + (initial, Some(maximum)) => MemoryDefinition { initial, maximum }, + (_, None) => { + // Maximum number of pages should be always declared. + // This isn't a hard requirement and can be treated as a maxiumum set + // to configured maximum. + return Err("Maximum number of pages should be always declared."); + } + } + } else { + // If none memory imported then just crate an empty placeholder. + // Any access to it will lead to out of bounds trap. 
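// Why the (0, 0) placeholder announced above is safe (a sketch, assuming the
// sandbox memory semantics used elsewhere in this crate): zero initial pages
// and a zero maximum mean the linear memory has no valid addresses and can
// never grow, so every load or store the contract attempts traps:
//
//   let mem = sandbox::Memory::new(0, Some(0))?; // zero pages, growth capped
//   mem.get(0, &mut [0u8; 1])                    // -> Err(_): no byte 0 exists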
+ MemoryDefinition { + initial: 0, + maximum: 0, + } + }; + + contract_module.inject_gas_metering()?; + contract_module.inject_stack_height_metering()?; + + Ok(PrefabWasmModule { + schedule_version: schedule.version, + initial: memory_def.initial, + maximum: memory_def.maximum, + _reserved: None, + code: contract_module.into_wasm_code()?, + }) } #[cfg(test)] mod tests { - use super::*; - use crate::tests::Test; - use crate::exec::Ext; - use std::fmt; - use wabt; - use assert_matches::assert_matches; - - impl fmt::Debug for PrefabWasmModule { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "PreparedContract {{ .. }}") - } - } - - // Define test environment for tests. We need ImportSatisfyCheck - // implementation from it. So actual implementations doesn't matter. - define_env!(TestEnv, , + use super::*; + use crate::exec::Ext; + use crate::tests::Test; + use assert_matches::assert_matches; + use std::fmt; + use wabt; + + impl fmt::Debug for PrefabWasmModule { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "PreparedContract {{ .. }}") + } + } + + // Define test environment for tests. We need ImportSatisfyCheck + // implementation from it. So actual implementations doesn't matter. + define_env!(TestEnv, , panic(_ctx) => { unreachable!(); }, // gas is an implementation defined function and a contract can't import it. @@ -340,7 +338,7 @@ mod tests { nop(_ctx, _unused: u64) => { unreachable!(); }, ); - macro_rules! prepare_test { + macro_rules! prepare_test { ($name:ident, $wat:expr, $($expected:tt)*) => { #[test] fn $name() { @@ -352,8 +350,9 @@ mod tests { }; } - prepare_test!(no_floats, - r#" + prepare_test!( + no_floats, + r#" (module (func (export "call") (drop @@ -365,20 +364,21 @@ mod tests { ) (func (export "deploy")) )"#, - Err("gas instrumentation failed") - ); + Err("gas instrumentation failed") + ); - mod memories { - use super::*; + mod memories { + use super::*; - // Tests below assumes that maximum page number is configured to a certain number. - #[test] - fn assume_memory_size() { - assert_eq!(Schedule::::default().max_memory_pages, 16); - } + // Tests below assumes that maximum page number is configured to a certain number. 
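// The page-limit checks in prepare_contract above can be summarised as a
// small decision table (a condensed model for illustration; 16 matches the
// max_memory_pages value asserted just below):
//
//   let max = 16u32;
//   let ok = |initial: u32, maximum: Option<u32>| match (initial, maximum) {
//       (i, Some(m)) if i > m => false,   // initial exceeds requested maximum
//       (_, Some(m)) if m > max => false, // exceeds the configured maximum
//       (_, Some(_)) => true,
//       (_, None) => false,               // maximum must always be declared
//   };
//   assert!(ok(1, Some(1)));
//   assert!(!ok(16, Some(1)) && !ok(1, Some(17)) && !ok(1, None));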
+ #[test] + fn assume_memory_size() { + assert_eq!(Schedule::::default().max_memory_pages, 16); + } - prepare_test!(memory_with_one_page, - r#" + prepare_test!( + memory_with_one_page, + r#" (module (import "env" "memory" (memory 1 1)) @@ -386,11 +386,12 @@ mod tests { (func (export "deploy")) ) "#, - Ok(_) - ); + Ok(_) + ); - prepare_test!(internal_memory_declaration, - r#" + prepare_test!( + internal_memory_declaration, + r#" (module (memory 1 1) @@ -398,22 +399,24 @@ mod tests { (func (export "deploy")) ) "#, - Err("module declares internal memory") - ); + Err("module declares internal memory") + ); - prepare_test!(no_memory_import, - r#" + prepare_test!( + no_memory_import, + r#" (module ;; no memory imported (func (export "call")) (func (export "deploy")) )"#, - Ok(_) - ); + Ok(_) + ); - prepare_test!(initial_exceeds_maximum, - r#" + prepare_test!( + initial_exceeds_maximum, + r#" (module (import "env" "memory" (memory 16 1)) @@ -421,11 +424,12 @@ mod tests { (func (export "deploy")) ) "#, - Err("Requested initial number of pages should not exceed the requested maximum") - ); + Err("Requested initial number of pages should not exceed the requested maximum") + ); - prepare_test!(no_maximum, - r#" + prepare_test!( + no_maximum, + r#" (module (import "env" "memory" (memory 1)) @@ -433,11 +437,12 @@ mod tests { (func (export "deploy")) ) "#, - Err("Maximum number of pages should be always declared.") - ); + Err("Maximum number of pages should be always declared.") + ); - prepare_test!(requested_maximum_exceeds_configured_maximum, - r#" + prepare_test!( + requested_maximum_exceeds_configured_maximum, + r#" (module (import "env" "memory" (memory 1 17)) @@ -445,15 +450,16 @@ mod tests { (func (export "deploy")) ) "#, - Err("Maximum number of pages should not exceed the configured maximum.") - ); - } + Err("Maximum number of pages should not exceed the configured maximum.") + ); + } - mod imports { - use super::*; + mod imports { + use super::*; - prepare_test!(can_import_legit_function, - r#" + prepare_test!( + can_import_legit_function, + r#" (module (import "env" "nop" (func (param i64))) @@ -461,13 +467,14 @@ mod tests { (func (export "deploy")) ) "#, - Ok(_) - ); - - // even though gas is defined the contract can't import it since - // it is an implementation defined. - prepare_test!(can_not_import_gas_function, - r#" + Ok(_) + ); + + // even though gas is defined the contract can't import it since + // it is an implementation defined. + prepare_test!( + can_not_import_gas_function, + r#" (module (import "env" "gas" (func (param i32))) @@ -475,12 +482,13 @@ mod tests { (func (export "deploy")) ) "#, - Err("module imports a non-existent function") - ); + Err("module imports a non-existent function") + ); - // nothing can be imported from non-"env" module for now. - prepare_test!(non_env_import, - r#" + // nothing can be imported from non-"env" module for now. 
+ prepare_test!( + non_env_import, + r#" (module (import "another_module" "memory" (memory 1 1)) @@ -488,12 +496,13 @@ mod tests { (func (export "deploy")) ) "#, - Err("module has imports from a non-'env' namespace") - ); + Err("module has imports from a non-'env' namespace") + ); - // wrong signature - prepare_test!(wrong_signature, - r#" + // wrong signature + prepare_test!( + wrong_signature, + r#" (module (import "env" "gas" (func (param i64))) @@ -501,11 +510,12 @@ mod tests { (func (export "deploy")) ) "#, - Err("module imports a non-existent function") - ); + Err("module imports a non-existent function") + ); - prepare_test!(unknown_func_name, - r#" + prepare_test!( + unknown_func_name, + r#" (module (import "env" "unknown_func" (func)) @@ -513,44 +523,48 @@ mod tests { (func (export "deploy")) ) "#, - Err("module imports a non-existent function") - ); - } + Err("module imports a non-existent function") + ); + } - mod entrypoints { - use super::*; + mod entrypoints { + use super::*; - prepare_test!(it_works, - r#" + prepare_test!( + it_works, + r#" (module (func (export "call")) (func (export "deploy")) ) "#, - Ok(_) - ); + Ok(_) + ); - prepare_test!(omit_deploy, - r#" + prepare_test!( + omit_deploy, + r#" (module (func (export "call")) ) "#, - Err("deploy function isn't exported") - ); + Err("deploy function isn't exported") + ); - prepare_test!(omit_call, - r#" + prepare_test!( + omit_call, + r#" (module (func (export "deploy")) ) "#, - Err("call function isn't exported") - ); + Err("call function isn't exported") + ); - // Try to use imported function as an entry point. - prepare_test!(try_sneak_export_as_entrypoint, - r#" + // Try to use imported function as an entry point. + prepare_test!( + try_sneak_export_as_entrypoint, + r#" (module (import "env" "panic" (func)) @@ -559,28 +573,30 @@ mod tests { (export "call" (func 0)) ) "#, - Err("entry point points to an imported function") - ); + Err("entry point points to an imported function") + ); - // Try to use imported function as an entry point. - prepare_test!(try_sneak_export_as_global, - r#" + // Try to use imported function as an entry point. + prepare_test!( + try_sneak_export_as_global, + r#" (module (func (export "deploy")) (global (export "call") i32 (i32.const 0)) ) "#, - Err("expected a function") - ); + Err("expected a function") + ); - prepare_test!(wrong_signature, - r#" + prepare_test!( + wrong_signature, + r#" (module (func (export "deploy")) (func (export "call") (param i32)) ) "#, - Err("entry point has wrong signature") - ); - } + Err("entry point has wrong signature") + ); + } } diff --git a/srml/contract/src/wasm/runtime.rs b/srml/contract/src/wasm/runtime.rs index b4a963f931..577bd3f4d3 100644 --- a/srml/contract/src/wasm/runtime.rs +++ b/srml/contract/src/wasm/runtime.rs @@ -16,135 +16,137 @@ //! Environment definition of the wasm smart-contract runtime. 
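// A reader's model of the trap protocol this file implements (illustrative
// sketch, not part of the patch): host functions cannot hand a value back to
// the embedder directly, so ext_return records its payload and then traps on
// purpose; pairing the sandbox error with that recorded state is what
// distinguishes "returned early with data" from a genuine fault.
//
//   enum Outcome { Done, Returned(Vec<u8>), Fault }
//   fn classify(trapped: bool, early: Option<Vec<u8>>) -> Outcome {
//       match (trapped, early) {
//           (false, None) => Outcome::Done,              // ran to completion
//           (true, Some(buf)) => Outcome::Returned(buf), // ext_return path
//           (true, None) => Outcome::Fault,              // a real trap
//           (false, Some(_)) => unreachable!(),          // flag without trap
//       }
//   }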
-use crate::{Schedule, Trait, CodeHash, ComputeDispatchFee, BalanceOf}; -use crate::exec::{Ext, VmExecResult, OutputBuf, EmptyOutputBuf, CallReceipt, InstantiateReceipt}; -use crate::gas::{GasMeter, Token, GasMeterResult, approx_gas_for_balance}; +use crate::exec::{CallReceipt, EmptyOutputBuf, Ext, InstantiateReceipt, OutputBuf, VmExecResult}; +use crate::gas::{approx_gas_for_balance, GasMeter, GasMeterResult, Token}; +use crate::{BalanceOf, CodeHash, ComputeDispatchFee, Schedule, Trait}; +use parity_codec::{Decode, Encode}; +use rstd::mem; +use rstd::prelude::*; +use runtime_primitives::traits::{As, Bounded, CheckedMul}; use sandbox; use system; -use rstd::prelude::*; -use rstd::mem; -use parity_codec::{Decode, Encode}; -use runtime_primitives::traits::{As, CheckedMul, Bounded}; /// Enumerates all possible *special* trap conditions. /// /// In this runtime traps used not only for signaling about errors but also /// to just terminate quickly in some cases. enum SpecialTrap { - /// Signals that trap was generated in response to call `ext_return` host function. - Return(OutputBuf), + /// Signals that trap was generated in response to call `ext_return` host function. + Return(OutputBuf), } /// Can only be used for one call. pub(crate) struct Runtime<'a, 'data, E: Ext + 'a> { - ext: &'a mut E, - input_data: &'data [u8], - // A VM can return a result only once and only by value. So - // we wrap output buffer to make it possible to take the buffer out. - empty_output_buf: Option, - scratch_buf: Vec, - schedule: &'a Schedule<::Gas>, - memory: sandbox::Memory, - gas_meter: &'a mut GasMeter, - special_trap: Option, + ext: &'a mut E, + input_data: &'data [u8], + // A VM can return a result only once and only by value. So + // we wrap output buffer to make it possible to take the buffer out. + empty_output_buf: Option, + scratch_buf: Vec, + schedule: &'a Schedule<::Gas>, + memory: sandbox::Memory, + gas_meter: &'a mut GasMeter, + special_trap: Option, } impl<'a, 'data, E: Ext + 'a> Runtime<'a, 'data, E> { - pub(crate) fn new( - ext: &'a mut E, - input_data: &'data [u8], - empty_output_buf: EmptyOutputBuf, - schedule: &'a Schedule<::Gas>, - memory: sandbox::Memory, - gas_meter: &'a mut GasMeter, - ) -> Self { - Runtime { - ext, - input_data, - empty_output_buf: Some(empty_output_buf), - scratch_buf: Vec::new(), - schedule, - memory, - gas_meter, - special_trap: None, - } - } - - fn memory(&self) -> &sandbox::Memory { - &self.memory - } + pub(crate) fn new( + ext: &'a mut E, + input_data: &'data [u8], + empty_output_buf: EmptyOutputBuf, + schedule: &'a Schedule<::Gas>, + memory: sandbox::Memory, + gas_meter: &'a mut GasMeter, + ) -> Self { + Runtime { + ext, + input_data, + empty_output_buf: Some(empty_output_buf), + scratch_buf: Vec::new(), + schedule, + memory, + gas_meter, + special_trap: None, + } + } + + fn memory(&self) -> &sandbox::Memory { + &self.memory + } } pub(crate) fn to_execution_result( - runtime: Runtime, - sandbox_err: Option, + runtime: Runtime, + sandbox_err: Option, ) -> VmExecResult { - // Check the exact type of the error. It could be plain trap or - // special runtime trap the we must recognize. - match (sandbox_err, runtime.special_trap) { - // No traps were generated. Proceed normally. - (None, None) => VmExecResult::Ok, - // Special case. The trap was the result of the execution `return` host function. - (Some(sandbox::Error::Execution), Some(SpecialTrap::Return(buf))) => VmExecResult::Returned(buf), - // Any other kind of a trap should result in a failure. 
- (Some(_), _) => VmExecResult::Trap("during execution"), - // Any other case (such as special trap flag without actual trap) signifies - // a logic error. - _ => unreachable!(), - } + // Check the exact type of the error. It could be plain trap or + // special runtime trap the we must recognize. + match (sandbox_err, runtime.special_trap) { + // No traps were generated. Proceed normally. + (None, None) => VmExecResult::Ok, + // Special case. The trap was the result of the execution `return` host function. + (Some(sandbox::Error::Execution), Some(SpecialTrap::Return(buf))) => { + VmExecResult::Returned(buf) + } + // Any other kind of a trap should result in a failure. + (Some(_), _) => VmExecResult::Trap("during execution"), + // Any other case (such as special trap flag without actual trap) signifies + // a logic error. + _ => unreachable!(), + } } #[cfg_attr(test, derive(Debug, PartialEq, Eq))] #[derive(Copy, Clone)] pub enum RuntimeToken { - /// Explicit call to the `gas` function. Charge the gas meter - /// with the value provided. - Explicit(u32), - /// The given number of bytes is read from the sandbox memory. - ReadMemory(u32), - /// The given number of bytes is written to the sandbox memory. - WriteMemory(u32), - /// The given number of bytes is read from the sandbox memory and - /// is returned as the return data buffer of the call. - ReturnData(u32), - /// Dispatch fee calculated by `T::ComputeDispatchFee`. - ComputedDispatchFee(Gas), + /// Explicit call to the `gas` function. Charge the gas meter + /// with the value provided. + Explicit(u32), + /// The given number of bytes is read from the sandbox memory. + ReadMemory(u32), + /// The given number of bytes is written to the sandbox memory. + WriteMemory(u32), + /// The given number of bytes is read from the sandbox memory and + /// is returned as the return data buffer of the call. + ReturnData(u32), + /// Dispatch fee calculated by `T::ComputeDispatchFee`. + ComputedDispatchFee(Gas), } impl Token for RuntimeToken { - type Metadata = Schedule; - - fn calculate_amount(&self, metadata: &Schedule) -> T::Gas { - use self::RuntimeToken::*; - let value = match *self { - Explicit(amount) => Some(>::sa(amount)), - ReadMemory(byte_count) => metadata - .sandbox_data_read_cost - .checked_mul(&>::sa(byte_count)), - WriteMemory(byte_count) => metadata - .sandbox_data_write_cost - .checked_mul(&>::sa(byte_count)), - ReturnData(byte_count) => metadata - .return_data_per_byte_cost - .checked_mul(&>::sa(byte_count)), - ComputedDispatchFee(gas) => Some(gas), - }; - - value.unwrap_or_else(|| Bounded::max_value()) - } + type Metadata = Schedule; + + fn calculate_amount(&self, metadata: &Schedule) -> T::Gas { + use self::RuntimeToken::*; + let value = match *self { + Explicit(amount) => Some(>::sa(amount)), + ReadMemory(byte_count) => metadata + .sandbox_data_read_cost + .checked_mul(&>::sa(byte_count)), + WriteMemory(byte_count) => metadata + .sandbox_data_write_cost + .checked_mul(&>::sa(byte_count)), + ReturnData(byte_count) => metadata + .return_data_per_byte_cost + .checked_mul(&>::sa(byte_count)), + ComputedDispatchFee(gas) => Some(gas), + }; + + value.unwrap_or_else(|| Bounded::max_value()) + } } /// Charge the gas meter with the specified token. /// /// Returns `Err(HostError)` if there is not enough gas. 
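// A worked example of the token arithmetic above (the cost figures are
// made-up for illustration): with sandbox_data_read_cost = 2, a
// ReadMemory(100) token charges 2 * 100 = 200 gas, and an overflowing
// multiplication saturates to the Gas type's maximum so the meter trips
// rather than undercharging:
//
//   let amount = 2u64.checked_mul(100).unwrap_or(u64::max_value());
//   assert_eq!(amount, 200);
//   let huge = u64::max_value().checked_mul(2).unwrap_or(u64::max_value());
//   assert_eq!(huge, u64::max_value());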
fn charge_gas>( - gas_meter: &mut GasMeter, - metadata: &Tok::Metadata, - token: Tok, + gas_meter: &mut GasMeter, + metadata: &Tok::Metadata, + token: Tok, ) -> Result<(), sandbox::HostError> { - match gas_meter.charge(metadata, token) { - GasMeterResult::Proceed => Ok(()), - GasMeterResult::OutOfGas => Err(sandbox::HostError), - } + match gas_meter.charge(metadata, token) { + GasMeterResult::Proceed => Ok(()), + GasMeterResult::OutOfGas => Err(sandbox::HostError), + } } /// Read designated chunk from the sandbox memory, consuming an appropriate amount of @@ -156,18 +158,18 @@ fn charge_gas>( /// - out of gas /// - requested buffer is not within the bounds of the sandbox memory. fn read_sandbox_memory( - ctx: &mut Runtime, - ptr: u32, - len: u32, + ctx: &mut Runtime, + ptr: u32, + len: u32, ) -> Result, sandbox::HostError> { - charge_gas(ctx.gas_meter, ctx.schedule, RuntimeToken::ReadMemory(len))?; + charge_gas(ctx.gas_meter, ctx.schedule, RuntimeToken::ReadMemory(len))?; - let mut buf = Vec::new(); - buf.resize(len as usize, 0); + let mut buf = Vec::new(); + buf.resize(len as usize, 0); - ctx.memory().get(ptr, &mut buf)?; + ctx.memory().get(ptr, &mut buf)?; - Ok(buf) + Ok(buf) } /// Write the given buffer to the designated location in the sandbox memory, consuming @@ -179,17 +181,21 @@ fn read_sandbox_memory( /// - out of gas /// - designated area is not within the bounds of the sandbox memory. fn write_sandbox_memory( - schedule: &Schedule, - gas_meter: &mut GasMeter, - memory: &sandbox::Memory, - ptr: u32, - buf: &[u8], + schedule: &Schedule, + gas_meter: &mut GasMeter, + memory: &sandbox::Memory, + ptr: u32, + buf: &[u8], ) -> Result<(), sandbox::HostError> { - charge_gas(gas_meter, schedule, RuntimeToken::WriteMemory(buf.len() as u32))?; + charge_gas( + gas_meter, + schedule, + RuntimeToken::WriteMemory(buf.len() as u32), + )?; - memory.set(ptr, buf)?; + memory.set(ptr, buf)?; - Ok(()) + Ok(()) } // *********************************************************** diff --git a/srml/council/src/lib.rs b/srml/council/src/lib.rs index a13eb7e280..5a0a89158a 100644 --- a/srml/council/src/lib.rs +++ b/srml/council/src/lib.rs @@ -18,134 +18,157 @@ #![cfg_attr(not(feature = "std"), no_std)] -pub mod voting; pub mod motions; pub mod seats; +pub mod voting; -pub use crate::seats::{Trait, Module, RawEvent, Event, VoteIndex}; +pub use crate::seats::{Event, Module, RawEvent, Trait, VoteIndex}; #[cfg(test)] mod tests { - // These re-exports are here for a reason, edit with care - pub use super::*; - pub use runtime_io::with_externalities; - use srml_support::{impl_outer_origin, impl_outer_event, impl_outer_dispatch}; - pub use substrate_primitives::H256; - pub use primitives::BuildStorage; - pub use primitives::traits::{BlakeTwo256, IdentityLookup}; - pub use primitives::testing::{Digest, DigestItem, Header}; - pub use substrate_primitives::{Blake2Hasher}; - pub use {seats, motions, voting}; + // These re-exports are here for a reason, edit with care + pub use super::*; + pub use primitives::testing::{Digest, DigestItem, Header}; + pub use primitives::traits::{BlakeTwo256, IdentityLookup}; + pub use primitives::BuildStorage; + pub use runtime_io::with_externalities; + use srml_support::{impl_outer_dispatch, impl_outer_event, impl_outer_origin}; + pub use substrate_primitives::Blake2Hasher; + pub use substrate_primitives::H256; + pub use {motions, seats, voting}; - impl_outer_origin! { - pub enum Origin for Test { - motions - } - } + impl_outer_origin! 
{ + pub enum Origin for Test { + motions + } + } - impl_outer_event! { - pub enum Event for Test { - balances, democracy, seats, voting, motions, - } - } + impl_outer_event! { + pub enum Event for Test { + balances, democracy, seats, voting, motions, + } + } - impl_outer_dispatch! { - pub enum Call for Test where origin: Origin { - balances::Balances, - democracy::Democracy, - } - } + impl_outer_dispatch! { + pub enum Call for Test where origin: Origin { + balances::Balances, + democracy::Democracy, + } + } - // Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted. - #[derive(Clone, Eq, PartialEq, Debug)] - pub struct Test; - impl system::Trait for Test { - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type Digest = Digest; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = Event; - type Log = DigestItem; - } - impl balances::Trait for Test { - type Balance = u64; - type OnFreeBalanceZero = (); - type OnNewAccount = (); - type Event = Event; - type TransactionPayment = (); - type TransferPayment = (); - type DustRemoval = (); - } - impl democracy::Trait for Test { - type Currency = balances::Module; - type Proposal = Call; - type Event = Event; - } - impl seats::Trait for Test { - type Event = Event; - type BadPresentation = (); - type BadReaper = (); - } - impl motions::Trait for Test { - type Origin = Origin; - type Proposal = Call; - type Event = Event; - } - impl voting::Trait for Test { - type Event = Event; - } + // Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted. + #[derive(Clone, Eq, PartialEq, Debug)] + pub struct Test; + impl system::Trait for Test { + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type Digest = Digest; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = Event; + type Log = DigestItem; + } + impl balances::Trait for Test { + type Balance = u64; + type OnFreeBalanceZero = (); + type OnNewAccount = (); + type Event = Event; + type TransactionPayment = (); + type TransferPayment = (); + type DustRemoval = (); + } + impl democracy::Trait for Test { + type Currency = balances::Module; + type Proposal = Call; + type Event = Event; + } + impl seats::Trait for Test { + type Event = Event; + type BadPresentation = (); + type BadReaper = (); + } + impl motions::Trait for Test { + type Origin = Origin; + type Proposal = Call; + type Event = Event; + } + impl voting::Trait for Test { + type Event = Event; + } - pub fn new_test_ext(with_council: bool) -> runtime_io::TestExternalities { - let mut t = system::GenesisConfig::::default().build_storage().unwrap().0; - t.extend(balances::GenesisConfig::{ - transaction_base_fee: 0, - transaction_byte_fee: 0, - balances: vec![(1, 10), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)], - existential_deposit: 0, - transfer_fee: 0, - creation_fee: 0, - vesting: vec![], - }.build_storage().unwrap().0); - t.extend(democracy::GenesisConfig::{ - launch_period: 1, - voting_period: 3, - minimum_deposit: 1, - public_delay: 0, - max_lock_periods: 6, - }.build_storage().unwrap().0); - t.extend(seats::GenesisConfig:: { - candidacy_bond: 9, - voter_bond: 3, - present_slash_per_voter: 1, - carry_count: 2, - inactive_grace_period: 1, - active_council: if with_council { vec![ - (1, 10), - (2, 10), - (3, 10) - ] } else { vec![] }, - approval_voting_period: 4, - 
presentation_duration: 2, - desired_seats: 2, - term_duration: 5, - }.build_storage().unwrap().0); - t.extend(voting::GenesisConfig:: { - cooloff_period: 2, - voting_period: 1, - enact_delay_period: 0, - }.build_storage().unwrap().0); - runtime_io::TestExternalities::new(t) - } + pub fn new_test_ext(with_council: bool) -> runtime_io::TestExternalities { + let mut t = system::GenesisConfig::::default() + .build_storage() + .unwrap() + .0; + t.extend( + balances::GenesisConfig:: { + transaction_base_fee: 0, + transaction_byte_fee: 0, + balances: vec![(1, 10), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)], + existential_deposit: 0, + transfer_fee: 0, + creation_fee: 0, + vesting: vec![], + } + .build_storage() + .unwrap() + .0, + ); + t.extend( + democracy::GenesisConfig:: { + launch_period: 1, + voting_period: 3, + minimum_deposit: 1, + public_delay: 0, + max_lock_periods: 6, + } + .build_storage() + .unwrap() + .0, + ); + t.extend( + seats::GenesisConfig:: { + candidacy_bond: 9, + voter_bond: 3, + present_slash_per_voter: 1, + carry_count: 2, + inactive_grace_period: 1, + active_council: if with_council { + vec![(1, 10), (2, 10), (3, 10)] + } else { + vec![] + }, + approval_voting_period: 4, + presentation_duration: 2, + desired_seats: 2, + term_duration: 5, + } + .build_storage() + .unwrap() + .0, + ); + t.extend( + voting::GenesisConfig:: { + cooloff_period: 2, + voting_period: 1, + enact_delay_period: 0, + } + .build_storage() + .unwrap() + .0, + ); + runtime_io::TestExternalities::new(t) + } - pub type System = system::Module; - pub type Balances = balances::Module; - pub type Democracy = democracy::Module; - pub type Council = seats::Module; - pub type CouncilVoting = voting::Module; - pub type CouncilMotions = motions::Module; + pub type System = system::Module; + pub type Balances = balances::Module; + pub type Democracy = democracy::Module; + pub type Council = seats::Module; + pub type CouncilVoting = voting::Module; + pub type CouncilMotions = motions::Module; } diff --git a/srml/council/src/motions.rs b/srml/council/src/motions.rs index 3bbe463780..e72d3188c7 100644 --- a/srml/council/src/motions.rs +++ b/srml/council/src/motions.rs @@ -16,35 +16,35 @@ //! Council voting system. +use super::{Module as Council, Trait as CouncilTrait}; +use primitives::traits::{EnsureOrigin, Hash}; use rstd::prelude::*; use rstd::result; -use substrate_primitives::u32_trait::Value as U32; -use primitives::traits::{Hash, EnsureOrigin}; use srml_support::dispatch::{Dispatchable, Parameter}; -use srml_support::{StorageValue, StorageMap, decl_module, decl_event, decl_storage, ensure}; -use super::{Trait as CouncilTrait, Module as Council}; +use srml_support::{decl_event, decl_module, decl_storage, ensure, StorageMap, StorageValue}; +use substrate_primitives::u32_trait::Value as U32; use system::{self, ensure_signed}; /// Simple index type for proposal counting. pub type ProposalIndex = u32; pub trait Trait: CouncilTrait { - /// The outer origin type. - type Origin: From; + /// The outer origin type. + type Origin: From; - /// The outer call dispatch type. - type Proposal: Parameter + Dispatchable::Origin>; + /// The outer call dispatch type. + type Proposal: Parameter + Dispatchable::Origin>; - /// The outer event type. - type Event: From> + Into<::Event>; + /// The outer event type. + type Event: From> + Into<::Event>; } /// Origin for the council module. 
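// A quick model of how this origin is consumed by ensure_council_members
// further down (illustration only): an origin carrying Members(x) satisfies
// a required threshold n exactly when x >= n, and the threshold itself is
// returned on success:
//
//   ensure_council_members(Origin::Members(3), 2) // -> Ok(2)
//   ensure_council_members(Origin::Members(1), 2) // -> Err("bad origin: ...")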
#[derive(PartialEq, Eq, Clone)] #[cfg_attr(feature = "std", derive(Debug))] pub enum Origin { - /// It has been condoned by a given number of council members. - Members(u32), + /// It has been condoned by a given number of council members. + Members(u32), } decl_event!( @@ -64,292 +64,444 @@ decl_event!( ); decl_module! { - pub struct Module for enum Call where origin: ::Origin { - fn deposit_event() = default; - fn propose(origin, #[compact] threshold: u32, proposal: Box<::Proposal>) { - let who = ensure_signed(origin)?; - - ensure!(Self::is_councillor(&who), "proposer not on council"); - - let proposal_hash = T::Hashing::hash_of(&proposal); - - ensure!(!>::exists(proposal_hash), "duplicate proposals not allowed"); - - if threshold < 2 { - let ok = proposal.dispatch(Origin::Members(1).into()).is_ok(); - Self::deposit_event(RawEvent::Executed(proposal_hash, ok)); - } else { - let index = Self::proposal_count(); - >::mutate(|i| *i += 1); - >::mutate(|proposals| proposals.push(proposal_hash)); - >::insert(proposal_hash, *proposal); - >::insert(proposal_hash, (index, threshold, vec![who.clone()], vec![])); - - Self::deposit_event(RawEvent::Proposed(who, index, proposal_hash, threshold)); - } - } - - fn vote(origin, proposal: T::Hash, #[compact] index: ProposalIndex, approve: bool) { - let who = ensure_signed(origin)?; - - ensure!(Self::is_councillor(&who), "voter not on council"); - - let mut voting = Self::voting(&proposal).ok_or("proposal must exist")?; - ensure!(voting.0 == index, "mismatched index"); - - let position_yes = voting.2.iter().position(|a| a == &who); - let position_no = voting.3.iter().position(|a| a == &who); - - if approve { - if position_yes.is_none() { - voting.2.push(who.clone()); - } else { - return Err("duplicate vote ignored") - } - if let Some(pos) = position_no { - voting.3.swap_remove(pos); - } - } else { - if position_no.is_none() { - voting.3.push(who.clone()); - } else { - return Err("duplicate vote ignored") - } - if let Some(pos) = position_yes { - voting.2.swap_remove(pos); - } - } - - let yes_votes = voting.2.len() as u32; - let no_votes = voting.3.len() as u32; - Self::deposit_event(RawEvent::Voted(who, proposal, approve, yes_votes, no_votes)); - - let threshold = voting.1; - let potential_votes = >::active_council().len() as u32; - let approved = yes_votes >= threshold; - let disapproved = potential_votes.saturating_sub(no_votes) < threshold; - if approved || disapproved { - if approved { - Self::deposit_event(RawEvent::Approved(proposal)); - - // execute motion, assuming it exists. 
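// A worked check of the approve/disapprove arithmetic above, using the
// numbers from motions_disapproval_works in the tests below: threshold = 3,
// three council members, yes_votes = 1 (the proposer), no_votes = 1. At most
// 3 - 1 = 2 yes votes remain reachable, and 2 < 3, so the motion can never
// pass and is disapproved immediately instead of being left pending:
//
//   let (threshold, seats, yes, no) = (3u32, 3u32, 1u32, 1u32);
//   assert!(!(yes >= threshold));                  // not approved
//   assert!(seats.saturating_sub(no) < threshold); // disapproved early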
- if let Some(p) = >::take(&proposal) { - let ok = p.dispatch(Origin::Members(threshold).into()).is_ok(); - Self::deposit_event(RawEvent::Executed(proposal, ok)); - } - } else { - // disapproved - Self::deposit_event(RawEvent::Disapproved(proposal)); - } - - // remove vote - >::remove(&proposal); - >::mutate(|proposals| proposals.retain(|h| h != &proposal)); - } else { - // update voting - >::insert(&proposal, voting); - } - } - } + pub struct Module for enum Call where origin: ::Origin { + fn deposit_event() = default; + fn propose(origin, #[compact] threshold: u32, proposal: Box<::Proposal>) { + let who = ensure_signed(origin)?; + + ensure!(Self::is_councillor(&who), "proposer not on council"); + + let proposal_hash = T::Hashing::hash_of(&proposal); + + ensure!(!>::exists(proposal_hash), "duplicate proposals not allowed"); + + if threshold < 2 { + let ok = proposal.dispatch(Origin::Members(1).into()).is_ok(); + Self::deposit_event(RawEvent::Executed(proposal_hash, ok)); + } else { + let index = Self::proposal_count(); + >::mutate(|i| *i += 1); + >::mutate(|proposals| proposals.push(proposal_hash)); + >::insert(proposal_hash, *proposal); + >::insert(proposal_hash, (index, threshold, vec![who.clone()], vec![])); + + Self::deposit_event(RawEvent::Proposed(who, index, proposal_hash, threshold)); + } + } + + fn vote(origin, proposal: T::Hash, #[compact] index: ProposalIndex, approve: bool) { + let who = ensure_signed(origin)?; + + ensure!(Self::is_councillor(&who), "voter not on council"); + + let mut voting = Self::voting(&proposal).ok_or("proposal must exist")?; + ensure!(voting.0 == index, "mismatched index"); + + let position_yes = voting.2.iter().position(|a| a == &who); + let position_no = voting.3.iter().position(|a| a == &who); + + if approve { + if position_yes.is_none() { + voting.2.push(who.clone()); + } else { + return Err("duplicate vote ignored") + } + if let Some(pos) = position_no { + voting.3.swap_remove(pos); + } + } else { + if position_no.is_none() { + voting.3.push(who.clone()); + } else { + return Err("duplicate vote ignored") + } + if let Some(pos) = position_yes { + voting.2.swap_remove(pos); + } + } + + let yes_votes = voting.2.len() as u32; + let no_votes = voting.3.len() as u32; + Self::deposit_event(RawEvent::Voted(who, proposal, approve, yes_votes, no_votes)); + + let threshold = voting.1; + let potential_votes = >::active_council().len() as u32; + let approved = yes_votes >= threshold; + let disapproved = potential_votes.saturating_sub(no_votes) < threshold; + if approved || disapproved { + if approved { + Self::deposit_event(RawEvent::Approved(proposal)); + + // execute motion, assuming it exists. + if let Some(p) = >::take(&proposal) { + let ok = p.dispatch(Origin::Members(threshold).into()).is_ok(); + Self::deposit_event(RawEvent::Executed(proposal, ok)); + } + } else { + // disapproved + Self::deposit_event(RawEvent::Disapproved(proposal)); + } + + // remove vote + >::remove(&proposal); + >::mutate(|proposals| proposals.retain(|h| h != &proposal)); + } else { + // update voting + >::insert(&proposal, voting); + } + } + } } decl_storage! { - trait Store for Module as CouncilMotions { - /// The (hashes of) the active proposals. - pub Proposals get(proposals): Vec; - /// Actual proposal for a given hash, if it's current. - pub ProposalOf get(proposal_of): map T::Hash => Option< ::Proposal >; - /// Votes for a given proposal: (required_yes_votes, yes_voters, no_voters). 
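// Note that the stored value has four fields, as the type below shows:
// (proposal_index, required_yes_votes, yes_voters, no_voters). For example,
// right after propose(Origin::signed(1), 3, ..) in the tests further down,
// the entry reads (0, 3, vec![1], vec![]): proposal index 0, needing three
// approvals, with the proposer pre-recorded as a yes voter.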
- pub Voting get(voting): map T::Hash => Option<(ProposalIndex, u32, Vec, Vec)>; - /// Proposals so far. - pub ProposalCount get(proposal_count): u32; - } - add_extra_genesis { - build(|_, _, _| {}); - } + trait Store for Module as CouncilMotions { + /// The (hashes of) the active proposals. + pub Proposals get(proposals): Vec; + /// Actual proposal for a given hash, if it's current. + pub ProposalOf get(proposal_of): map T::Hash => Option< ::Proposal >; + /// Votes for a given proposal: (required_yes_votes, yes_voters, no_voters). + pub Voting get(voting): map T::Hash => Option<(ProposalIndex, u32, Vec, Vec)>; + /// Proposals so far. + pub ProposalCount get(proposal_count): u32; + } + add_extra_genesis { + build(|_, _, _| {}); + } } impl Module { - pub fn is_councillor(who: &T::AccountId) -> bool { - >::active_council().iter() - .any(|&(ref a, _)| a == who) - } + pub fn is_councillor(who: &T::AccountId) -> bool { + >::active_council() + .iter() + .any(|&(ref a, _)| a == who) + } } /// Ensure that the origin `o` represents at least `n` council members. Returns /// `Ok` or an `Err` otherwise. -pub fn ensure_council_members(o: OuterOrigin, n: u32) -> result::Result - where OuterOrigin: Into> +pub fn ensure_council_members( + o: OuterOrigin, + n: u32, +) -> result::Result +where + OuterOrigin: Into>, { - match o.into() { - Some(Origin::Members(x)) if x >= n => Ok(n), - _ => Err("bad origin: expected to be a threshold number of council members"), - } + match o.into() { + Some(Origin::Members(x)) if x >= n => Ok(n), + _ => Err("bad origin: expected to be a threshold number of council members"), + } } pub struct EnsureMembers(::rstd::marker::PhantomData); impl EnsureOrigin for EnsureMembers - where O: Into> +where + O: Into>, { - type Success = u32; - fn ensure_origin(o: O) -> result::Result { - ensure_council_members(o, N::VALUE) - } + type Success = u32; + fn ensure_origin(o: O) -> result::Result { + ensure_council_members(o, N::VALUE) + } } #[cfg(test)] mod tests { - use super::*; - use super::RawEvent; - use crate::tests::*; - use crate::tests::{Call, Origin, Event as OuterEvent}; - use srml_support::{Hashable, assert_ok, assert_noop}; - use system::{EventRecord, Phase}; - use hex_literal::{hex, hex_impl}; - - #[test] - fn motions_basic_environment_works() { - with_externalities(&mut new_test_ext(true), || { - System::set_block_number(1); - assert_eq!(Balances::free_balance(&42), 0); - assert_eq!(CouncilMotions::proposals(), Vec::::new()); - }); - } - - fn set_balance_proposal(value: u64) -> Call { - Call::Balances(balances::Call::set_balance(42, value.into(), 0)) - } - - #[test] - fn motions_propose_works() { - with_externalities(&mut new_test_ext(true), || { - System::set_block_number(1); - let proposal = set_balance_proposal(42); - let hash = proposal.blake2_256().into(); - assert_ok!(CouncilMotions::propose(Origin::signed(1), 3, Box::new(proposal.clone()))); - assert_eq!(CouncilMotions::proposals(), vec![hash]); - assert_eq!(CouncilMotions::proposal_of(&hash), Some(proposal)); - assert_eq!(CouncilMotions::voting(&hash), Some((0, 3, vec![1], Vec::::new()))); - - assert_eq!(System::events(), vec![ - EventRecord { - phase: Phase::ApplyExtrinsic(0), - event: OuterEvent::motions(RawEvent::Proposed(1, 0, hex!["cd0b662a49f004093b80600415cf4126399af0d27ed6c185abeb1469c17eb5bf"].into(), 3)) - } - ]); - }); - } - - #[test] - fn motions_ignoring_non_council_proposals_works() { - with_externalities(&mut new_test_ext(true), || { - System::set_block_number(1); - let proposal = 
set_balance_proposal(42); - assert_noop!(CouncilMotions::propose(Origin::signed(42), 3, Box::new(proposal.clone())), "proposer not on council"); - }); - } - - #[test] - fn motions_ignoring_non_council_votes_works() { - with_externalities(&mut new_test_ext(true), || { - System::set_block_number(1); - let proposal = set_balance_proposal(42); - let hash: H256 = proposal.blake2_256().into(); - assert_ok!(CouncilMotions::propose(Origin::signed(1), 3, Box::new(proposal.clone()))); - assert_noop!(CouncilMotions::vote(Origin::signed(42), hash.clone(), 0, true), "voter not on council"); - }); - } - - #[test] - fn motions_ignoring_bad_index_council_vote_works() { - with_externalities(&mut new_test_ext(true), || { - System::set_block_number(3); - let proposal = set_balance_proposal(42); - let hash: H256 = proposal.blake2_256().into(); - assert_ok!(CouncilMotions::propose(Origin::signed(1), 3, Box::new(proposal.clone()))); - assert_noop!(CouncilMotions::vote(Origin::signed(2), hash.clone(), 1, true), "mismatched index"); - }); - } - - #[test] - fn motions_revoting_works() { - with_externalities(&mut new_test_ext(true), || { - System::set_block_number(1); - let proposal = set_balance_proposal(42); - let hash: H256 = proposal.blake2_256().into(); - assert_ok!(CouncilMotions::propose(Origin::signed(1), 2, Box::new(proposal.clone()))); - assert_eq!(CouncilMotions::voting(&hash), Some((0, 2, vec![1], Vec::::new()))); - assert_noop!(CouncilMotions::vote(Origin::signed(1), hash.clone(), 0, true), "duplicate vote ignored"); - assert_ok!(CouncilMotions::vote(Origin::signed(1), hash.clone(), 0, false)); - assert_eq!(CouncilMotions::voting(&hash), Some((0, 2, Vec::::new(), vec![1]))); - assert_noop!(CouncilMotions::vote(Origin::signed(1), hash.clone(), 0, false), "duplicate vote ignored"); - - assert_eq!(System::events(), vec![ - EventRecord { - phase: Phase::ApplyExtrinsic(0), - event: OuterEvent::motions(RawEvent::Proposed(1, 0, hex!["cd0b662a49f004093b80600415cf4126399af0d27ed6c185abeb1469c17eb5bf"].into(), 2)) - }, - EventRecord { - phase: Phase::ApplyExtrinsic(0), - event: OuterEvent::motions(RawEvent::Voted(1, hex!["cd0b662a49f004093b80600415cf4126399af0d27ed6c185abeb1469c17eb5bf"].into(), false, 0, 1)) - } - ]); - }); - } - - #[test] - fn motions_disapproval_works() { - with_externalities(&mut new_test_ext(true), || { - System::set_block_number(1); - let proposal = set_balance_proposal(42); - let hash: H256 = proposal.blake2_256().into(); - assert_ok!(CouncilMotions::propose(Origin::signed(1), 3, Box::new(proposal.clone()))); - assert_ok!(CouncilMotions::vote(Origin::signed(2), hash.clone(), 0, false)); - - assert_eq!(System::events(), vec![ - EventRecord { - phase: Phase::ApplyExtrinsic(0), - event: OuterEvent::motions(RawEvent::Proposed(1, 0, hex!["cd0b662a49f004093b80600415cf4126399af0d27ed6c185abeb1469c17eb5bf"].into(), 3)) - }, - EventRecord { - phase: Phase::ApplyExtrinsic(0), - event: OuterEvent::motions(RawEvent::Voted(2, hex!["cd0b662a49f004093b80600415cf4126399af0d27ed6c185abeb1469c17eb5bf"].into(), false, 1, 1)) - }, - EventRecord { - phase: Phase::ApplyExtrinsic(0), - event: OuterEvent::motions(RawEvent::Disapproved(hex!["cd0b662a49f004093b80600415cf4126399af0d27ed6c185abeb1469c17eb5bf"].into())) - } - ]); - }); - } - - #[test] - fn motions_approval_works() { - with_externalities(&mut new_test_ext(true), || { - System::set_block_number(1); - let proposal = set_balance_proposal(42); - let hash: H256 = proposal.blake2_256().into(); - assert_ok!(CouncilMotions::propose(Origin::signed(1), 2, 
Box::new(proposal.clone()))); - assert_ok!(CouncilMotions::vote(Origin::signed(2), hash.clone(), 0, true)); - - assert_eq!(System::events(), vec![ - EventRecord { - phase: Phase::ApplyExtrinsic(0), - event: OuterEvent::motions(RawEvent::Proposed(1, 0, hex!["cd0b662a49f004093b80600415cf4126399af0d27ed6c185abeb1469c17eb5bf"].into(), 2)) - }, - EventRecord { - phase: Phase::ApplyExtrinsic(0), - event: OuterEvent::motions(RawEvent::Voted(2, hex!["cd0b662a49f004093b80600415cf4126399af0d27ed6c185abeb1469c17eb5bf"].into(), true, 2, 0)) - }, - EventRecord { - phase: Phase::ApplyExtrinsic(0), - event: OuterEvent::motions(RawEvent::Approved(hex!["cd0b662a49f004093b80600415cf4126399af0d27ed6c185abeb1469c17eb5bf"].into())) - }, - EventRecord { - phase: Phase::ApplyExtrinsic(0), - event: OuterEvent::motions(RawEvent::Executed(hex!["cd0b662a49f004093b80600415cf4126399af0d27ed6c185abeb1469c17eb5bf"].into(), false)) - } - ]); - }); - } + use super::RawEvent; + use super::*; + use crate::tests::*; + use crate::tests::{Call, Event as OuterEvent, Origin}; + use hex_literal::{hex, hex_impl}; + use srml_support::{assert_noop, assert_ok, Hashable}; + use system::{EventRecord, Phase}; + + #[test] + fn motions_basic_environment_works() { + with_externalities(&mut new_test_ext(true), || { + System::set_block_number(1); + assert_eq!(Balances::free_balance(&42), 0); + assert_eq!(CouncilMotions::proposals(), Vec::::new()); + }); + } + + fn set_balance_proposal(value: u64) -> Call { + Call::Balances(balances::Call::set_balance(42, value.into(), 0)) + } + + #[test] + fn motions_propose_works() { + with_externalities(&mut new_test_ext(true), || { + System::set_block_number(1); + let proposal = set_balance_proposal(42); + let hash = proposal.blake2_256().into(); + assert_ok!(CouncilMotions::propose( + Origin::signed(1), + 3, + Box::new(proposal.clone()) + )); + assert_eq!(CouncilMotions::proposals(), vec![hash]); + assert_eq!(CouncilMotions::proposal_of(&hash), Some(proposal)); + assert_eq!( + CouncilMotions::voting(&hash), + Some((0, 3, vec![1], Vec::::new())) + ); + + assert_eq!( + System::events(), + vec![EventRecord { + phase: Phase::ApplyExtrinsic(0), + event: OuterEvent::motions(RawEvent::Proposed( + 1, + 0, + hex!["cd0b662a49f004093b80600415cf4126399af0d27ed6c185abeb1469c17eb5bf"] + .into(), + 3 + )) + }] + ); + }); + } + + #[test] + fn motions_ignoring_non_council_proposals_works() { + with_externalities(&mut new_test_ext(true), || { + System::set_block_number(1); + let proposal = set_balance_proposal(42); + assert_noop!( + CouncilMotions::propose(Origin::signed(42), 3, Box::new(proposal.clone())), + "proposer not on council" + ); + }); + } + + #[test] + fn motions_ignoring_non_council_votes_works() { + with_externalities(&mut new_test_ext(true), || { + System::set_block_number(1); + let proposal = set_balance_proposal(42); + let hash: H256 = proposal.blake2_256().into(); + assert_ok!(CouncilMotions::propose( + Origin::signed(1), + 3, + Box::new(proposal.clone()) + )); + assert_noop!( + CouncilMotions::vote(Origin::signed(42), hash.clone(), 0, true), + "voter not on council" + ); + }); + } + + #[test] + fn motions_ignoring_bad_index_council_vote_works() { + with_externalities(&mut new_test_ext(true), || { + System::set_block_number(3); + let proposal = set_balance_proposal(42); + let hash: H256 = proposal.blake2_256().into(); + assert_ok!(CouncilMotions::propose( + Origin::signed(1), + 3, + Box::new(proposal.clone()) + )); + assert_noop!( + CouncilMotions::vote(Origin::signed(2), hash.clone(), 1, true), + 
"mismatched index" + ); + }); + } + + #[test] + fn motions_revoting_works() { + with_externalities(&mut new_test_ext(true), || { + System::set_block_number(1); + let proposal = set_balance_proposal(42); + let hash: H256 = proposal.blake2_256().into(); + assert_ok!(CouncilMotions::propose( + Origin::signed(1), + 2, + Box::new(proposal.clone()) + )); + assert_eq!( + CouncilMotions::voting(&hash), + Some((0, 2, vec![1], Vec::::new())) + ); + assert_noop!( + CouncilMotions::vote(Origin::signed(1), hash.clone(), 0, true), + "duplicate vote ignored" + ); + assert_ok!(CouncilMotions::vote( + Origin::signed(1), + hash.clone(), + 0, + false + )); + assert_eq!( + CouncilMotions::voting(&hash), + Some((0, 2, Vec::::new(), vec![1])) + ); + assert_noop!( + CouncilMotions::vote(Origin::signed(1), hash.clone(), 0, false), + "duplicate vote ignored" + ); + + assert_eq!( + System::events(), + vec![ + EventRecord { + phase: Phase::ApplyExtrinsic(0), + event: OuterEvent::motions(RawEvent::Proposed( + 1, + 0, + hex![ + "cd0b662a49f004093b80600415cf4126399af0d27ed6c185abeb1469c17eb5bf" + ] + .into(), + 2 + )) + }, + EventRecord { + phase: Phase::ApplyExtrinsic(0), + event: OuterEvent::motions(RawEvent::Voted( + 1, + hex![ + "cd0b662a49f004093b80600415cf4126399af0d27ed6c185abeb1469c17eb5bf" + ] + .into(), + false, + 0, + 1 + )) + } + ] + ); + }); + } + + #[test] + fn motions_disapproval_works() { + with_externalities(&mut new_test_ext(true), || { + System::set_block_number(1); + let proposal = set_balance_proposal(42); + let hash: H256 = proposal.blake2_256().into(); + assert_ok!(CouncilMotions::propose( + Origin::signed(1), + 3, + Box::new(proposal.clone()) + )); + assert_ok!(CouncilMotions::vote( + Origin::signed(2), + hash.clone(), + 0, + false + )); + + assert_eq!( + System::events(), + vec![ + EventRecord { + phase: Phase::ApplyExtrinsic(0), + event: OuterEvent::motions(RawEvent::Proposed( + 1, + 0, + hex![ + "cd0b662a49f004093b80600415cf4126399af0d27ed6c185abeb1469c17eb5bf" + ] + .into(), + 3 + )) + }, + EventRecord { + phase: Phase::ApplyExtrinsic(0), + event: OuterEvent::motions(RawEvent::Voted( + 2, + hex![ + "cd0b662a49f004093b80600415cf4126399af0d27ed6c185abeb1469c17eb5bf" + ] + .into(), + false, + 1, + 1 + )) + }, + EventRecord { + phase: Phase::ApplyExtrinsic(0), + event: OuterEvent::motions(RawEvent::Disapproved( + hex![ + "cd0b662a49f004093b80600415cf4126399af0d27ed6c185abeb1469c17eb5bf" + ] + .into() + )) + } + ] + ); + }); + } + + #[test] + fn motions_approval_works() { + with_externalities(&mut new_test_ext(true), || { + System::set_block_number(1); + let proposal = set_balance_proposal(42); + let hash: H256 = proposal.blake2_256().into(); + assert_ok!(CouncilMotions::propose( + Origin::signed(1), + 2, + Box::new(proposal.clone()) + )); + assert_ok!(CouncilMotions::vote( + Origin::signed(2), + hash.clone(), + 0, + true + )); + + assert_eq!( + System::events(), + vec![ + EventRecord { + phase: Phase::ApplyExtrinsic(0), + event: OuterEvent::motions(RawEvent::Proposed( + 1, + 0, + hex![ + "cd0b662a49f004093b80600415cf4126399af0d27ed6c185abeb1469c17eb5bf" + ] + .into(), + 2 + )) + }, + EventRecord { + phase: Phase::ApplyExtrinsic(0), + event: OuterEvent::motions(RawEvent::Voted( + 2, + hex![ + "cd0b662a49f004093b80600415cf4126399af0d27ed6c185abeb1469c17eb5bf" + ] + .into(), + true, + 2, + 0 + )) + }, + EventRecord { + phase: Phase::ApplyExtrinsic(0), + event: OuterEvent::motions(RawEvent::Approved( + hex![ + "cd0b662a49f004093b80600415cf4126399af0d27ed6c185abeb1469c17eb5bf" + ] + .into() + )) + }, 
+ EventRecord { + phase: Phase::ApplyExtrinsic(0), + event: OuterEvent::motions(RawEvent::Executed( + hex![ + "cd0b662a49f004093b80600415cf4126399af0d27ed6c185abeb1469c17eb5bf" + ] + .into(), + false + )) + } + ] + ); + }); + } } diff --git a/srml/council/src/seats.rs b/srml/council/src/seats.rs index 9ace6227da..970fb90033 100644 --- a/srml/council/src/seats.rs +++ b/srml/council/src/seats.rs @@ -16,14 +16,17 @@ //! Council system: Handles the voting in and maintenance of council members. +use democracy; +use primitives::traits::{As, One, StaticLookup, Zero}; use rstd::prelude::*; -use primitives::traits::{Zero, One, As, StaticLookup}; use runtime_io::print; use srml_support::{ - StorageValue, StorageMap, dispatch::Result, decl_storage, decl_event, ensure, - traits::{Currency, ReservableCurrency, OnUnbalanced} + decl_event, decl_storage, + dispatch::Result, + ensure, + traits::{Currency, OnUnbalanced, ReservableCurrency}, + StorageMap, StorageValue, }; -use democracy; use system::{self, ensure_signed}; // no polynomial attacks: @@ -84,286 +87,289 @@ use srml_support::decl_module; pub type VoteIndex = u32; -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; -type NegativeImbalanceOf = <::Currency as Currency<::AccountId>>::NegativeImbalance; +type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; +type NegativeImbalanceOf = <::Currency as Currency< + ::AccountId, +>>::NegativeImbalance; pub trait Trait: democracy::Trait { - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; - /// Handler for the unbalanced reduction when slashing a validator. - type BadPresentation: OnUnbalanced>; + /// Handler for the unbalanced reduction when slashing a validator. + type BadPresentation: OnUnbalanced>; - /// Handler for the unbalanced reduction when slashing an invalid reaping attempt. - type BadReaper: OnUnbalanced>; + /// Handler for the unbalanced reduction when slashing an invalid reaping attempt. + type BadReaper: OnUnbalanced>; } decl_module! { - pub struct Module for enum Call where origin: T::Origin { - fn deposit_event() = default; - - /// Set candidate approvals. Approval slots stay valid as long as candidates in those slots - /// are registered. - fn set_approvals(origin, votes: Vec, #[compact] index: VoteIndex) -> Result { - let who = ensure_signed(origin)?; - Self::do_set_approvals(who, votes, index) - } - - /// Set candidate approvals from a proxy. Approval slots stay valid as long as candidates in those slots - /// are registered. - fn proxy_set_approvals(origin, votes: Vec, #[compact] index: VoteIndex) -> Result { - let who = >::proxy(ensure_signed(origin)?).ok_or("not a proxy")?; - Self::do_set_approvals(who, votes, index) - } - - /// Remove a voter. For it not to be a bond-consuming no-op, all approved candidate indices - /// must now be either unregistered or registered to a candidate that registered the slot after - /// the voter gave their last approval set. - /// - /// May be called by anyone. Returns the voter deposit to `signed`. 
- fn reap_inactive_voter( - origin, - #[compact] reporter_index: u32, - who: ::Source, - #[compact] who_index: u32, - #[compact] assumed_vote_index: VoteIndex - ) { - let reporter = ensure_signed(origin)?; - - let who = T::Lookup::lookup(who)?; - ensure!(!Self::presentation_active(), "cannot reap during presentation period"); - ensure!(Self::voter_last_active(&reporter).is_some(), "reporter must be a voter"); - let last_active = Self::voter_last_active(&who).ok_or("target for inactivity cleanup must be active")?; - ensure!(assumed_vote_index == Self::vote_index(), "vote index not current"); - ensure!(assumed_vote_index > last_active + Self::inactivity_grace_period(), "cannot reap during grace period"); - let voters = Self::voters(); - let reporter_index = reporter_index as usize; - let who_index = who_index as usize; - ensure!(reporter_index < voters.len() && voters[reporter_index] == reporter, "bad reporter index"); - ensure!(who_index < voters.len() && voters[who_index] == who, "bad target index"); - - // will definitely kill one of signed or who now. - - let valid = !Self::approvals_of(&who).iter() - .zip(Self::candidates().iter()) - .any(|(&appr, addr)| - appr && - *addr != T::AccountId::default() && - Self::candidate_reg_info(addr).map_or(false, |x| x.0 <= last_active)/*defensive only: all items in candidates list are registered*/ - ); - - Self::remove_voter( - if valid { &who } else { &reporter }, - if valid { who_index } else { reporter_index }, - voters - ); - if valid { - // This only fails if `reporter` doesn't exist, which it clearly must do since its the origin. - // Still, it's no more harmful to propagate any error at this point. - T::Currency::repatriate_reserved(&who, &reporter, Self::voting_bond())?; - Self::deposit_event(RawEvent::VoterReaped(who, reporter)); - } else { - let imbalance = T::Currency::slash_reserved(&reporter, Self::voting_bond()).0; - T::BadReaper::on_unbalanced(imbalance); - Self::deposit_event(RawEvent::BadReaperSlashed(reporter)); - } - } - - /// Remove a voter. All votes are cancelled and the voter deposit is returned. - fn retract_voter(origin, #[compact] index: u32) { - let who = ensure_signed(origin)?; - - ensure!(!Self::presentation_active(), "cannot retract when presenting"); - ensure!(>::exists(&who), "cannot retract non-voter"); - let voters = Self::voters(); - let index = index as usize; - ensure!(index < voters.len(), "retraction index invalid"); - ensure!(voters[index] == who, "retraction index mismatch"); - - Self::remove_voter(&who, index, voters); - T::Currency::unreserve(&who, Self::voting_bond()); - } - - /// Submit oneself for candidacy. - /// - /// Account must have enough transferrable funds in it to pay the bond. - fn submit_candidacy(origin, #[compact] slot: u32) { - let who = ensure_signed(origin)?; - - ensure!(!Self::is_a_candidate(&who), "duplicate candidate submission"); - let slot = slot as usize; - let count = Self::candidate_count() as usize; - let candidates = Self::candidates(); - ensure!( - (slot == count && count == candidates.len()) || - (slot < candidates.len() && candidates[slot] == T::AccountId::default()), - "invalid candidate slot" - ); - // NOTE: This must be last as it has side-effects. 
- T::Currency::reserve(&who, Self::candidacy_bond()) - .map_err(|_| "candidate has not enough funds")?; - - >::insert(&who, (Self::vote_index(), slot as u32)); - let mut candidates = candidates; - if slot == candidates.len() { - candidates.push(who); - } else { - candidates[slot] = who; - } - >::put(candidates); - >::put(count as u32 + 1); - } - - /// Claim that `signed` is one of the top Self::carry_count() + current_vote().1 candidates. - /// Only works if the `block_number >= current_vote().0` and `< current_vote().0 + presentation_duration()`` - /// `signed` should have at least - fn present_winner( - origin, - candidate: ::Source, - #[compact] total: BalanceOf, - #[compact] index: VoteIndex - ) -> Result { - let who = ensure_signed(origin)?; - ensure!(!total.is_zero(), "stake deposited to present winner and be added to leaderboard should be non-zero"); - - let candidate = T::Lookup::lookup(candidate)?; - ensure!(index == Self::vote_index(), "index not current"); - let (_, _, expiring) = Self::next_finalize().ok_or("cannot present outside of presentation period")?; - let stakes = Self::snapshoted_stakes(); - let voters = Self::voters(); - let bad_presentation_punishment = Self::present_slash_per_voter() * BalanceOf::::sa(voters.len() as u64); - ensure!(T::Currency::can_slash(&who, bad_presentation_punishment), "presenter must have sufficient slashable funds"); - - let mut leaderboard = Self::leaderboard().ok_or("leaderboard must exist while present phase active")?; - ensure!(total > leaderboard[0].0, "candidate not worthy of leaderboard"); - - if let Some(p) = Self::active_council().iter().position(|&(ref c, _)| c == &candidate) { - ensure!(p < expiring.len(), "candidate must not form a duplicated member if elected"); - } - - let (registered_since, candidate_index): (VoteIndex, u32) = - Self::candidate_reg_info(&candidate).ok_or("presented candidate must be current")?; - let actual_total = voters.iter() - .zip(stakes.iter()) - .filter_map(|(voter, stake)| - match Self::voter_last_active(voter) { - Some(b) if b >= registered_since => - Self::approvals_of(voter).get(candidate_index as usize) - .and_then(|approved| if *approved { Some(*stake) } else { None }), - _ => None, - }) - .fold(Zero::zero(), |acc, n| acc + n); - let dupe = leaderboard.iter().find(|&&(_, ref c)| c == &candidate).is_some(); - if total == actual_total && !dupe { - // insert into leaderboard - leaderboard[0] = (total, candidate); - leaderboard.sort_by_key(|&(t, _)| t); - >::put(leaderboard); - Ok(()) - } else { - // we can rest assured it will be Ok since we checked `can_slash` earlier; still - // better safe than sorry. - let imbalance = T::Currency::slash(&who, bad_presentation_punishment).0; - T::BadPresentation::on_unbalanced(imbalance); - Err(if dupe { "duplicate presentation" } else { "incorrect total" }) - } - } - - /// Set the desired member count; if lower than the current count, then seats will not be up - /// election when they expire. If more, then a new vote will be started if one is not already - /// in progress. - fn set_desired_seats(#[compact] count: u32) { - >::put(count); - } - - /// Remove a particular member. A tally will happen instantly (if not already in a presentation - /// period) to fill the seat if removal means that the desired members are not met. - /// This is effective immediately. 
- fn remove_member(who: ::Source) { - let who = T::Lookup::lookup(who)?; - let new_council: Vec<(T::AccountId, T::BlockNumber)> = Self::active_council() - .into_iter() - .filter(|i| i.0 != who) - .collect(); - >::put(new_council); - } - - /// Set the presentation duration. If there is currently a vote being presented for, will - /// invoke `finalize_vote`. - fn set_presentation_duration(#[compact] count: T::BlockNumber) { - >::put(count); - } - - /// Set the presentation duration. If there is current a vote being presented for, will - /// invoke `finalize_vote`. - fn set_term_duration(#[compact] count: T::BlockNumber) { - >::put(count); - } - - fn on_finalize(n: T::BlockNumber) { - if let Err(e) = Self::end_block(n) { - print("Guru meditation"); - print(e); - } - } - } + pub struct Module for enum Call where origin: T::Origin { + fn deposit_event() = default; + + /// Set candidate approvals. Approval slots stay valid as long as candidates in those slots + /// are registered. + fn set_approvals(origin, votes: Vec, #[compact] index: VoteIndex) -> Result { + let who = ensure_signed(origin)?; + Self::do_set_approvals(who, votes, index) + } + + /// Set candidate approvals from a proxy. Approval slots stay valid as long as candidates in those slots + /// are registered. + fn proxy_set_approvals(origin, votes: Vec, #[compact] index: VoteIndex) -> Result { + let who = >::proxy(ensure_signed(origin)?).ok_or("not a proxy")?; + Self::do_set_approvals(who, votes, index) + } + + /// Remove a voter. For it not to be a bond-consuming no-op, all approved candidate indices + /// must now be either unregistered or registered to a candidate that registered the slot after + /// the voter gave their last approval set. + /// + /// May be called by anyone. Returns the voter deposit to `signed`. + fn reap_inactive_voter( + origin, + #[compact] reporter_index: u32, + who: ::Source, + #[compact] who_index: u32, + #[compact] assumed_vote_index: VoteIndex + ) { + let reporter = ensure_signed(origin)?; + + let who = T::Lookup::lookup(who)?; + ensure!(!Self::presentation_active(), "cannot reap during presentation period"); + ensure!(Self::voter_last_active(&reporter).is_some(), "reporter must be a voter"); + let last_active = Self::voter_last_active(&who).ok_or("target for inactivity cleanup must be active")?; + ensure!(assumed_vote_index == Self::vote_index(), "vote index not current"); + ensure!(assumed_vote_index > last_active + Self::inactivity_grace_period(), "cannot reap during grace period"); + let voters = Self::voters(); + let reporter_index = reporter_index as usize; + let who_index = who_index as usize; + ensure!(reporter_index < voters.len() && voters[reporter_index] == reporter, "bad reporter index"); + ensure!(who_index < voters.len() && voters[who_index] == who, "bad target index"); + + // will definitely kill one of signed or who now. + + let valid = !Self::approvals_of(&who).iter() + .zip(Self::candidates().iter()) + .any(|(&appr, addr)| + appr && + *addr != T::AccountId::default() && + Self::candidate_reg_info(addr).map_or(false, |x| x.0 <= last_active)/*defensive only: all items in candidates list are registered*/ + ); + + Self::remove_voter( + if valid { &who } else { &reporter }, + if valid { who_index } else { reporter_index }, + voters + ); + if valid { + // This only fails if `reporter` doesn't exist, which it clearly must do since it's the origin. + // Still, it's no more harmful to propagate any error at this point.
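+ // `repatriate_reserved` moves the reaped voter's reserved voting bond into the reporter's + // free balance, so the reporter is paid for the cleanup out of the bond rather than from issuance.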
+ T::Currency::repatriate_reserved(&who, &reporter, Self::voting_bond())?; + Self::deposit_event(RawEvent::VoterReaped(who, reporter)); + } else { + let imbalance = T::Currency::slash_reserved(&reporter, Self::voting_bond()).0; + T::BadReaper::on_unbalanced(imbalance); + Self::deposit_event(RawEvent::BadReaperSlashed(reporter)); + } + } + + /// Remove a voter. All votes are cancelled and the voter deposit is returned. + fn retract_voter(origin, #[compact] index: u32) { + let who = ensure_signed(origin)?; + + ensure!(!Self::presentation_active(), "cannot retract when presenting"); + ensure!(>::exists(&who), "cannot retract non-voter"); + let voters = Self::voters(); + let index = index as usize; + ensure!(index < voters.len(), "retraction index invalid"); + ensure!(voters[index] == who, "retraction index mismatch"); + + Self::remove_voter(&who, index, voters); + T::Currency::unreserve(&who, Self::voting_bond()); + } + + /// Submit oneself for candidacy. + /// + /// Account must have enough transferable funds in it to pay the bond. + fn submit_candidacy(origin, #[compact] slot: u32) { + let who = ensure_signed(origin)?; + + ensure!(!Self::is_a_candidate(&who), "duplicate candidate submission"); + let slot = slot as usize; + let count = Self::candidate_count() as usize; + let candidates = Self::candidates(); + ensure!( + (slot == count && count == candidates.len()) || + (slot < candidates.len() && candidates[slot] == T::AccountId::default()), + "invalid candidate slot" + ); + // NOTE: This must be last as it has side-effects. + T::Currency::reserve(&who, Self::candidacy_bond()) + .map_err(|_| "candidate has not enough funds")?; + + >::insert(&who, (Self::vote_index(), slot as u32)); + let mut candidates = candidates; + if slot == candidates.len() { + candidates.push(who); + } else { + candidates[slot] = who; + } + >::put(candidates); + >::put(count as u32 + 1); + } + + /// Claim that `signed` is one of the top Self::carry_count() + current_vote().1 candidates.
+ /// Only works if the `block_number >= current_vote().0` and `< current_vote().0 + presentation_duration()`. + /// `signed` should have at least + fn present_winner( + origin, + candidate: ::Source, + #[compact] total: BalanceOf, + #[compact] index: VoteIndex + ) -> Result { + let who = ensure_signed(origin)?; + ensure!(!total.is_zero(), "stake deposited to present winner and be added to leaderboard should be non-zero"); + + let candidate = T::Lookup::lookup(candidate)?; + ensure!(index == Self::vote_index(), "index not current"); + let (_, _, expiring) = Self::next_finalize().ok_or("cannot present outside of presentation period")?; + let stakes = Self::snapshoted_stakes(); + let voters = Self::voters(); + let bad_presentation_punishment = Self::present_slash_per_voter() * BalanceOf::::sa(voters.len() as u64); + ensure!(T::Currency::can_slash(&who, bad_presentation_punishment), "presenter must have sufficient slashable funds"); + + let mut leaderboard = Self::leaderboard().ok_or("leaderboard must exist while present phase active")?; + ensure!(total > leaderboard[0].0, "candidate not worthy of leaderboard"); + + if let Some(p) = Self::active_council().iter().position(|&(ref c, _)| c == &candidate) { + ensure!(p < expiring.len(), "candidate must not form a duplicated member if elected"); + } + + let (registered_since, candidate_index): (VoteIndex, u32) = + Self::candidate_reg_info(&candidate).ok_or("presented candidate must be current")?; + let actual_total = voters.iter() + .zip(stakes.iter()) + .filter_map(|(voter, stake)| + match Self::voter_last_active(voter) { + Some(b) if b >= registered_since => + Self::approvals_of(voter).get(candidate_index as usize) + .and_then(|approved| if *approved { Some(*stake) } else { None }), + _ => None, + }) + .fold(Zero::zero(), |acc, n| acc + n); + let dupe = leaderboard.iter().find(|&&(_, ref c)| c == &candidate).is_some(); + if total == actual_total && !dupe { + // insert into leaderboard + leaderboard[0] = (total, candidate); + leaderboard.sort_by_key(|&(t, _)| t); + >::put(leaderboard); + Ok(()) + } else { + // we can rest assured it will be Ok since we checked `can_slash` earlier; still + // better safe than sorry. + let imbalance = T::Currency::slash(&who, bad_presentation_punishment).0; + T::BadPresentation::on_unbalanced(imbalance); + Err(if dupe { "duplicate presentation" } else { "incorrect total" }) + } + } + + /// Set the desired member count; if lower than the current count, then seats will not be up + /// for election when they expire. If more, then a new vote will be started if one is not already + /// in progress. + fn set_desired_seats(#[compact] count: u32) { + >::put(count); + } + + /// Remove a particular member. A tally will happen instantly (if not already in a presentation + /// period) to fill the seat if removal means that the desired member count is not met. + /// This is effective immediately. + fn remove_member(who: ::Source) { + let who = T::Lookup::lookup(who)?; + let new_council: Vec<(T::AccountId, T::BlockNumber)> = Self::active_council() + .into_iter() + .filter(|i| i.0 != who) + .collect(); + >::put(new_council); + } + + /// Set the presentation duration. If there is currently a vote being presented for, will + /// invoke `finalize_vote`. + fn set_presentation_duration(#[compact] count: T::BlockNumber) { + >::put(count); + } + + /// Set the term duration.
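+ /// Changing this only affects members elected at later tallies; sitting members keep the + /// expiry that was stamped when they were elected (see `finalize_tally`).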
+ fn set_term_duration(#[compact] count: T::BlockNumber) { + >::put(count); + } + + fn on_finalize(n: T::BlockNumber) { + if let Err(e) = Self::end_block(n) { + print("Guru meditation"); + print(e); + } + } + } } decl_storage! { - trait Store for Module as Council { - - // parameters - /// How much should be locked up in order to submit one's candidacy. - pub CandidacyBond get(candidacy_bond) config(): BalanceOf = BalanceOf::::sa(9); - /// How much should be locked up in order to be able to submit votes. - pub VotingBond get(voting_bond) config(voter_bond): BalanceOf; - /// The punishment, per voter, if you provide an invalid presentation. - pub PresentSlashPerVoter get(present_slash_per_voter) config(): BalanceOf = BalanceOf::::sa(1); - /// How many runners-up should have their approvals persist until the next vote. - pub CarryCount get(carry_count) config(): u32 = 2; - /// How long to give each top candidate to present themselves after the vote ends. - pub PresentationDuration get(presentation_duration) config(): T::BlockNumber = T::BlockNumber::sa(1000); - /// How many vote indexes need to go by after a target voter's last vote before they can be reaped if their - /// approvals are moot. - pub InactiveGracePeriod get(inactivity_grace_period) config(inactive_grace_period): VoteIndex = 1; - /// How often (in blocks) to check for new votes. - pub VotingPeriod get(voting_period) config(approval_voting_period): T::BlockNumber = T::BlockNumber::sa(1000); - /// How long each position is active for. - pub TermDuration get(term_duration) config(): T::BlockNumber = T::BlockNumber::sa(5); - /// Number of accounts that should be sitting on the council. - pub DesiredSeats get(desired_seats) config(): u32; - - // permanent state (always relevant, changes only at the finalization of voting) - /// The current council. When there's a vote going on, this should still be used for executive - /// matters. The block number (second element in the tuple) is the block that their position is - /// active until (calculated by the sum of the block number when the council member was elected - /// and their term duration). - pub ActiveCouncil get(active_council) config(): Vec<(T::AccountId, T::BlockNumber)>; - /// The total number of votes that have happened or are in progress. - pub VoteCount get(vote_index): VoteIndex; - - // persistent state (always relevant, changes constantly) - /// A list of votes for each voter, respecting the last cleared vote index that this voter was - /// last active at. - pub ApprovalsOf get(approvals_of): map T::AccountId => Vec; - /// The vote index and list slot that the candidate `who` was registered or `None` if they are not - /// currently registered. - pub RegisterInfoOf get(candidate_reg_info): map T::AccountId => Option<(VoteIndex, u32)>; - /// The last cleared vote index that this voter was last active at. - pub LastActiveOf get(voter_last_active): map T::AccountId => Option; - /// The present voter list. - pub Voters get(voters): Vec; - /// The present candidate list. - pub Candidates get(candidates): Vec; // has holes - pub CandidateCount get(candidate_count): u32; - - // temporary state (only relevant during finalization/presentation) - /// The accounts holding the seats that will become free on the next tally. - pub NextFinalize get(next_finalize): Option<(T::BlockNumber, u32, Vec)>; - /// The stakes as they were at the point that the vote ended. - pub SnapshotedStakes get(snapshoted_stakes): Vec>; - /// Get the leaderboard if we;re in the presentation phase. 
- pub Leaderboard get(leaderboard): Option, T::AccountId)> >; // ORDERED low -> high - } + trait Store for Module as Council { + + // parameters + /// How much should be locked up in order to submit one's candidacy. + pub CandidacyBond get(candidacy_bond) config(): BalanceOf = BalanceOf::::sa(9); + /// How much should be locked up in order to be able to submit votes. + pub VotingBond get(voting_bond) config(voter_bond): BalanceOf; + /// The punishment, per voter, if you provide an invalid presentation. + pub PresentSlashPerVoter get(present_slash_per_voter) config(): BalanceOf = BalanceOf::::sa(1); + /// How many runners-up should have their approvals persist until the next vote. + pub CarryCount get(carry_count) config(): u32 = 2; + /// How long to give each top candidate to present themselves after the vote ends. + pub PresentationDuration get(presentation_duration) config(): T::BlockNumber = T::BlockNumber::sa(1000); + /// How many vote indexes need to go by after a target voter's last vote before they can be reaped if their + /// approvals are moot. + pub InactiveGracePeriod get(inactivity_grace_period) config(inactive_grace_period): VoteIndex = 1; + /// How often (in blocks) to check for new votes. + pub VotingPeriod get(voting_period) config(approval_voting_period): T::BlockNumber = T::BlockNumber::sa(1000); + /// How long each position is active for. + pub TermDuration get(term_duration) config(): T::BlockNumber = T::BlockNumber::sa(5); + /// Number of accounts that should be sitting on the council. + pub DesiredSeats get(desired_seats) config(): u32; + + // permanent state (always relevant, changes only at the finalization of voting) + /// The current council. When there's a vote going on, this should still be used for executive + /// matters. The block number (second element in the tuple) is the block that their position is + /// active until (calculated by the sum of the block number when the council member was elected + /// and their term duration). + pub ActiveCouncil get(active_council) config(): Vec<(T::AccountId, T::BlockNumber)>; + /// The total number of votes that have happened or are in progress. + pub VoteCount get(vote_index): VoteIndex; + + // persistent state (always relevant, changes constantly) + /// A list of votes for each voter, respecting the last cleared vote index that this voter was + /// last active at. + pub ApprovalsOf get(approvals_of): map T::AccountId => Vec; + /// The vote index and list slot at which the candidate `who` was registered, or `None` if they are not + /// currently registered. + pub RegisterInfoOf get(candidate_reg_info): map T::AccountId => Option<(VoteIndex, u32)>; + /// The last cleared vote index that this voter was last active at. + pub LastActiveOf get(voter_last_active): map T::AccountId => Option; + /// The present voter list. + pub Voters get(voters): Vec; + /// The present candidate list. + pub Candidates get(candidates): Vec; // has holes + pub CandidateCount get(candidate_count): u32; + + // temporary state (only relevant during finalization/presentation) + /// The accounts holding the seats that will become free on the next tally. + pub NextFinalize get(next_finalize): Option<(T::BlockNumber, u32, Vec)>; + /// The stakes as they were at the point that the vote ended. + pub SnapshotedStakes get(snapshoted_stakes): Vec>; + /// Get the leaderboard if we're in the presentation phase.
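+ /// Ordered by ascending total stake, so the winners sit at the tail of the vector + /// (`finalize_tally` reads it in reverse).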
+ pub Leaderboard get(leaderboard): Option, T::AccountId)> >; // ORDERED low -> high + } } decl_event!( @@ -380,1068 +386,1341 @@ decl_event!( ); impl Module { - // exposed immutables. - - /// True if we're currently in a presentation period. - pub fn presentation_active() -> bool { - >::exists() - } - - /// If `who` a candidate at the moment? - pub fn is_a_candidate(who: &T::AccountId) -> bool { - >::exists(who) - } - - /// Determine the block that a vote can happen on which is no less than `n`. - pub fn next_vote_from(n: T::BlockNumber) -> T::BlockNumber { - let voting_period = Self::voting_period(); - (n + voting_period - One::one()) / voting_period * voting_period - } - - /// The block number on which the tally for the next election will happen. `None` only if the - /// desired seats of the council is zero. - pub fn next_tally() -> Option { - let desired_seats = Self::desired_seats(); - if desired_seats == 0 { - None - } else { - let c = Self::active_council(); - let (next_possible, count, coming) = - if let Some((tally_end, comers, leavers)) = Self::next_finalize() { - // if there's a tally in progress, then next tally can begin immediately afterwards - (tally_end, c.len() - leavers.len() + comers as usize, comers) - } else { - (>::block_number(), c.len(), 0) - }; - if count < desired_seats as usize { - Some(next_possible) - } else { - // next tally begins once enough council members expire to bring members below desired. - if desired_seats <= coming { - // the entire amount of desired seats is less than those new members - we'll have - // to wait until they expire. - Some(next_possible + Self::term_duration()) - } else { - Some(c[c.len() - (desired_seats - coming) as usize].1) - } - }.map(Self::next_vote_from) - } - } - - // Private - /// Check there's nothing to do this block - fn end_block(block_number: T::BlockNumber) -> Result { - if (block_number % Self::voting_period()).is_zero() { - if let Some(number) = Self::next_tally() { - if block_number == number { - Self::start_tally(); - } - } - } - if let Some((number, _, _)) = Self::next_finalize() { - if block_number == number { - Self::finalize_tally()? - } - } - Ok(()) - } - - /// Remove a voter from the system. Trusts that Self::voters()[index] != voter. - fn remove_voter(voter: &T::AccountId, index: usize, mut voters: Vec) { - >::put({ voters.swap_remove(index); voters }); - >::remove(voter); - >::remove(voter); - } - - // Actually do the voting. - fn do_set_approvals(who: T::AccountId, votes: Vec, index: VoteIndex) -> Result { - let candidates = Self::candidates(); - - ensure!(!Self::presentation_active(), "no approval changes during presentation period"); - ensure!(index == Self::vote_index(), "incorrect vote index"); - ensure!(!candidates.is_empty(), "amount of candidates to receive approval votes should be non-zero"); - // Prevent a vote from voters that provide a list of votes that exceeds the candidates length - // since otherwise an attacker may be able to submit a very long list of `votes` that far exceeds - // the amount of candidates and waste more computation than a reasonable voting bond would cover. - ensure!(candidates.len() >= votes.len(), "amount of candidate approval votes cannot exceed amount of candidates"); - - if !>::exists(&who) { - // not yet a voter - deduct bond. - // NOTE: this must be the last potential bailer, since it changes state. 
- T::Currency::reserve(&who, Self::voting_bond())?; - - >::mutate(|v| v.push(who.clone())); - } - >::insert(&who, index); - >::insert(&who, votes); - - Ok(()) - } - - /// Close the voting, snapshot the staking and the number of seats that are actually up for grabs. - fn start_tally() { - let active_council = Self::active_council(); - let desired_seats = Self::desired_seats() as usize; - let number = >::block_number(); - let expiring = active_council.iter().take_while(|i| i.1 == number).map(|i| i.0.clone()).collect::>(); - let retaining_seats = active_council.len() - expiring.len(); - if retaining_seats < desired_seats { - let empty_seats = desired_seats - retaining_seats; - >::put((number + Self::presentation_duration(), empty_seats as u32, expiring)); - - let voters = Self::voters(); - let votes = voters.iter().map(T::Currency::total_balance).collect::>(); - >::put(votes); - - // initialize leaderboard. - let leaderboard_size = empty_seats + Self::carry_count() as usize; - >::put(vec![(BalanceOf::::zero(), T::AccountId::default()); leaderboard_size]); - - Self::deposit_event(RawEvent::TallyStarted(empty_seats as u32)); - } - } - - /// Finalize the vote, removing each of the `removals` and inserting `seats` of the most approved - /// candidates in their place. If the total council members is less than the desired membership - /// a new vote is started. - /// Clears all presented candidates, returning the bond of the elected ones. - fn finalize_tally() -> Result { - >::kill(); - let (_, coming, expiring): (T::BlockNumber, u32, Vec) = - >::take().ok_or("finalize can only be called after a tally is started.")?; - let leaderboard: Vec<(BalanceOf, T::AccountId)> = >::take().unwrap_or_default(); - let new_expiry = >::block_number() + Self::term_duration(); - - // return bond to winners. - let candidacy_bond = Self::candidacy_bond(); - let incoming: Vec = leaderboard.iter() - .rev() - .take_while(|&&(b, _)| !b.is_zero()) - .take(coming as usize) - .map(|(_, a)| a) - .cloned() - .inspect(|a| {T::Currency::unreserve(a, candidacy_bond);}) - .collect(); - let active_council = Self::active_council(); - let outgoing = active_council.iter().take(expiring.len()).map(|a| a.0.clone()).collect(); - - // set the new council. - let mut new_council: Vec<_> = active_council - .into_iter() - .skip(expiring.len()) - .chain(incoming.iter().cloned().map(|a| (a, new_expiry))) - .collect(); - new_council.sort_by_key(|&(_, expiry)| expiry); - >::put(new_council); - - // clear all except runners-up from candidate list. - let candidates = Self::candidates(); - let mut new_candidates = vec![T::AccountId::default(); candidates.len()]; // shrink later. - let runners_up = leaderboard.into_iter() - .rev() - .take_while(|&(b, _)| !b.is_zero()) - .skip(coming as usize) - .filter_map(|(_, a)| Self::candidate_reg_info(&a).map(|i| (a, i.1))); - let mut count = 0u32; - for (address, slot) in runners_up { - new_candidates[slot as usize] = address; - count += 1; - } - for (old, new) in candidates.iter().zip(new_candidates.iter()) { - if old != new { - // removed - kill it - >::remove(old); - } - } - // discard any superfluous slots. - if let Some(last_index) = new_candidates.iter().rposition(|c| *c != T::AccountId::default()) { - new_candidates.truncate(last_index + 1); - } - - Self::deposit_event(RawEvent::TallyFinalized(incoming, outgoing)); - - >::put(new_candidates); - >::put(count); - >::put(Self::vote_index() + 1); - Ok(()) - } + // exposed immutables. + + /// True if we're currently in a presentation period. 
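+ /// Holds exactly while a started tally awaits finalization, i.e. while `NextFinalize` exists.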
+ pub fn presentation_active() -> bool { + >::exists() + } + + /// Is `who` a candidate at the moment? + pub fn is_a_candidate(who: &T::AccountId) -> bool { + >::exists(who) + } + + /// Determine the block that a vote can happen on which is no less than `n`. + pub fn next_vote_from(n: T::BlockNumber) -> T::BlockNumber { + let voting_period = Self::voting_period(); + (n + voting_period - One::one()) / voting_period * voting_period + } + + /// The block number on which the tally for the next election will happen. `None` only if the + /// desired seats of the council is zero. + pub fn next_tally() -> Option { + let desired_seats = Self::desired_seats(); + if desired_seats == 0 { + None + } else { + let c = Self::active_council(); + let (next_possible, count, coming) = + if let Some((tally_end, comers, leavers)) = Self::next_finalize() { + // if there's a tally in progress, then next tally can begin immediately afterwards + (tally_end, c.len() - leavers.len() + comers as usize, comers) + } else { + (>::block_number(), c.len(), 0) + }; + if count < desired_seats as usize { + Some(next_possible) + } else { + // next tally begins once enough council members expire to bring members below desired. + if desired_seats <= coming { + // the entire amount of desired seats is less than those new members - we'll have + // to wait until they expire. + Some(next_possible + Self::term_duration()) + } else { + Some(c[c.len() - (desired_seats - coming) as usize].1) + } + } + .map(Self::next_vote_from) + } + } + + // Private + /// Check whether there is anything to do this block. + fn end_block(block_number: T::BlockNumber) -> Result { + if (block_number % Self::voting_period()).is_zero() { + if let Some(number) = Self::next_tally() { + if block_number == number { + Self::start_tally(); + } + } + } + if let Some((number, _, _)) = Self::next_finalize() { + if block_number == number { + Self::finalize_tally()? + } + } + Ok(()) + } + + /// Remove a voter from the system. Trusts that Self::voters()[index] == voter. + fn remove_voter(voter: &T::AccountId, index: usize, mut voters: Vec) { + >::put({ + voters.swap_remove(index); + voters + }); + >::remove(voter); + >::remove(voter); + } + + // Actually do the voting. + fn do_set_approvals(who: T::AccountId, votes: Vec, index: VoteIndex) -> Result { + let candidates = Self::candidates(); + + ensure!( + !Self::presentation_active(), + "no approval changes during presentation period" + ); + ensure!(index == Self::vote_index(), "incorrect vote index"); + ensure!( + !candidates.is_empty(), + "amount of candidates to receive approval votes should be non-zero" + ); + // Prevent a vote from voters that provide a list of votes that exceeds the candidates length + // since otherwise an attacker may be able to submit a very long list of `votes` that far exceeds + // the amount of candidates and waste more computation than a reasonable voting bond would cover. + ensure!( + candidates.len() >= votes.len(), + "amount of candidate approval votes cannot exceed amount of candidates" + ); + + if !>::exists(&who) { + // not yet a voter - deduct bond. + // NOTE: this must be the last potential bailer, since it changes state. + T::Currency::reserve(&who, Self::voting_bond())?; + + >::mutate(|v| v.push(who.clone())); + } + >::insert(&who, index); + >::insert(&who, votes); + + Ok(()) + } + + /// Close the voting, snapshot the staking and the number of seats that are actually up for grabs.
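+ /// Voter balances are snapshotted here so that transfers made during the presentation period + /// cannot change the totals that `present_winner` verifies claimed stakes against.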
+ fn start_tally() { + let active_council = Self::active_council(); + let desired_seats = Self::desired_seats() as usize; + let number = >::block_number(); + let expiring = active_council + .iter() + .take_while(|i| i.1 == number) + .map(|i| i.0.clone()) + .collect::>(); + let retaining_seats = active_council.len() - expiring.len(); + if retaining_seats < desired_seats { + let empty_seats = desired_seats - retaining_seats; + >::put(( + number + Self::presentation_duration(), + empty_seats as u32, + expiring, + )); + + let voters = Self::voters(); + let votes = voters + .iter() + .map(T::Currency::total_balance) + .collect::>(); + >::put(votes); + + // initialize leaderboard. + let leaderboard_size = empty_seats + Self::carry_count() as usize; + >::put(vec![ + (BalanceOf::::zero(), T::AccountId::default()); + leaderboard_size + ]); + + Self::deposit_event(RawEvent::TallyStarted(empty_seats as u32)); + } + } + + /// Finalize the vote, removing each of the `removals` and inserting `seats` of the most approved + /// candidates in their place. If the total council members is less than the desired membership + /// a new vote is started. + /// Clears all presented candidates, returning the bond of the elected ones. + fn finalize_tally() -> Result { + >::kill(); + let (_, coming, expiring): (T::BlockNumber, u32, Vec) = + >::take() + .ok_or("finalize can only be called after a tally is started.")?; + let leaderboard: Vec<(BalanceOf, T::AccountId)> = + >::take().unwrap_or_default(); + let new_expiry = >::block_number() + Self::term_duration(); + + // return bond to winners. + let candidacy_bond = Self::candidacy_bond(); + let incoming: Vec = leaderboard + .iter() + .rev() + .take_while(|&&(b, _)| !b.is_zero()) + .take(coming as usize) + .map(|(_, a)| a) + .cloned() + .inspect(|a| { + T::Currency::unreserve(a, candidacy_bond); + }) + .collect(); + let active_council = Self::active_council(); + let outgoing = active_council + .iter() + .take(expiring.len()) + .map(|a| a.0.clone()) + .collect(); + + // set the new council. + let mut new_council: Vec<_> = active_council + .into_iter() + .skip(expiring.len()) + .chain(incoming.iter().cloned().map(|a| (a, new_expiry))) + .collect(); + new_council.sort_by_key(|&(_, expiry)| expiry); + >::put(new_council); + + // clear all except runners-up from candidate list. + let candidates = Self::candidates(); + let mut new_candidates = vec![T::AccountId::default(); candidates.len()]; // shrink later. + let runners_up = leaderboard + .into_iter() + .rev() + .take_while(|&(b, _)| !b.is_zero()) + .skip(coming as usize) + .filter_map(|(_, a)| Self::candidate_reg_info(&a).map(|i| (a, i.1))); + let mut count = 0u32; + for (address, slot) in runners_up { + new_candidates[slot as usize] = address; + count += 1; + } + for (old, new) in candidates.iter().zip(new_candidates.iter()) { + if old != new { + // removed - kill it + >::remove(old); + } + } + // discard any superfluous slots. 
+ if let Some(last_index) = new_candidates + .iter() + .rposition(|c| *c != T::AccountId::default()) + { + new_candidates.truncate(last_index + 1); + } + + Self::deposit_event(RawEvent::TallyFinalized(incoming, outgoing)); + + >::put(new_candidates); + >::put(count); + >::put(Self::vote_index() + 1); + Ok(()) + } } #[cfg(test)] mod tests { - use super::*; - use crate::tests::*; - use srml_support::{assert_ok, assert_noop, assert_err}; - - #[test] - fn params_should_work() { - with_externalities(&mut new_test_ext(false), || { - System::set_block_number(1); - assert_eq!(Council::next_vote_from(1), 4); - assert_eq!(Council::next_vote_from(4), 4); - assert_eq!(Council::next_vote_from(5), 8); - assert_eq!(Council::vote_index(), 0); - assert_eq!(Council::candidacy_bond(), 9); - assert_eq!(Council::voting_bond(), 3); - assert_eq!(Council::present_slash_per_voter(), 1); - assert_eq!(Council::presentation_duration(), 2); - assert_eq!(Council::inactivity_grace_period(), 1); - assert_eq!(Council::voting_period(), 4); - assert_eq!(Council::term_duration(), 5); - assert_eq!(Council::desired_seats(), 2); - assert_eq!(Council::carry_count(), 2); - - assert_eq!(Council::active_council(), vec![]); - assert_eq!(Council::next_tally(), Some(4)); - assert_eq!(Council::presentation_active(), false); - assert_eq!(Council::next_finalize(), None); - - assert_eq!(Council::candidates(), Vec::::new()); - assert_eq!(Council::is_a_candidate(&1), false); - assert_eq!(Council::candidate_reg_info(1), None); - - assert_eq!(Council::voters(), Vec::::new()); - assert_eq!(Council::voter_last_active(1), None); - assert_eq!(Council::approvals_of(1), vec![]); - }); - } - - #[test] - fn simple_candidate_submission_should_work() { - with_externalities(&mut new_test_ext(false), || { - System::set_block_number(1); - assert_eq!(Council::candidates(), Vec::::new()); - assert_eq!(Council::candidate_reg_info(1), None); - assert_eq!(Council::candidate_reg_info(2), None); - assert_eq!(Council::is_a_candidate(&1), false); - assert_eq!(Council::is_a_candidate(&2), false); - - assert_ok!(Council::submit_candidacy(Origin::signed(1), 0)); - assert_eq!(Council::candidates(), vec![1]); - assert_eq!(Council::candidate_reg_info(1), Some((0, 0))); - assert_eq!(Council::candidate_reg_info(2), None); - assert_eq!(Council::is_a_candidate(&1), true); - assert_eq!(Council::is_a_candidate(&2), false); - - assert_ok!(Council::submit_candidacy(Origin::signed(2), 1)); - assert_eq!(Council::candidates(), vec![1, 2]); - assert_eq!(Council::candidate_reg_info(1), Some((0, 0))); - assert_eq!(Council::candidate_reg_info(2), Some((0, 1))); - assert_eq!(Council::is_a_candidate(&1), true); - assert_eq!(Council::is_a_candidate(&2), true); - }); - } - - fn new_test_ext_with_candidate_holes() -> runtime_io::TestExternalities { - let mut t = new_test_ext(false); - with_externalities(&mut t, || { - >::put(vec![0, 0, 1]); - >::put(1); - >::insert(1, (0, 2)); - }); - t - } - - #[test] - fn candidate_submission_using_free_slot_should_work() { - let mut t = new_test_ext_with_candidate_holes(); - - with_externalities(&mut t, || { - System::set_block_number(1); - assert_eq!(Council::candidates(), vec![0, 0, 1]); - - assert_ok!(Council::submit_candidacy(Origin::signed(2), 1)); - assert_eq!(Council::candidates(), vec![0, 2, 1]); - - assert_ok!(Council::submit_candidacy(Origin::signed(3), 0)); - assert_eq!(Council::candidates(), vec![3, 2, 1]); - }); - } - - #[test] - fn candidate_submission_using_alternative_free_slot_should_work() { - let mut t = 
new_test_ext_with_candidate_holes(); - - with_externalities(&mut t, || { - System::set_block_number(1); - assert_eq!(Council::candidates(), vec![0, 0, 1]); - - assert_ok!(Council::submit_candidacy(Origin::signed(2), 0)); - assert_eq!(Council::candidates(), vec![2, 0, 1]); - - assert_ok!(Council::submit_candidacy(Origin::signed(3), 1)); - assert_eq!(Council::candidates(), vec![2, 3, 1]); - }); - } - - #[test] - fn candidate_submission_not_using_free_slot_should_not_work() { - with_externalities(&mut new_test_ext_with_candidate_holes(), || { - System::set_block_number(1); - assert_noop!(Council::submit_candidacy(Origin::signed(4), 3), "invalid candidate slot"); - }); - } - - #[test] - fn bad_candidate_slot_submission_should_not_work() { - with_externalities(&mut new_test_ext(false), || { - System::set_block_number(1); - assert_eq!(Council::candidates(), Vec::::new()); - assert_noop!(Council::submit_candidacy(Origin::signed(1), 1), "invalid candidate slot"); - }); - } - - #[test] - fn non_free_candidate_slot_submission_should_not_work() { - with_externalities(&mut new_test_ext(false), || { - System::set_block_number(1); - assert_eq!(Council::candidates(), Vec::::new()); - assert_ok!(Council::submit_candidacy(Origin::signed(1), 0)); - assert_eq!(Council::candidates(), vec![1]); - assert_noop!(Council::submit_candidacy(Origin::signed(2), 0), "invalid candidate slot"); - }); - } - - #[test] - fn dupe_candidate_submission_should_not_work() { - with_externalities(&mut new_test_ext(false), || { - System::set_block_number(1); - assert_eq!(Council::candidates(), Vec::::new()); - assert_ok!(Council::submit_candidacy(Origin::signed(1), 0)); - assert_eq!(Council::candidates(), vec![1]); - assert_noop!(Council::submit_candidacy(Origin::signed(1), 1), "duplicate candidate submission"); - }); - } - - #[test] - fn poor_candidate_submission_should_not_work() { - with_externalities(&mut new_test_ext(false), || { - System::set_block_number(1); - assert_eq!(Council::candidates(), Vec::::new()); - assert_noop!(Council::submit_candidacy(Origin::signed(7), 0), "candidate has not enough funds"); - }); - } - - #[test] - fn voting_should_work() { - with_externalities(&mut new_test_ext(false), || { - System::set_block_number(1); - - assert_ok!(Council::submit_candidacy(Origin::signed(5), 0)); - - assert_ok!(Council::set_approvals(Origin::signed(1), vec![true], 0)); - assert_ok!(Council::set_approvals(Origin::signed(4), vec![true], 0)); - - assert_eq!(Council::approvals_of(1), vec![true]); - assert_eq!(Council::approvals_of(4), vec![true]); - assert_eq!(Council::voters(), vec![1, 4]); - - assert_ok!(Council::submit_candidacy(Origin::signed(2), 1)); - assert_ok!(Council::submit_candidacy(Origin::signed(3), 2)); - - assert_ok!(Council::set_approvals(Origin::signed(2), vec![false, true, true], 0)); - assert_ok!(Council::set_approvals(Origin::signed(3), vec![false, true, true], 0)); - - assert_eq!(Council::approvals_of(1), vec![true]); - assert_eq!(Council::approvals_of(4), vec![true]); - assert_eq!(Council::approvals_of(2), vec![false, true, true]); - assert_eq!(Council::approvals_of(3), vec![false, true, true]); - - assert_eq!(Council::voters(), vec![1, 4, 2, 3]); - }); - } - - #[test] - fn proxy_voting_should_work() { - with_externalities(&mut new_test_ext(false), || { - System::set_block_number(1); - - assert_ok!(Council::submit_candidacy(Origin::signed(5), 0)); - - Democracy::force_proxy(1, 11); - Democracy::force_proxy(2, 12); - Democracy::force_proxy(3, 13); - Democracy::force_proxy(4, 14); - - 
assert_ok!(Council::proxy_set_approvals(Origin::signed(11), vec![true], 0)); - assert_ok!(Council::proxy_set_approvals(Origin::signed(14), vec![true], 0)); - - assert_eq!(Council::approvals_of(1), vec![true]); - assert_eq!(Council::approvals_of(4), vec![true]); - assert_eq!(Council::voters(), vec![1, 4]); - - assert_ok!(Council::submit_candidacy(Origin::signed(2), 1)); - assert_ok!(Council::submit_candidacy(Origin::signed(3), 2)); - - assert_ok!(Council::proxy_set_approvals(Origin::signed(12), vec![false, true, true], 0)); - assert_ok!(Council::proxy_set_approvals(Origin::signed(13), vec![false, true, true], 0)); - - assert_eq!(Council::approvals_of(1), vec![true]); - assert_eq!(Council::approvals_of(4), vec![true]); - assert_eq!(Council::approvals_of(2), vec![false, true, true]); - assert_eq!(Council::approvals_of(3), vec![false, true, true]); - - assert_eq!(Council::voters(), vec![1, 4, 2, 3]); - }); - } - - #[test] - fn setting_any_approval_vote_count_without_any_candidate_count_should_not_work() { - with_externalities(&mut new_test_ext(false), || { - System::set_block_number(1); - - assert_eq!(Council::candidates().len(), 0); - - assert_noop!(Council::set_approvals(Origin::signed(4), vec![], 0), "amount of candidates to receive approval votes should be non-zero"); - }); - } - - #[test] - fn setting_an_approval_vote_count_more_than_candidate_count_should_not_work() { - with_externalities(&mut new_test_ext(false), || { - System::set_block_number(1); - - assert_ok!(Council::submit_candidacy(Origin::signed(5), 0)); - assert_eq!(Council::candidates().len(), 1); - - assert_noop!(Council::set_approvals(Origin::signed(4), vec![true, true], 0), "amount of candidate approval votes cannot exceed amount of candidates"); - }); - } - - #[test] - fn resubmitting_voting_should_work() { - with_externalities(&mut new_test_ext(false), || { - System::set_block_number(1); - - assert_ok!(Council::submit_candidacy(Origin::signed(5), 0)); - assert_ok!(Council::set_approvals(Origin::signed(4), vec![true], 0)); - - assert_eq!(Council::approvals_of(4), vec![true]); - - assert_ok!(Council::submit_candidacy(Origin::signed(2), 1)); - assert_ok!(Council::submit_candidacy(Origin::signed(3), 2)); - assert_eq!(Council::candidates().len(), 3); - assert_ok!(Council::set_approvals(Origin::signed(4), vec![true, false, true], 0)); - - assert_eq!(Council::approvals_of(4), vec![true, false, true]); - }); - } - - #[test] - fn retracting_voter_should_work() { - with_externalities(&mut new_test_ext(false), || { - System::set_block_number(1); - - assert_ok!(Council::submit_candidacy(Origin::signed(5), 0)); - assert_ok!(Council::submit_candidacy(Origin::signed(2), 1)); - assert_ok!(Council::submit_candidacy(Origin::signed(3), 2)); - assert_eq!(Council::candidates().len(), 3); - - assert_ok!(Council::set_approvals(Origin::signed(1), vec![true], 0)); - assert_ok!(Council::set_approvals(Origin::signed(2), vec![false, true, true], 0)); - assert_ok!(Council::set_approvals(Origin::signed(3), vec![false, true, true], 0)); - assert_ok!(Council::set_approvals(Origin::signed(4), vec![true, false, true], 0)); - - assert_eq!(Council::voters(), vec![1, 2, 3, 4]); - assert_eq!(Council::approvals_of(1), vec![true]); - assert_eq!(Council::approvals_of(2), vec![false, true, true]); - assert_eq!(Council::approvals_of(3), vec![false, true, true]); - assert_eq!(Council::approvals_of(4), vec![true, false, true]); - - assert_ok!(Council::retract_voter(Origin::signed(1), 0)); - - assert_eq!(Council::voters(), vec![4, 2, 3]); - 
assert_eq!(Council::approvals_of(1), Vec::<bool>::new()); - assert_eq!(Council::approvals_of(2), vec![false, true, true]); - assert_eq!(Council::approvals_of(3), vec![false, true, true]); - assert_eq!(Council::approvals_of(4), vec![true, false, true]); - - assert_ok!(Council::retract_voter(Origin::signed(2), 1)); - - assert_eq!(Council::voters(), vec![4, 3]); - assert_eq!(Council::approvals_of(1), Vec::<bool>::new()); - assert_eq!(Council::approvals_of(2), Vec::<bool>::new()); - assert_eq!(Council::approvals_of(3), vec![false, true, true]); - assert_eq!(Council::approvals_of(4), vec![true, false, true]); - - assert_ok!(Council::retract_voter(Origin::signed(3), 1)); - - assert_eq!(Council::voters(), vec![4]); - assert_eq!(Council::approvals_of(1), Vec::<bool>::new()); - assert_eq!(Council::approvals_of(2), Vec::<bool>::new()); - assert_eq!(Council::approvals_of(3), Vec::<bool>::new()); - assert_eq!(Council::approvals_of(4), vec![true, false, true]); - }); - } - - #[test] - fn invalid_retraction_index_should_not_work() { - with_externalities(&mut new_test_ext(false), || { - System::set_block_number(1); - assert_ok!(Council::submit_candidacy(Origin::signed(3), 0)); - assert_ok!(Council::set_approvals(Origin::signed(1), vec![true], 0)); - assert_ok!(Council::set_approvals(Origin::signed(2), vec![true], 0)); - assert_eq!(Council::voters(), vec![1, 2]); - assert_noop!(Council::retract_voter(Origin::signed(1), 1), "retraction index mismatch"); - }); - } - - #[test] - fn overflow_retraction_index_should_not_work() { - with_externalities(&mut new_test_ext(false), || { - System::set_block_number(1); - assert_ok!(Council::submit_candidacy(Origin::signed(3), 0)); - assert_ok!(Council::set_approvals(Origin::signed(1), vec![true], 0)); - assert_noop!(Council::retract_voter(Origin::signed(1), 1), "retraction index invalid"); - }); - } - - #[test] - fn non_voter_retraction_should_not_work() { - with_externalities(&mut new_test_ext(false), || { - System::set_block_number(1); - assert_ok!(Council::submit_candidacy(Origin::signed(3), 0)); - assert_ok!(Council::set_approvals(Origin::signed(1), vec![true], 0)); - assert_noop!(Council::retract_voter(Origin::signed(2), 0), "cannot retract non-voter"); - }); - } - - #[test] - fn simple_tally_should_work() { - with_externalities(&mut new_test_ext(false), || { - System::set_block_number(4); - assert!(!Council::presentation_active()); - - assert_ok!(Council::submit_candidacy(Origin::signed(2), 0)); - assert_ok!(Council::submit_candidacy(Origin::signed(5), 1)); - assert_ok!(Council::set_approvals(Origin::signed(2), vec![true, false], 0)); - assert_ok!(Council::set_approvals(Origin::signed(5), vec![false, true], 0)); - assert_eq!(Council::voters(), vec![2, 5]); - assert_eq!(Council::approvals_of(2), vec![true, false]); - assert_eq!(Council::approvals_of(5), vec![false, true]); - assert_ok!(Council::end_block(System::block_number())); - - System::set_block_number(6); - assert!(Council::presentation_active()); - assert_eq!(Council::present_winner(Origin::signed(4), 2, 20, 0), Ok(())); - assert_eq!(Council::present_winner(Origin::signed(4), 5, 50, 0), Ok(())); - assert_eq!(Council::leaderboard(), Some(vec![(0, 0), (0, 0), (20, 2), (50, 5)])); - - assert_ok!(Council::end_block(System::block_number())); - - assert!(!Council::presentation_active()); - assert_eq!(Council::active_council(), vec![(5, 11), (2, 11)]); - - assert!(!Council::is_a_candidate(&2)); - assert!(!Council::is_a_candidate(&5)); - assert_eq!(Council::vote_index(), 1); - assert_eq!(Council::voter_last_active(2), Some(0)); - 
assert_eq!(Council::voter_last_active(5), Some(0)); - }); - } - - #[test] - fn presentations_with_zero_staked_deposit_should_not_work() { - with_externalities(&mut new_test_ext(false), || { - System::set_block_number(4); - assert_ok!(Council::submit_candidacy(Origin::signed(2), 0)); - assert_ok!(Council::set_approvals(Origin::signed(2), vec![true], 0)); - assert_ok!(Council::end_block(System::block_number())); - - System::set_block_number(6); - assert_noop!(Council::present_winner(Origin::signed(4), 2, 0, 0), "stake deposited to present winner and be added to leaderboard should be non-zero"); - }); - } - - #[test] - fn double_presentations_should_be_punished() { - with_externalities(&mut new_test_ext(false), || { - assert!(Balances::can_slash(&4, 10)); - - System::set_block_number(4); - assert_ok!(Council::submit_candidacy(Origin::signed(2), 0)); - assert_ok!(Council::submit_candidacy(Origin::signed(5), 1)); - assert_ok!(Council::set_approvals(Origin::signed(2), vec![true, false], 0)); - assert_ok!(Council::set_approvals(Origin::signed(5), vec![false, true], 0)); - assert_ok!(Council::end_block(System::block_number())); - - System::set_block_number(6); - assert_ok!(Council::present_winner(Origin::signed(4), 2, 20, 0)); - assert_ok!(Council::present_winner(Origin::signed(4), 5, 50, 0)); - assert_eq!(Council::present_winner(Origin::signed(4), 5, 50, 0), Err("duplicate presentation")); - assert_ok!(Council::end_block(System::block_number())); - - assert_eq!(Council::active_council(), vec![(5, 11), (2, 11)]); - assert_eq!(Balances::total_balance(&4), 38); - }); - } - - #[test] - fn retracting_inactive_voter_should_work() { - with_externalities(&mut new_test_ext(false), || { - System::set_block_number(4); - assert_ok!(Council::submit_candidacy(Origin::signed(2), 0)); - assert_ok!(Council::set_approvals(Origin::signed(2), vec![true], 0)); - assert_ok!(Council::end_block(System::block_number())); - - System::set_block_number(6); - assert_ok!(Council::present_winner(Origin::signed(4), 2, 20, 0)); - assert_ok!(Council::end_block(System::block_number())); - - System::set_block_number(8); - assert_ok!(Council::submit_candidacy(Origin::signed(5), 0)); - assert_ok!(Council::set_approvals(Origin::signed(5), vec![true], 1)); - assert_ok!(Council::end_block(System::block_number())); - - System::set_block_number(10); - assert_ok!(Council::present_winner(Origin::signed(4), 5, 50, 1)); - assert_ok!(Council::end_block(System::block_number())); - - assert_ok!(Council::reap_inactive_voter(Origin::signed(5), - (Council::voters().iter().position(|&i| i == 5).unwrap() as u32).into(), - 2, (Council::voters().iter().position(|&i| i == 2).unwrap() as u32).into(), - 2 - )); - - assert_eq!(Council::voters(), vec![5]); - assert_eq!(Council::approvals_of(2).len(), 0); - assert_eq!(Balances::total_balance(&2), 17); - assert_eq!(Balances::total_balance(&5), 53); - }); - } - - #[test] - fn presenting_for_double_election_should_not_work() { - with_externalities(&mut new_test_ext(false), || { - System::set_block_number(4); - assert_eq!(Council::submit_candidacy(Origin::signed(2), 0), Ok(())); - assert_ok!(Council::set_approvals(Origin::signed(2), vec![true], 0)); - assert_ok!(Council::end_block(System::block_number())); - - System::set_block_number(6); - assert_ok!(Council::present_winner(Origin::signed(4), 2, 20, 0)); - assert_ok!(Council::end_block(System::block_number())); - - System::set_block_number(8); - assert_eq!(Council::submit_candidacy(Origin::signed(2), 0), Ok(())); - 
assert_ok!(Council::set_approvals(Origin::signed(2), vec![true], 1)); - assert_ok!(Council::end_block(System::block_number())); - - System::set_block_number(10); - assert_noop!(Council::present_winner(Origin::signed(4), 2, 20, 1), "candidate must not form a duplicated member if elected"); - }); - } - - #[test] - fn retracting_inactive_voter_with_other_candidates_in_slots_should_work() { - with_externalities(&mut new_test_ext(false), || { - System::set_block_number(4); - assert_ok!(Council::submit_candidacy(Origin::signed(2), 0)); - assert_ok!(Council::set_approvals(Origin::signed(2), vec![true], 0)); - assert_ok!(Council::end_block(System::block_number())); - - System::set_block_number(6); - assert_ok!(Council::present_winner(Origin::signed(4), 2, 20, 0)); - assert_ok!(Council::end_block(System::block_number())); - - System::set_block_number(8); - assert_ok!(Council::submit_candidacy(Origin::signed(5), 0)); - assert_ok!(Council::set_approvals(Origin::signed(5), vec![true], 1)); - assert_ok!(Council::end_block(System::block_number())); - - System::set_block_number(10); - assert_ok!(Council::present_winner(Origin::signed(4), 5, 50, 1)); - assert_ok!(Council::end_block(System::block_number())); - - System::set_block_number(11); - assert_ok!(Council::submit_candidacy(Origin::signed(1), 0)); - - assert_ok!(Council::reap_inactive_voter(Origin::signed(5), - (Council::voters().iter().position(|&i| i == 5).unwrap() as u32).into(), - 2, (Council::voters().iter().position(|&i| i == 2).unwrap() as u32).into(), - 2 - )); - - assert_eq!(Council::voters(), vec![5]); - assert_eq!(Council::approvals_of(2).len(), 0); - assert_eq!(Balances::total_balance(&2), 17); - assert_eq!(Balances::total_balance(&5), 53); - }); - } - - #[test] - fn retracting_inactive_voter_with_bad_reporter_index_should_not_work() { - with_externalities(&mut new_test_ext(false), || { - System::set_block_number(4); - assert_ok!(Council::submit_candidacy(Origin::signed(2), 0)); - assert_ok!(Council::set_approvals(Origin::signed(2), vec![true], 0)); - assert_ok!(Council::end_block(System::block_number())); - - System::set_block_number(6); - assert_ok!(Council::present_winner(Origin::signed(4), 2, 20, 0)); - assert_ok!(Council::end_block(System::block_number())); - - System::set_block_number(8); - assert_ok!(Council::submit_candidacy(Origin::signed(5), 0)); - assert_ok!(Council::set_approvals(Origin::signed(5), vec![true], 1)); - assert_ok!(Council::end_block(System::block_number())); - - System::set_block_number(10); - assert_ok!(Council::present_winner(Origin::signed(4), 5, 50, 1)); - assert_ok!(Council::end_block(System::block_number())); - - assert_noop!(Council::reap_inactive_voter(Origin::signed(2), - 42, - 2, (Council::voters().iter().position(|&i| i == 2).unwrap() as u32).into(), - 2 - ), "bad reporter index"); - }); - } - - #[test] - fn retracting_inactive_voter_with_bad_target_index_should_not_work() { - with_externalities(&mut new_test_ext(false), || { - System::set_block_number(4); - assert_ok!(Council::submit_candidacy(Origin::signed(2), 0)); - assert_ok!(Council::set_approvals(Origin::signed(2), vec![true], 0)); - assert_ok!(Council::end_block(System::block_number())); - - System::set_block_number(6); - assert_ok!(Council::present_winner(Origin::signed(4), 2, 20, 0)); - assert_ok!(Council::end_block(System::block_number())); - - System::set_block_number(8); - assert_ok!(Council::submit_candidacy(Origin::signed(5), 0)); - assert_ok!(Council::set_approvals(Origin::signed(5), vec![true], 1)); - 
assert_ok!(Council::end_block(System::block_number())); - - System::set_block_number(10); - assert_ok!(Council::present_winner(Origin::signed(4), 5, 50, 1)); - assert_ok!(Council::end_block(System::block_number())); - - assert_noop!(Council::reap_inactive_voter(Origin::signed(2), - (Council::voters().iter().position(|&i| i == 2).unwrap() as u32).into(), - 2, 42, - 2 - ), "bad target index"); - }); - } - - #[test] - fn attempting_to_retract_active_voter_should_slash_reporter() { - with_externalities(&mut new_test_ext(false), || { - System::set_block_number(4); - assert_ok!(Council::submit_candidacy(Origin::signed(2), 0)); - assert_ok!(Council::submit_candidacy(Origin::signed(3), 1)); - assert_ok!(Council::submit_candidacy(Origin::signed(4), 2)); - assert_ok!(Council::submit_candidacy(Origin::signed(5), 3)); - assert_ok!(Council::set_approvals(Origin::signed(2), vec![true, false, false, false], 0)); - assert_ok!(Council::set_approvals(Origin::signed(3), vec![false, true, false, false], 0)); - assert_ok!(Council::set_approvals(Origin::signed(4), vec![false, false, true, false], 0)); - assert_ok!(Council::set_approvals(Origin::signed(5), vec![false, false, false, true], 0)); - assert_ok!(Council::end_block(System::block_number())); - - System::set_block_number(6); - assert_ok!(Council::present_winner(Origin::signed(4), 2, 20, 0)); - assert_ok!(Council::present_winner(Origin::signed(4), 3, 30, 0)); - assert_ok!(Council::present_winner(Origin::signed(4), 4, 40, 0)); - assert_ok!(Council::present_winner(Origin::signed(4), 5, 50, 0)); - assert_ok!(Council::end_block(System::block_number())); - - System::set_block_number(8); - assert_ok!(Council::set_desired_seats(3)); - assert_ok!(Council::end_block(System::block_number())); - - System::set_block_number(10); - assert_ok!(Council::present_winner(Origin::signed(4), 2, 20, 1)); - assert_ok!(Council::present_winner(Origin::signed(4), 3, 30, 1)); - assert_ok!(Council::end_block(System::block_number())); - - assert_eq!(Council::vote_index(), 2); - assert_eq!(Council::inactivity_grace_period(), 1); - assert_eq!(Council::voting_period(), 4); - assert_eq!(Council::voter_last_active(4), Some(0)); - - assert_ok!(Council::reap_inactive_voter(Origin::signed(4), - (Council::voters().iter().position(|&i| i == 4).unwrap() as u32).into(), - 2, - (Council::voters().iter().position(|&i| i == 2).unwrap() as u32).into(), - 2 - )); - - assert_eq!(Council::voters(), vec![2, 3, 5]); - assert_eq!(Council::approvals_of(4).len(), 0); - assert_eq!(Balances::total_balance(&4), 37); - }); - } - - #[test] - fn attempting_to_retract_inactive_voter_by_nonvoter_should_not_work() { - with_externalities(&mut new_test_ext(false), || { - System::set_block_number(4); - assert_ok!(Council::submit_candidacy(Origin::signed(2), 0)); - assert_ok!(Council::set_approvals(Origin::signed(2), vec![true], 0)); - assert_ok!(Council::end_block(System::block_number())); - - System::set_block_number(6); - assert_ok!(Council::present_winner(Origin::signed(4), 2, 20, 0)); - assert_ok!(Council::end_block(System::block_number())); - - System::set_block_number(8); - assert_ok!(Council::submit_candidacy(Origin::signed(5), 0)); - assert_ok!(Council::set_approvals(Origin::signed(5), vec![true], 1)); - assert_ok!(Council::end_block(System::block_number())); - - System::set_block_number(10); - assert_ok!(Council::present_winner(Origin::signed(4), 5, 50, 1)); - assert_ok!(Council::end_block(System::block_number())); - - assert_noop!(Council::reap_inactive_voter(Origin::signed(4), - 0, - 2, 
(Council::voters().iter().position(|&i| i == 2).unwrap() as u32).into(), - 2 - ), "reporter must be a voter"); - }); - } - - #[test] - fn presenting_loser_should_not_work() { - with_externalities(&mut new_test_ext(false), || { - System::set_block_number(4); - assert_ok!(Council::submit_candidacy(Origin::signed(1), 0)); - assert_ok!(Council::set_approvals(Origin::signed(6), vec![true], 0)); - assert_ok!(Council::submit_candidacy(Origin::signed(2), 1)); - assert_ok!(Council::set_approvals(Origin::signed(2), vec![false, true], 0)); - assert_ok!(Council::submit_candidacy(Origin::signed(3), 2)); - assert_ok!(Council::set_approvals(Origin::signed(3), vec![false, false, true], 0)); - assert_ok!(Council::submit_candidacy(Origin::signed(4), 3)); - assert_ok!(Council::set_approvals(Origin::signed(4), vec![false, false, false, true], 0)); - assert_ok!(Council::submit_candidacy(Origin::signed(5), 4)); - assert_ok!(Council::set_approvals(Origin::signed(5), vec![false, false, false, false, true], 0)); - assert_ok!(Council::end_block(System::block_number())); - - System::set_block_number(6); - assert_ok!(Council::present_winner(Origin::signed(4), 1, 60, 0)); - assert_ok!(Council::present_winner(Origin::signed(4), 3, 30, 0)); - assert_ok!(Council::present_winner(Origin::signed(4), 4, 40, 0)); - assert_ok!(Council::present_winner(Origin::signed(4), 5, 50, 0)); - - assert_eq!(Council::leaderboard(), Some(vec![ - (30, 3), - (40, 4), - (50, 5), - (60, 1) - ])); - - assert_noop!(Council::present_winner(Origin::signed(4), 2, 20, 0), "candidate not worthy of leaderboard"); - }); - } - - #[test] - fn presenting_loser_first_should_not_matter() { - with_externalities(&mut new_test_ext(false), || { - System::set_block_number(4); - assert_ok!(Council::submit_candidacy(Origin::signed(1), 0)); - assert_ok!(Council::set_approvals(Origin::signed(6), vec![true], 0)); - assert_ok!(Council::submit_candidacy(Origin::signed(2), 1)); - assert_ok!(Council::set_approvals(Origin::signed(2), vec![false, true], 0)); - assert_ok!(Council::submit_candidacy(Origin::signed(3), 2)); - assert_ok!(Council::set_approvals(Origin::signed(3), vec![false, false, true], 0)); - assert_ok!(Council::submit_candidacy(Origin::signed(4), 3)); - assert_ok!(Council::set_approvals(Origin::signed(4), vec![false, false, false, true], 0)); - assert_ok!(Council::submit_candidacy(Origin::signed(5), 4)); - assert_ok!(Council::set_approvals(Origin::signed(5), vec![false, false, false, false, true], 0)); - assert_ok!(Council::end_block(System::block_number())); - - System::set_block_number(6); - assert_ok!(Council::present_winner(Origin::signed(4), 2, 20, 0)); - assert_ok!(Council::present_winner(Origin::signed(4), 1, 60, 0)); - assert_ok!(Council::present_winner(Origin::signed(4), 3, 30, 0)); - assert_ok!(Council::present_winner(Origin::signed(4), 4, 40, 0)); - assert_ok!(Council::present_winner(Origin::signed(4), 5, 50, 0)); - - assert_eq!(Council::leaderboard(), Some(vec![ - (30, 3), - (40, 4), - (50, 5), - (60, 1) - ])); - }); - } - - #[test] - fn present_outside_of_presentation_period_should_not_work() { - with_externalities(&mut new_test_ext(false), || { - System::set_block_number(4); - assert!(!Council::presentation_active()); - assert_noop!(Council::present_winner(Origin::signed(5), 5, 1, 0), "cannot present outside of presentation period"); - }); - } - - #[test] - fn present_with_invalid_vote_index_should_not_work() { - with_externalities(&mut new_test_ext(false), || { - System::set_block_number(4); - 
assert_ok!(Council::submit_candidacy(Origin::signed(2), 0)); - assert_ok!(Council::submit_candidacy(Origin::signed(5), 1)); - assert_ok!(Council::set_approvals(Origin::signed(2), vec![true, false], 0)); - assert_ok!(Council::set_approvals(Origin::signed(5), vec![false, true], 0)); - assert_ok!(Council::end_block(System::block_number())); - - System::set_block_number(6); - assert_noop!(Council::present_winner(Origin::signed(4), 2, 20, 1), "index not current"); - }); - } - - #[test] - fn present_when_presenter_is_poor_should_not_work() { - with_externalities(&mut new_test_ext(false), || { - System::set_block_number(4); - assert!(!Council::presentation_active()); - - assert_ok!(Council::submit_candidacy(Origin::signed(1), 0)); - assert_ok!(Council::submit_candidacy(Origin::signed(5), 1)); - assert_ok!(Council::set_approvals(Origin::signed(2), vec![true, false], 0)); - assert_ok!(Council::set_approvals(Origin::signed(5), vec![false, true], 0)); - assert_ok!(Council::end_block(System::block_number())); - - System::set_block_number(6); - assert_eq!(Balances::free_balance(&1), 1); - assert_eq!(Balances::reserved_balance(&1), 9); - assert_noop!(Council::present_winner(Origin::signed(1), 1, 20, 0), "presenter must have sufficient slashable funds"); - }); - } - - #[test] - fn invalid_present_tally_should_slash() { - with_externalities(&mut new_test_ext(false), || { - System::set_block_number(4); - assert!(!Council::presentation_active()); - assert_eq!(Balances::total_balance(&4), 40); - - assert_ok!(Council::submit_candidacy(Origin::signed(2), 0)); - assert_ok!(Council::submit_candidacy(Origin::signed(5), 1)); - assert_ok!(Council::set_approvals(Origin::signed(2), vec![true, false], 0)); - assert_ok!(Council::set_approvals(Origin::signed(5), vec![false, true], 0)); - assert_ok!(Council::end_block(System::block_number())); - - System::set_block_number(6); - assert_err!(Council::present_winner(Origin::signed(4), 2, 80, 0), "incorrect total"); - - assert_eq!(Balances::total_balance(&4), 38); - }); - } - - #[test] - fn runners_up_should_be_kept() { - with_externalities(&mut new_test_ext(false), || { - System::set_block_number(4); - assert!(!Council::presentation_active()); - - assert_ok!(Council::submit_candidacy(Origin::signed(1), 0)); - assert_ok!(Council::set_approvals(Origin::signed(6), vec![true], 0)); - assert_ok!(Council::submit_candidacy(Origin::signed(2), 1)); - assert_ok!(Council::set_approvals(Origin::signed(2), vec![false, true], 0)); - assert_ok!(Council::submit_candidacy(Origin::signed(3), 2)); - assert_ok!(Council::set_approvals(Origin::signed(3), vec![false, false, true], 0)); - assert_ok!(Council::submit_candidacy(Origin::signed(4), 3)); - assert_ok!(Council::set_approvals(Origin::signed(4), vec![false, false, false, true], 0)); - assert_ok!(Council::submit_candidacy(Origin::signed(5), 4)); - assert_ok!(Council::set_approvals(Origin::signed(5), vec![false, false, false, false, true], 0)); - - assert_ok!(Council::end_block(System::block_number())); - - System::set_block_number(6); - assert!(Council::presentation_active()); - assert_ok!(Council::present_winner(Origin::signed(4), 1, 60, 0)); - // leaderboard length is the empty seats plus the carry count (i.e. 
5 + 2), where those - // to be carried are the lowest and stored in lowest indexes - assert_eq!(Council::leaderboard(), Some(vec![ - (0, 0), - (0, 0), - (0, 0), - (60, 1) - ])); - assert_ok!(Council::present_winner(Origin::signed(4), 3, 30, 0)); - assert_ok!(Council::present_winner(Origin::signed(4), 4, 40, 0)); - assert_ok!(Council::present_winner(Origin::signed(4), 5, 50, 0)); - assert_eq!(Council::leaderboard(), Some(vec![ - (30, 3), - (40, 4), - (50, 5), - (60, 1) - ])); - - assert_ok!(Council::end_block(System::block_number())); - - assert!(!Council::presentation_active()); - assert_eq!(Council::active_council(), vec![(1, 11), (5, 11)]); - - assert!(!Council::is_a_candidate(&1)); - assert!(!Council::is_a_candidate(&5)); - assert!(!Council::is_a_candidate(&2)); - assert!(Council::is_a_candidate(&3)); - assert!(Council::is_a_candidate(&4)); - assert_eq!(Council::vote_index(), 1); - assert_eq!(Council::voter_last_active(2), Some(0)); - assert_eq!(Council::voter_last_active(3), Some(0)); - assert_eq!(Council::voter_last_active(4), Some(0)); - assert_eq!(Council::voter_last_active(5), Some(0)); - assert_eq!(Council::voter_last_active(6), Some(0)); - assert_eq!(Council::candidate_reg_info(3), Some((0, 2))); - assert_eq!(Council::candidate_reg_info(4), Some((0, 3))); - }); - } - - #[test] - fn second_tally_should_use_runners_up() { - with_externalities(&mut new_test_ext(false), || { - System::set_block_number(4); - assert_ok!(Council::submit_candidacy(Origin::signed(1), 0)); - assert_ok!(Council::set_approvals(Origin::signed(6), vec![true], 0)); - assert_ok!(Council::submit_candidacy(Origin::signed(2), 1)); - assert_ok!(Council::set_approvals(Origin::signed(2), vec![false, true], 0)); - assert_ok!(Council::submit_candidacy(Origin::signed(3), 2)); - assert_ok!(Council::set_approvals(Origin::signed(3), vec![false, false, true], 0)); - assert_ok!(Council::submit_candidacy(Origin::signed(4), 3)); - assert_ok!(Council::set_approvals(Origin::signed(4), vec![false, false, false, true], 0)); - assert_ok!(Council::submit_candidacy(Origin::signed(5), 4)); - assert_ok!(Council::set_approvals(Origin::signed(5), vec![false, false, false, false, true], 0)); - assert_ok!(Council::end_block(System::block_number())); - - System::set_block_number(6); - assert_ok!(Council::present_winner(Origin::signed(4), 1, 60, 0)); - assert_ok!(Council::present_winner(Origin::signed(4), 3, 30, 0)); - assert_ok!(Council::present_winner(Origin::signed(4), 4, 40, 0)); - assert_ok!(Council::present_winner(Origin::signed(4), 5, 50, 0)); - assert_ok!(Council::end_block(System::block_number())); - - System::set_block_number(8); - assert_ok!(Council::set_approvals(Origin::signed(6), vec![false, false, true, false], 1)); - assert_ok!(Council::set_desired_seats(3)); - assert_ok!(Council::end_block(System::block_number())); - - System::set_block_number(10); - assert_ok!(Council::present_winner(Origin::signed(4), 3, 90, 1)); - assert_ok!(Council::present_winner(Origin::signed(4), 4, 40, 1)); - assert_ok!(Council::end_block(System::block_number())); - - assert!(!Council::presentation_active()); - assert_eq!(Council::active_council(), vec![(1, 11), (5, 11), (3, 15)]); - - assert!(!Council::is_a_candidate(&1)); - assert!(!Council::is_a_candidate(&2)); - assert!(!Council::is_a_candidate(&3)); - assert!(!Council::is_a_candidate(&5)); - assert!(Council::is_a_candidate(&4)); - assert_eq!(Council::vote_index(), 2); - assert_eq!(Council::voter_last_active(2), Some(0)); - assert_eq!(Council::voter_last_active(3), Some(0)); - 
assert_eq!(Council::voter_last_active(4), Some(0)); - assert_eq!(Council::voter_last_active(5), Some(0)); - assert_eq!(Council::voter_last_active(6), Some(1)); - - assert_eq!(Council::candidate_reg_info(4), Some((0, 3))); - }); - } + use super::*; + use crate::tests::*; + use srml_support::{assert_err, assert_noop, assert_ok}; + + #[test] + fn params_should_work() { + with_externalities(&mut new_test_ext(false), || { + System::set_block_number(1); + assert_eq!(Council::next_vote_from(1), 4); + assert_eq!(Council::next_vote_from(4), 4); + assert_eq!(Council::next_vote_from(5), 8); + assert_eq!(Council::vote_index(), 0); + assert_eq!(Council::candidacy_bond(), 9); + assert_eq!(Council::voting_bond(), 3); + assert_eq!(Council::present_slash_per_voter(), 1); + assert_eq!(Council::presentation_duration(), 2); + assert_eq!(Council::inactivity_grace_period(), 1); + assert_eq!(Council::voting_period(), 4); + assert_eq!(Council::term_duration(), 5); + assert_eq!(Council::desired_seats(), 2); + assert_eq!(Council::carry_count(), 2); + + assert_eq!(Council::active_council(), vec![]); + assert_eq!(Council::next_tally(), Some(4)); + assert_eq!(Council::presentation_active(), false); + assert_eq!(Council::next_finalize(), None); + + assert_eq!(Council::candidates(), Vec::<u64>::new()); + assert_eq!(Council::is_a_candidate(&1), false); + assert_eq!(Council::candidate_reg_info(1), None); + + assert_eq!(Council::voters(), Vec::<u64>::new()); + assert_eq!(Council::voter_last_active(1), None); + assert_eq!(Council::approvals_of(1), vec![]); + }); + } + + #[test] + fn simple_candidate_submission_should_work() { + with_externalities(&mut new_test_ext(false), || { + System::set_block_number(1); + assert_eq!(Council::candidates(), Vec::<u64>::new()); + assert_eq!(Council::candidate_reg_info(1), None); + assert_eq!(Council::candidate_reg_info(2), None); + assert_eq!(Council::is_a_candidate(&1), false); + assert_eq!(Council::is_a_candidate(&2), false); + + assert_ok!(Council::submit_candidacy(Origin::signed(1), 0)); + assert_eq!(Council::candidates(), vec![1]); + assert_eq!(Council::candidate_reg_info(1), Some((0, 0))); + assert_eq!(Council::candidate_reg_info(2), None); + assert_eq!(Council::is_a_candidate(&1), true); + assert_eq!(Council::is_a_candidate(&2), false); + + assert_ok!(Council::submit_candidacy(Origin::signed(2), 1)); + assert_eq!(Council::candidates(), vec![1, 2]); + assert_eq!(Council::candidate_reg_info(1), Some((0, 0))); + assert_eq!(Council::candidate_reg_info(2), Some((0, 1))); + assert_eq!(Council::is_a_candidate(&1), true); + assert_eq!(Council::is_a_candidate(&2), true); + }); + } + + fn new_test_ext_with_candidate_holes() -> runtime_io::TestExternalities<Blake2Hasher> { + let mut t = new_test_ext(false); + with_externalities(&mut t, || { + <Candidates<Test>>::put(vec![0, 0, 1]); + <CandidateCount<Test>>::put(1); + <RegisterInfoOf<Test>>::insert(1, (0, 2)); + }); + t + } + + #[test] + fn candidate_submission_using_free_slot_should_work() { + let mut t = new_test_ext_with_candidate_holes(); + + with_externalities(&mut t, || { + System::set_block_number(1); + assert_eq!(Council::candidates(), vec![0, 0, 1]); + + assert_ok!(Council::submit_candidacy(Origin::signed(2), 1)); + assert_eq!(Council::candidates(), vec![0, 2, 1]); + + assert_ok!(Council::submit_candidacy(Origin::signed(3), 0)); + assert_eq!(Council::candidates(), vec![3, 2, 1]); + }); + } + + #[test] + fn candidate_submission_using_alternative_free_slot_should_work() { + let mut t = new_test_ext_with_candidate_holes(); + + with_externalities(&mut t, || { + System::set_block_number(1); + assert_eq!(Council::candidates(), 
vec![0, 0, 1]); + + assert_ok!(Council::submit_candidacy(Origin::signed(2), 0)); + assert_eq!(Council::candidates(), vec![2, 0, 1]); + + assert_ok!(Council::submit_candidacy(Origin::signed(3), 1)); + assert_eq!(Council::candidates(), vec![2, 3, 1]); + }); + } + + #[test] + fn candidate_submission_not_using_free_slot_should_not_work() { + with_externalities(&mut new_test_ext_with_candidate_holes(), || { + System::set_block_number(1); + assert_noop!( + Council::submit_candidacy(Origin::signed(4), 3), + "invalid candidate slot" + ); + }); + } + + #[test] + fn bad_candidate_slot_submission_should_not_work() { + with_externalities(&mut new_test_ext(false), || { + System::set_block_number(1); + assert_eq!(Council::candidates(), Vec::<u64>::new()); + assert_noop!( + Council::submit_candidacy(Origin::signed(1), 1), + "invalid candidate slot" + ); + }); + } + + #[test] + fn non_free_candidate_slot_submission_should_not_work() { + with_externalities(&mut new_test_ext(false), || { + System::set_block_number(1); + assert_eq!(Council::candidates(), Vec::<u64>::new()); + assert_ok!(Council::submit_candidacy(Origin::signed(1), 0)); + assert_eq!(Council::candidates(), vec![1]); + assert_noop!( + Council::submit_candidacy(Origin::signed(2), 0), + "invalid candidate slot" + ); + }); + } + + #[test] + fn dupe_candidate_submission_should_not_work() { + with_externalities(&mut new_test_ext(false), || { + System::set_block_number(1); + assert_eq!(Council::candidates(), Vec::<u64>::new()); + assert_ok!(Council::submit_candidacy(Origin::signed(1), 0)); + assert_eq!(Council::candidates(), vec![1]); + assert_noop!( + Council::submit_candidacy(Origin::signed(1), 1), + "duplicate candidate submission" + ); + }); + } + + #[test] + fn poor_candidate_submission_should_not_work() { + with_externalities(&mut new_test_ext(false), || { + System::set_block_number(1); + assert_eq!(Council::candidates(), Vec::<u64>::new()); + assert_noop!( + Council::submit_candidacy(Origin::signed(7), 0), + "candidate has not enough funds" + ); + }); + } + + #[test] + fn voting_should_work() { + with_externalities(&mut new_test_ext(false), || { + System::set_block_number(1); + + assert_ok!(Council::submit_candidacy(Origin::signed(5), 0)); + + assert_ok!(Council::set_approvals(Origin::signed(1), vec![true], 0)); + assert_ok!(Council::set_approvals(Origin::signed(4), vec![true], 0)); + + assert_eq!(Council::approvals_of(1), vec![true]); + assert_eq!(Council::approvals_of(4), vec![true]); + assert_eq!(Council::voters(), vec![1, 4]); + + assert_ok!(Council::submit_candidacy(Origin::signed(2), 1)); + assert_ok!(Council::submit_candidacy(Origin::signed(3), 2)); + + assert_ok!(Council::set_approvals( + Origin::signed(2), + vec![false, true, true], + 0 + )); + assert_ok!(Council::set_approvals( + Origin::signed(3), + vec![false, true, true], + 0 + )); + + assert_eq!(Council::approvals_of(1), vec![true]); + assert_eq!(Council::approvals_of(4), vec![true]); + assert_eq!(Council::approvals_of(2), vec![false, true, true]); + assert_eq!(Council::approvals_of(3), vec![false, true, true]); + + assert_eq!(Council::voters(), vec![1, 4, 2, 3]); + }); + } + + #[test] + fn proxy_voting_should_work() { + with_externalities(&mut new_test_ext(false), || { + System::set_block_number(1); + + assert_ok!(Council::submit_candidacy(Origin::signed(5), 0)); + + Democracy::force_proxy(1, 11); + Democracy::force_proxy(2, 12); + Democracy::force_proxy(3, 13); + Democracy::force_proxy(4, 14); + + assert_ok!(Council::proxy_set_approvals( + Origin::signed(11), + vec![true], + 0 + )); + 
assert_ok!(Council::proxy_set_approvals( + Origin::signed(14), + vec![true], + 0 + )); + + assert_eq!(Council::approvals_of(1), vec![true]); + assert_eq!(Council::approvals_of(4), vec![true]); + assert_eq!(Council::voters(), vec![1, 4]); + + assert_ok!(Council::submit_candidacy(Origin::signed(2), 1)); + assert_ok!(Council::submit_candidacy(Origin::signed(3), 2)); + + assert_ok!(Council::proxy_set_approvals( + Origin::signed(12), + vec![false, true, true], + 0 + )); + assert_ok!(Council::proxy_set_approvals( + Origin::signed(13), + vec![false, true, true], + 0 + )); + + assert_eq!(Council::approvals_of(1), vec![true]); + assert_eq!(Council::approvals_of(4), vec![true]); + assert_eq!(Council::approvals_of(2), vec![false, true, true]); + assert_eq!(Council::approvals_of(3), vec![false, true, true]); + + assert_eq!(Council::voters(), vec![1, 4, 2, 3]); + }); + } + + #[test] + fn setting_any_approval_vote_count_without_any_candidate_count_should_not_work() { + with_externalities(&mut new_test_ext(false), || { + System::set_block_number(1); + + assert_eq!(Council::candidates().len(), 0); + + assert_noop!( + Council::set_approvals(Origin::signed(4), vec![], 0), + "amount of candidates to receive approval votes should be non-zero" + ); + }); + } + + #[test] + fn setting_an_approval_vote_count_more_than_candidate_count_should_not_work() { + with_externalities(&mut new_test_ext(false), || { + System::set_block_number(1); + + assert_ok!(Council::submit_candidacy(Origin::signed(5), 0)); + assert_eq!(Council::candidates().len(), 1); + + assert_noop!( + Council::set_approvals(Origin::signed(4), vec![true, true], 0), + "amount of candidate approval votes cannot exceed amount of candidates" + ); + }); + } + + #[test] + fn resubmitting_voting_should_work() { + with_externalities(&mut new_test_ext(false), || { + System::set_block_number(1); + + assert_ok!(Council::submit_candidacy(Origin::signed(5), 0)); + assert_ok!(Council::set_approvals(Origin::signed(4), vec![true], 0)); + + assert_eq!(Council::approvals_of(4), vec![true]); + + assert_ok!(Council::submit_candidacy(Origin::signed(2), 1)); + assert_ok!(Council::submit_candidacy(Origin::signed(3), 2)); + assert_eq!(Council::candidates().len(), 3); + assert_ok!(Council::set_approvals( + Origin::signed(4), + vec![true, false, true], + 0 + )); + + assert_eq!(Council::approvals_of(4), vec![true, false, true]); + }); + } + + #[test] + fn retracting_voter_should_work() { + with_externalities(&mut new_test_ext(false), || { + System::set_block_number(1); + + assert_ok!(Council::submit_candidacy(Origin::signed(5), 0)); + assert_ok!(Council::submit_candidacy(Origin::signed(2), 1)); + assert_ok!(Council::submit_candidacy(Origin::signed(3), 2)); + assert_eq!(Council::candidates().len(), 3); + + assert_ok!(Council::set_approvals(Origin::signed(1), vec![true], 0)); + assert_ok!(Council::set_approvals( + Origin::signed(2), + vec![false, true, true], + 0 + )); + assert_ok!(Council::set_approvals( + Origin::signed(3), + vec![false, true, true], + 0 + )); + assert_ok!(Council::set_approvals( + Origin::signed(4), + vec![true, false, true], + 0 + )); + + assert_eq!(Council::voters(), vec![1, 2, 3, 4]); + assert_eq!(Council::approvals_of(1), vec![true]); + assert_eq!(Council::approvals_of(2), vec![false, true, true]); + assert_eq!(Council::approvals_of(3), vec![false, true, true]); + assert_eq!(Council::approvals_of(4), vec![true, false, true]); + + assert_ok!(Council::retract_voter(Origin::signed(1), 0)); + + assert_eq!(Council::voters(), vec![4, 2, 3]); + 
assert_eq!(Council::approvals_of(1), Vec::<bool>::new()); + assert_eq!(Council::approvals_of(2), vec![false, true, true]); + assert_eq!(Council::approvals_of(3), vec![false, true, true]); + assert_eq!(Council::approvals_of(4), vec![true, false, true]); + + assert_ok!(Council::retract_voter(Origin::signed(2), 1)); + + assert_eq!(Council::voters(), vec![4, 3]); + assert_eq!(Council::approvals_of(1), Vec::<bool>::new()); + assert_eq!(Council::approvals_of(2), Vec::<bool>::new()); + assert_eq!(Council::approvals_of(3), vec![false, true, true]); + assert_eq!(Council::approvals_of(4), vec![true, false, true]); + + assert_ok!(Council::retract_voter(Origin::signed(3), 1)); + + assert_eq!(Council::voters(), vec![4]); + assert_eq!(Council::approvals_of(1), Vec::<bool>::new()); + assert_eq!(Council::approvals_of(2), Vec::<bool>::new()); + assert_eq!(Council::approvals_of(3), Vec::<bool>::new()); + assert_eq!(Council::approvals_of(4), vec![true, false, true]); + }); + } + + #[test] + fn invalid_retraction_index_should_not_work() { + with_externalities(&mut new_test_ext(false), || { + System::set_block_number(1); + assert_ok!(Council::submit_candidacy(Origin::signed(3), 0)); + assert_ok!(Council::set_approvals(Origin::signed(1), vec![true], 0)); + assert_ok!(Council::set_approvals(Origin::signed(2), vec![true], 0)); + assert_eq!(Council::voters(), vec![1, 2]); + assert_noop!( + Council::retract_voter(Origin::signed(1), 1), + "retraction index mismatch" + ); + }); + } + + #[test] + fn overflow_retraction_index_should_not_work() { + with_externalities(&mut new_test_ext(false), || { + System::set_block_number(1); + assert_ok!(Council::submit_candidacy(Origin::signed(3), 0)); + assert_ok!(Council::set_approvals(Origin::signed(1), vec![true], 0)); + assert_noop!( + Council::retract_voter(Origin::signed(1), 1), + "retraction index invalid" + ); + }); + } + + #[test] + fn non_voter_retraction_should_not_work() { + with_externalities(&mut new_test_ext(false), || { + System::set_block_number(1); + assert_ok!(Council::submit_candidacy(Origin::signed(3), 0)); + assert_ok!(Council::set_approvals(Origin::signed(1), vec![true], 0)); + assert_noop!( + Council::retract_voter(Origin::signed(2), 0), + "cannot retract non-voter" + ); + }); + } + + #[test] + fn simple_tally_should_work() { + with_externalities(&mut new_test_ext(false), || { + System::set_block_number(4); + assert!(!Council::presentation_active()); + + assert_ok!(Council::submit_candidacy(Origin::signed(2), 0)); + assert_ok!(Council::submit_candidacy(Origin::signed(5), 1)); + assert_ok!(Council::set_approvals( + Origin::signed(2), + vec![true, false], + 0 + )); + assert_ok!(Council::set_approvals( + Origin::signed(5), + vec![false, true], + 0 + )); + assert_eq!(Council::voters(), vec![2, 5]); + assert_eq!(Council::approvals_of(2), vec![true, false]); + assert_eq!(Council::approvals_of(5), vec![false, true]); + assert_ok!(Council::end_block(System::block_number())); + + System::set_block_number(6); + assert!(Council::presentation_active()); + assert_eq!(Council::present_winner(Origin::signed(4), 2, 20, 0), Ok(())); + assert_eq!(Council::present_winner(Origin::signed(4), 5, 50, 0), Ok(())); + assert_eq!( + Council::leaderboard(), + Some(vec![(0, 0), (0, 0), (20, 2), (50, 5)]) + ); + + assert_ok!(Council::end_block(System::block_number())); + + assert!(!Council::presentation_active()); + assert_eq!(Council::active_council(), vec![(5, 11), (2, 11)]); + + assert!(!Council::is_a_candidate(&2)); + assert!(!Council::is_a_candidate(&5)); + assert_eq!(Council::vote_index(), 1); + 
assert_eq!(Council::voter_last_active(2), Some(0)); + assert_eq!(Council::voter_last_active(5), Some(0)); + }); + } + + #[test] + fn presentations_with_zero_staked_deposit_should_not_work() { + with_externalities(&mut new_test_ext(false), || { + System::set_block_number(4); + assert_ok!(Council::submit_candidacy(Origin::signed(2), 0)); + assert_ok!(Council::set_approvals(Origin::signed(2), vec![true], 0)); + assert_ok!(Council::end_block(System::block_number())); + + System::set_block_number(6); + assert_noop!( + Council::present_winner(Origin::signed(4), 2, 0, 0), + "stake deposited to present winner and be added to leaderboard should be non-zero" + ); + }); + } + + #[test] + fn double_presentations_should_be_punished() { + with_externalities(&mut new_test_ext(false), || { + assert!(Balances::can_slash(&4, 10)); + + System::set_block_number(4); + assert_ok!(Council::submit_candidacy(Origin::signed(2), 0)); + assert_ok!(Council::submit_candidacy(Origin::signed(5), 1)); + assert_ok!(Council::set_approvals( + Origin::signed(2), + vec![true, false], + 0 + )); + assert_ok!(Council::set_approvals( + Origin::signed(5), + vec![false, true], + 0 + )); + assert_ok!(Council::end_block(System::block_number())); + + System::set_block_number(6); + assert_ok!(Council::present_winner(Origin::signed(4), 2, 20, 0)); + assert_ok!(Council::present_winner(Origin::signed(4), 5, 50, 0)); + assert_eq!( + Council::present_winner(Origin::signed(4), 5, 50, 0), + Err("duplicate presentation") + ); + assert_ok!(Council::end_block(System::block_number())); + + assert_eq!(Council::active_council(), vec![(5, 11), (2, 11)]); + assert_eq!(Balances::total_balance(&4), 38); + }); + } + + #[test] + fn retracting_inactive_voter_should_work() { + with_externalities(&mut new_test_ext(false), || { + System::set_block_number(4); + assert_ok!(Council::submit_candidacy(Origin::signed(2), 0)); + assert_ok!(Council::set_approvals(Origin::signed(2), vec![true], 0)); + assert_ok!(Council::end_block(System::block_number())); + + System::set_block_number(6); + assert_ok!(Council::present_winner(Origin::signed(4), 2, 20, 0)); + assert_ok!(Council::end_block(System::block_number())); + + System::set_block_number(8); + assert_ok!(Council::submit_candidacy(Origin::signed(5), 0)); + assert_ok!(Council::set_approvals(Origin::signed(5), vec![true], 1)); + assert_ok!(Council::end_block(System::block_number())); + + System::set_block_number(10); + assert_ok!(Council::present_winner(Origin::signed(4), 5, 50, 1)); + assert_ok!(Council::end_block(System::block_number())); + + assert_ok!(Council::reap_inactive_voter( + Origin::signed(5), + (Council::voters().iter().position(|&i| i == 5).unwrap() as u32).into(), + 2, + (Council::voters().iter().position(|&i| i == 2).unwrap() as u32).into(), + 2 + )); + + assert_eq!(Council::voters(), vec![5]); + assert_eq!(Council::approvals_of(2).len(), 0); + assert_eq!(Balances::total_balance(&2), 17); + assert_eq!(Balances::total_balance(&5), 53); + }); + } + + #[test] + fn presenting_for_double_election_should_not_work() { + with_externalities(&mut new_test_ext(false), || { + System::set_block_number(4); + assert_eq!(Council::submit_candidacy(Origin::signed(2), 0), Ok(())); + assert_ok!(Council::set_approvals(Origin::signed(2), vec![true], 0)); + assert_ok!(Council::end_block(System::block_number())); + + System::set_block_number(6); + assert_ok!(Council::present_winner(Origin::signed(4), 2, 20, 0)); + assert_ok!(Council::end_block(System::block_number())); + + System::set_block_number(8); + 
assert_eq!(Council::submit_candidacy(Origin::signed(2), 0), Ok(())); + assert_ok!(Council::set_approvals(Origin::signed(2), vec![true], 1)); + assert_ok!(Council::end_block(System::block_number())); + + System::set_block_number(10); + assert_noop!( + Council::present_winner(Origin::signed(4), 2, 20, 1), + "candidate must not form a duplicated member if elected" + ); + }); + } + + #[test] + fn retracting_inactive_voter_with_other_candidates_in_slots_should_work() { + with_externalities(&mut new_test_ext(false), || { + System::set_block_number(4); + assert_ok!(Council::submit_candidacy(Origin::signed(2), 0)); + assert_ok!(Council::set_approvals(Origin::signed(2), vec![true], 0)); + assert_ok!(Council::end_block(System::block_number())); + + System::set_block_number(6); + assert_ok!(Council::present_winner(Origin::signed(4), 2, 20, 0)); + assert_ok!(Council::end_block(System::block_number())); + + System::set_block_number(8); + assert_ok!(Council::submit_candidacy(Origin::signed(5), 0)); + assert_ok!(Council::set_approvals(Origin::signed(5), vec![true], 1)); + assert_ok!(Council::end_block(System::block_number())); + + System::set_block_number(10); + assert_ok!(Council::present_winner(Origin::signed(4), 5, 50, 1)); + assert_ok!(Council::end_block(System::block_number())); + + System::set_block_number(11); + assert_ok!(Council::submit_candidacy(Origin::signed(1), 0)); + + assert_ok!(Council::reap_inactive_voter( + Origin::signed(5), + (Council::voters().iter().position(|&i| i == 5).unwrap() as u32).into(), + 2, + (Council::voters().iter().position(|&i| i == 2).unwrap() as u32).into(), + 2 + )); + + assert_eq!(Council::voters(), vec![5]); + assert_eq!(Council::approvals_of(2).len(), 0); + assert_eq!(Balances::total_balance(&2), 17); + assert_eq!(Balances::total_balance(&5), 53); + }); + } + + #[test] + fn retracting_inactive_voter_with_bad_reporter_index_should_not_work() { + with_externalities(&mut new_test_ext(false), || { + System::set_block_number(4); + assert_ok!(Council::submit_candidacy(Origin::signed(2), 0)); + assert_ok!(Council::set_approvals(Origin::signed(2), vec![true], 0)); + assert_ok!(Council::end_block(System::block_number())); + + System::set_block_number(6); + assert_ok!(Council::present_winner(Origin::signed(4), 2, 20, 0)); + assert_ok!(Council::end_block(System::block_number())); + + System::set_block_number(8); + assert_ok!(Council::submit_candidacy(Origin::signed(5), 0)); + assert_ok!(Council::set_approvals(Origin::signed(5), vec![true], 1)); + assert_ok!(Council::end_block(System::block_number())); + + System::set_block_number(10); + assert_ok!(Council::present_winner(Origin::signed(4), 5, 50, 1)); + assert_ok!(Council::end_block(System::block_number())); + + assert_noop!( + Council::reap_inactive_voter( + Origin::signed(2), + 42, + 2, + (Council::voters().iter().position(|&i| i == 2).unwrap() as u32).into(), + 2 + ), + "bad reporter index" + ); + }); + } + + #[test] + fn retracting_inactive_voter_with_bad_target_index_should_not_work() { + with_externalities(&mut new_test_ext(false), || { + System::set_block_number(4); + assert_ok!(Council::submit_candidacy(Origin::signed(2), 0)); + assert_ok!(Council::set_approvals(Origin::signed(2), vec![true], 0)); + assert_ok!(Council::end_block(System::block_number())); + + System::set_block_number(6); + assert_ok!(Council::present_winner(Origin::signed(4), 2, 20, 0)); + assert_ok!(Council::end_block(System::block_number())); + + System::set_block_number(8); + assert_ok!(Council::submit_candidacy(Origin::signed(5), 0)); + 
assert_ok!(Council::set_approvals(Origin::signed(5), vec![true], 1)); + assert_ok!(Council::end_block(System::block_number())); + + System::set_block_number(10); + assert_ok!(Council::present_winner(Origin::signed(4), 5, 50, 1)); + assert_ok!(Council::end_block(System::block_number())); + + assert_noop!( + Council::reap_inactive_voter( + Origin::signed(2), + (Council::voters().iter().position(|&i| i == 2).unwrap() as u32).into(), + 2, + 42, + 2 + ), + "bad target index" + ); + }); + } + + #[test] + fn attempting_to_retract_active_voter_should_slash_reporter() { + with_externalities(&mut new_test_ext(false), || { + System::set_block_number(4); + assert_ok!(Council::submit_candidacy(Origin::signed(2), 0)); + assert_ok!(Council::submit_candidacy(Origin::signed(3), 1)); + assert_ok!(Council::submit_candidacy(Origin::signed(4), 2)); + assert_ok!(Council::submit_candidacy(Origin::signed(5), 3)); + assert_ok!(Council::set_approvals( + Origin::signed(2), + vec![true, false, false, false], + 0 + )); + assert_ok!(Council::set_approvals( + Origin::signed(3), + vec![false, true, false, false], + 0 + )); + assert_ok!(Council::set_approvals( + Origin::signed(4), + vec![false, false, true, false], + 0 + )); + assert_ok!(Council::set_approvals( + Origin::signed(5), + vec![false, false, false, true], + 0 + )); + assert_ok!(Council::end_block(System::block_number())); + + System::set_block_number(6); + assert_ok!(Council::present_winner(Origin::signed(4), 2, 20, 0)); + assert_ok!(Council::present_winner(Origin::signed(4), 3, 30, 0)); + assert_ok!(Council::present_winner(Origin::signed(4), 4, 40, 0)); + assert_ok!(Council::present_winner(Origin::signed(4), 5, 50, 0)); + assert_ok!(Council::end_block(System::block_number())); + + System::set_block_number(8); + assert_ok!(Council::set_desired_seats(3)); + assert_ok!(Council::end_block(System::block_number())); + + System::set_block_number(10); + assert_ok!(Council::present_winner(Origin::signed(4), 2, 20, 1)); + assert_ok!(Council::present_winner(Origin::signed(4), 3, 30, 1)); + assert_ok!(Council::end_block(System::block_number())); + + assert_eq!(Council::vote_index(), 2); + assert_eq!(Council::inactivity_grace_period(), 1); + assert_eq!(Council::voting_period(), 4); + assert_eq!(Council::voter_last_active(4), Some(0)); + + assert_ok!(Council::reap_inactive_voter( + Origin::signed(4), + (Council::voters().iter().position(|&i| i == 4).unwrap() as u32).into(), + 2, + (Council::voters().iter().position(|&i| i == 2).unwrap() as u32).into(), + 2 + )); + + assert_eq!(Council::voters(), vec![2, 3, 5]); + assert_eq!(Council::approvals_of(4).len(), 0); + assert_eq!(Balances::total_balance(&4), 37); + }); + } + + #[test] + fn attempting_to_retract_inactive_voter_by_nonvoter_should_not_work() { + with_externalities(&mut new_test_ext(false), || { + System::set_block_number(4); + assert_ok!(Council::submit_candidacy(Origin::signed(2), 0)); + assert_ok!(Council::set_approvals(Origin::signed(2), vec![true], 0)); + assert_ok!(Council::end_block(System::block_number())); + + System::set_block_number(6); + assert_ok!(Council::present_winner(Origin::signed(4), 2, 20, 0)); + assert_ok!(Council::end_block(System::block_number())); + + System::set_block_number(8); + assert_ok!(Council::submit_candidacy(Origin::signed(5), 0)); + assert_ok!(Council::set_approvals(Origin::signed(5), vec![true], 1)); + assert_ok!(Council::end_block(System::block_number())); + + System::set_block_number(10); + assert_ok!(Council::present_winner(Origin::signed(4), 5, 50, 1)); + 
assert_ok!(Council::end_block(System::block_number())); + + assert_noop!( + Council::reap_inactive_voter( + Origin::signed(4), + 0, + 2, + (Council::voters().iter().position(|&i| i == 2).unwrap() as u32).into(), + 2 + ), + "reporter must be a voter" + ); + }); + } + + #[test] + fn presenting_loser_should_not_work() { + with_externalities(&mut new_test_ext(false), || { + System::set_block_number(4); + assert_ok!(Council::submit_candidacy(Origin::signed(1), 0)); + assert_ok!(Council::set_approvals(Origin::signed(6), vec![true], 0)); + assert_ok!(Council::submit_candidacy(Origin::signed(2), 1)); + assert_ok!(Council::set_approvals( + Origin::signed(2), + vec![false, true], + 0 + )); + assert_ok!(Council::submit_candidacy(Origin::signed(3), 2)); + assert_ok!(Council::set_approvals( + Origin::signed(3), + vec![false, false, true], + 0 + )); + assert_ok!(Council::submit_candidacy(Origin::signed(4), 3)); + assert_ok!(Council::set_approvals( + Origin::signed(4), + vec![false, false, false, true], + 0 + )); + assert_ok!(Council::submit_candidacy(Origin::signed(5), 4)); + assert_ok!(Council::set_approvals( + Origin::signed(5), + vec![false, false, false, false, true], + 0 + )); + assert_ok!(Council::end_block(System::block_number())); + + System::set_block_number(6); + assert_ok!(Council::present_winner(Origin::signed(4), 1, 60, 0)); + assert_ok!(Council::present_winner(Origin::signed(4), 3, 30, 0)); + assert_ok!(Council::present_winner(Origin::signed(4), 4, 40, 0)); + assert_ok!(Council::present_winner(Origin::signed(4), 5, 50, 0)); + + assert_eq!( + Council::leaderboard(), + Some(vec![(30, 3), (40, 4), (50, 5), (60, 1)]) + ); + + assert_noop!( + Council::present_winner(Origin::signed(4), 2, 20, 0), + "candidate not worthy of leaderboard" + ); + }); + } + + #[test] + fn presenting_loser_first_should_not_matter() { + with_externalities(&mut new_test_ext(false), || { + System::set_block_number(4); + assert_ok!(Council::submit_candidacy(Origin::signed(1), 0)); + assert_ok!(Council::set_approvals(Origin::signed(6), vec![true], 0)); + assert_ok!(Council::submit_candidacy(Origin::signed(2), 1)); + assert_ok!(Council::set_approvals( + Origin::signed(2), + vec![false, true], + 0 + )); + assert_ok!(Council::submit_candidacy(Origin::signed(3), 2)); + assert_ok!(Council::set_approvals( + Origin::signed(3), + vec![false, false, true], + 0 + )); + assert_ok!(Council::submit_candidacy(Origin::signed(4), 3)); + assert_ok!(Council::set_approvals( + Origin::signed(4), + vec![false, false, false, true], + 0 + )); + assert_ok!(Council::submit_candidacy(Origin::signed(5), 4)); + assert_ok!(Council::set_approvals( + Origin::signed(5), + vec![false, false, false, false, true], + 0 + )); + assert_ok!(Council::end_block(System::block_number())); + + System::set_block_number(6); + assert_ok!(Council::present_winner(Origin::signed(4), 2, 20, 0)); + assert_ok!(Council::present_winner(Origin::signed(4), 1, 60, 0)); + assert_ok!(Council::present_winner(Origin::signed(4), 3, 30, 0)); + assert_ok!(Council::present_winner(Origin::signed(4), 4, 40, 0)); + assert_ok!(Council::present_winner(Origin::signed(4), 5, 50, 0)); + + assert_eq!( + Council::leaderboard(), + Some(vec![(30, 3), (40, 4), (50, 5), (60, 1)]) + ); + }); + } + + #[test] + fn present_outside_of_presentation_period_should_not_work() { + with_externalities(&mut new_test_ext(false), || { + System::set_block_number(4); + assert!(!Council::presentation_active()); + assert_noop!( + Council::present_winner(Origin::signed(5), 5, 1, 0), + "cannot present outside of 
presentation period" + ); + }); + } + + #[test] + fn present_with_invalid_vote_index_should_not_work() { + with_externalities(&mut new_test_ext(false), || { + System::set_block_number(4); + assert_ok!(Council::submit_candidacy(Origin::signed(2), 0)); + assert_ok!(Council::submit_candidacy(Origin::signed(5), 1)); + assert_ok!(Council::set_approvals( + Origin::signed(2), + vec![true, false], + 0 + )); + assert_ok!(Council::set_approvals( + Origin::signed(5), + vec![false, true], + 0 + )); + assert_ok!(Council::end_block(System::block_number())); + + System::set_block_number(6); + assert_noop!( + Council::present_winner(Origin::signed(4), 2, 20, 1), + "index not current" + ); + }); + } + + #[test] + fn present_when_presenter_is_poor_should_not_work() { + with_externalities(&mut new_test_ext(false), || { + System::set_block_number(4); + assert!(!Council::presentation_active()); + + assert_ok!(Council::submit_candidacy(Origin::signed(1), 0)); + assert_ok!(Council::submit_candidacy(Origin::signed(5), 1)); + assert_ok!(Council::set_approvals( + Origin::signed(2), + vec![true, false], + 0 + )); + assert_ok!(Council::set_approvals( + Origin::signed(5), + vec![false, true], + 0 + )); + assert_ok!(Council::end_block(System::block_number())); + + System::set_block_number(6); + assert_eq!(Balances::free_balance(&1), 1); + assert_eq!(Balances::reserved_balance(&1), 9); + assert_noop!( + Council::present_winner(Origin::signed(1), 1, 20, 0), + "presenter must have sufficient slashable funds" + ); + }); + } + + #[test] + fn invalid_present_tally_should_slash() { + with_externalities(&mut new_test_ext(false), || { + System::set_block_number(4); + assert!(!Council::presentation_active()); + assert_eq!(Balances::total_balance(&4), 40); + + assert_ok!(Council::submit_candidacy(Origin::signed(2), 0)); + assert_ok!(Council::submit_candidacy(Origin::signed(5), 1)); + assert_ok!(Council::set_approvals( + Origin::signed(2), + vec![true, false], + 0 + )); + assert_ok!(Council::set_approvals( + Origin::signed(5), + vec![false, true], + 0 + )); + assert_ok!(Council::end_block(System::block_number())); + + System::set_block_number(6); + assert_err!( + Council::present_winner(Origin::signed(4), 2, 80, 0), + "incorrect total" + ); + + assert_eq!(Balances::total_balance(&4), 38); + }); + } + + #[test] + fn runners_up_should_be_kept() { + with_externalities(&mut new_test_ext(false), || { + System::set_block_number(4); + assert!(!Council::presentation_active()); + + assert_ok!(Council::submit_candidacy(Origin::signed(1), 0)); + assert_ok!(Council::set_approvals(Origin::signed(6), vec![true], 0)); + assert_ok!(Council::submit_candidacy(Origin::signed(2), 1)); + assert_ok!(Council::set_approvals( + Origin::signed(2), + vec![false, true], + 0 + )); + assert_ok!(Council::submit_candidacy(Origin::signed(3), 2)); + assert_ok!(Council::set_approvals( + Origin::signed(3), + vec![false, false, true], + 0 + )); + assert_ok!(Council::submit_candidacy(Origin::signed(4), 3)); + assert_ok!(Council::set_approvals( + Origin::signed(4), + vec![false, false, false, true], + 0 + )); + assert_ok!(Council::submit_candidacy(Origin::signed(5), 4)); + assert_ok!(Council::set_approvals( + Origin::signed(5), + vec![false, false, false, false, true], + 0 + )); + + assert_ok!(Council::end_block(System::block_number())); + + System::set_block_number(6); + assert!(Council::presentation_active()); + assert_ok!(Council::present_winner(Origin::signed(4), 1, 60, 0)); + // leaderboard length is the empty seats plus the carry count (i.e. 
5 + 2), where those + // to be carried are the lowest and stored in lowest indexes + assert_eq!( + Council::leaderboard(), + Some(vec![(0, 0), (0, 0), (0, 0), (60, 1)]) + ); + assert_ok!(Council::present_winner(Origin::signed(4), 3, 30, 0)); + assert_ok!(Council::present_winner(Origin::signed(4), 4, 40, 0)); + assert_ok!(Council::present_winner(Origin::signed(4), 5, 50, 0)); + assert_eq!( + Council::leaderboard(), + Some(vec![(30, 3), (40, 4), (50, 5), (60, 1)]) + ); + + assert_ok!(Council::end_block(System::block_number())); + + assert!(!Council::presentation_active()); + assert_eq!(Council::active_council(), vec![(1, 11), (5, 11)]); + + assert!(!Council::is_a_candidate(&1)); + assert!(!Council::is_a_candidate(&5)); + assert!(!Council::is_a_candidate(&2)); + assert!(Council::is_a_candidate(&3)); + assert!(Council::is_a_candidate(&4)); + assert_eq!(Council::vote_index(), 1); + assert_eq!(Council::voter_last_active(2), Some(0)); + assert_eq!(Council::voter_last_active(3), Some(0)); + assert_eq!(Council::voter_last_active(4), Some(0)); + assert_eq!(Council::voter_last_active(5), Some(0)); + assert_eq!(Council::voter_last_active(6), Some(0)); + assert_eq!(Council::candidate_reg_info(3), Some((0, 2))); + assert_eq!(Council::candidate_reg_info(4), Some((0, 3))); + }); + } + + #[test] + fn second_tally_should_use_runners_up() { + with_externalities(&mut new_test_ext(false), || { + System::set_block_number(4); + assert_ok!(Council::submit_candidacy(Origin::signed(1), 0)); + assert_ok!(Council::set_approvals(Origin::signed(6), vec![true], 0)); + assert_ok!(Council::submit_candidacy(Origin::signed(2), 1)); + assert_ok!(Council::set_approvals( + Origin::signed(2), + vec![false, true], + 0 + )); + assert_ok!(Council::submit_candidacy(Origin::signed(3), 2)); + assert_ok!(Council::set_approvals( + Origin::signed(3), + vec![false, false, true], + 0 + )); + assert_ok!(Council::submit_candidacy(Origin::signed(4), 3)); + assert_ok!(Council::set_approvals( + Origin::signed(4), + vec![false, false, false, true], + 0 + )); + assert_ok!(Council::submit_candidacy(Origin::signed(5), 4)); + assert_ok!(Council::set_approvals( + Origin::signed(5), + vec![false, false, false, false, true], + 0 + )); + assert_ok!(Council::end_block(System::block_number())); + + System::set_block_number(6); + assert_ok!(Council::present_winner(Origin::signed(4), 1, 60, 0)); + assert_ok!(Council::present_winner(Origin::signed(4), 3, 30, 0)); + assert_ok!(Council::present_winner(Origin::signed(4), 4, 40, 0)); + assert_ok!(Council::present_winner(Origin::signed(4), 5, 50, 0)); + assert_ok!(Council::end_block(System::block_number())); + + System::set_block_number(8); + assert_ok!(Council::set_approvals( + Origin::signed(6), + vec![false, false, true, false], + 1 + )); + assert_ok!(Council::set_desired_seats(3)); + assert_ok!(Council::end_block(System::block_number())); + + System::set_block_number(10); + assert_ok!(Council::present_winner(Origin::signed(4), 3, 90, 1)); + assert_ok!(Council::present_winner(Origin::signed(4), 4, 40, 1)); + assert_ok!(Council::end_block(System::block_number())); + + assert!(!Council::presentation_active()); + assert_eq!(Council::active_council(), vec![(1, 11), (5, 11), (3, 15)]); + + assert!(!Council::is_a_candidate(&1)); + assert!(!Council::is_a_candidate(&2)); + assert!(!Council::is_a_candidate(&3)); + assert!(!Council::is_a_candidate(&5)); + assert!(Council::is_a_candidate(&4)); + assert_eq!(Council::vote_index(), 2); + assert_eq!(Council::voter_last_active(2), Some(0)); + 
assert_eq!(Council::voter_last_active(3), Some(0)); + assert_eq!(Council::voter_last_active(4), Some(0)); + assert_eq!(Council::voter_last_active(5), Some(0)); + assert_eq!(Council::voter_last_active(6), Some(1)); + + assert_eq!(Council::candidate_reg_info(4), Some((0, 3))); + }); + } } diff --git a/srml/council/src/voting.rs b/srml/council/src/voting.rs index 37c1444a74..660de09248 100644 --- a/srml/council/src/voting.rs +++ b/srml/council/src/voting.rs @@ -16,113 +16,115 @@ //! Council voting system. -use rstd::prelude::*; +use super::{Module as Council, Trait as CouncilTrait}; +use primitives::traits::{As, Hash, Zero}; use rstd::borrow::Borrow; -use primitives::traits::{Hash, As, Zero}; +use rstd::prelude::*; use runtime_io::print; use srml_support::dispatch::Result; -use srml_support::{StorageValue, StorageMap, IsSubType, decl_module, decl_storage, decl_event, ensure}; -use {system, democracy}; -use super::{Trait as CouncilTrait, Module as Council}; +use srml_support::{ + decl_event, decl_module, decl_storage, ensure, IsSubType, StorageMap, StorageValue, +}; use system::ensure_signed; +use {democracy, system}; pub trait Trait: CouncilTrait { - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; } decl_module! { - pub struct Module for enum Call where origin: T::Origin { - fn deposit_event() = default; - - fn propose(origin, proposal: Box) { - let who = ensure_signed(origin)?; - - let expiry = >::block_number() + Self::voting_period(); - ensure!(Self::will_still_be_councillor_at(&who, expiry), "proposer would not be on council"); - - let proposal_hash = T::Hashing::hash_of(&proposal); - - ensure!(!>::exists(proposal_hash), "duplicate proposals not allowed"); - ensure!(!Self::is_vetoed(&proposal_hash), "proposal is vetoed"); - - let mut proposals = Self::proposals(); - proposals.push((expiry, proposal_hash)); - proposals.sort_by_key(|&(expiry, _)| expiry); - Self::set_proposals(&proposals); - - >::insert(proposal_hash, *proposal); - >::insert(proposal_hash, vec![who.clone()]); - >::insert((proposal_hash, who.clone()), true); - } - - fn vote(origin, proposal: T::Hash, approve: bool) { - let who = ensure_signed(origin)?; - - ensure!(Self::is_councillor(&who), "only councillors may vote on council proposals"); - - if Self::vote_of((proposal, who.clone())).is_none() { - >::mutate(proposal, |voters| voters.push(who.clone())); - } - >::insert((proposal, who), approve); - } - - fn veto(origin, proposal_hash: T::Hash) { - let who = ensure_signed(origin)?; - - ensure!(Self::is_councillor(&who), "only councillors may veto council proposals"); - ensure!(>::exists(&proposal_hash), "proposal must exist to be vetoed"); - - let mut existing_vetoers = Self::veto_of(&proposal_hash) - .map(|pair| pair.1) - .unwrap_or_else(Vec::new); - let insert_position = existing_vetoers.binary_search(&who) - .err().ok_or("a councillor may not veto a proposal twice")?; - existing_vetoers.insert(insert_position, who); - Self::set_veto_of( - &proposal_hash, - >::block_number() + Self::cooloff_period(), - existing_vetoers - ); - - Self::set_proposals( - &Self::proposals().into_iter().filter(|&(_, h)| h != proposal_hash - ).collect::>()); - >::remove(proposal_hash); - >::remove(proposal_hash); - for (c, _) in >::active_council() { - >::remove((proposal_hash, c)); - } - } - - fn set_cooloff_period(#[compact] blocks: T::BlockNumber) { - >::put(blocks); - } - - fn set_voting_period(#[compact] blocks: T::BlockNumber) { - >::put(blocks); - } - - fn on_finalize(n: T::BlockNumber) { - if let Err(e) = 
Self::end_block(n) { - print("Guru meditation"); - print(e); - } - } - } + pub struct Module for enum Call where origin: T::Origin { + fn deposit_event() = default; + + fn propose(origin, proposal: Box) { + let who = ensure_signed(origin)?; + + let expiry = >::block_number() + Self::voting_period(); + ensure!(Self::will_still_be_councillor_at(&who, expiry), "proposer would not be on council"); + + let proposal_hash = T::Hashing::hash_of(&proposal); + + ensure!(!>::exists(proposal_hash), "duplicate proposals not allowed"); + ensure!(!Self::is_vetoed(&proposal_hash), "proposal is vetoed"); + + let mut proposals = Self::proposals(); + proposals.push((expiry, proposal_hash)); + proposals.sort_by_key(|&(expiry, _)| expiry); + Self::set_proposals(&proposals); + + >::insert(proposal_hash, *proposal); + >::insert(proposal_hash, vec![who.clone()]); + >::insert((proposal_hash, who.clone()), true); + } + + fn vote(origin, proposal: T::Hash, approve: bool) { + let who = ensure_signed(origin)?; + + ensure!(Self::is_councillor(&who), "only councillors may vote on council proposals"); + + if Self::vote_of((proposal, who.clone())).is_none() { + >::mutate(proposal, |voters| voters.push(who.clone())); + } + >::insert((proposal, who), approve); + } + + fn veto(origin, proposal_hash: T::Hash) { + let who = ensure_signed(origin)?; + + ensure!(Self::is_councillor(&who), "only councillors may veto council proposals"); + ensure!(>::exists(&proposal_hash), "proposal must exist to be vetoed"); + + let mut existing_vetoers = Self::veto_of(&proposal_hash) + .map(|pair| pair.1) + .unwrap_or_else(Vec::new); + let insert_position = existing_vetoers.binary_search(&who) + .err().ok_or("a councillor may not veto a proposal twice")?; + existing_vetoers.insert(insert_position, who); + Self::set_veto_of( + &proposal_hash, + >::block_number() + Self::cooloff_period(), + existing_vetoers + ); + + Self::set_proposals( + &Self::proposals().into_iter().filter(|&(_, h)| h != proposal_hash + ).collect::>()); + >::remove(proposal_hash); + >::remove(proposal_hash); + for (c, _) in >::active_council() { + >::remove((proposal_hash, c)); + } + } + + fn set_cooloff_period(#[compact] blocks: T::BlockNumber) { + >::put(blocks); + } + + fn set_voting_period(#[compact] blocks: T::BlockNumber) { + >::put(blocks); + } + + fn on_finalize(n: T::BlockNumber) { + if let Err(e) = Self::end_block(n) { + print("Guru meditation"); + print(e); + } + } + } } decl_storage! { - trait Store for Module as CouncilVoting { - pub CooloffPeriod get(cooloff_period) config(): T::BlockNumber = T::BlockNumber::sa(1000); - pub VotingPeriod get(voting_period) config(): T::BlockNumber = T::BlockNumber::sa(3); - /// Number of blocks by which to delay enactment of successful, non-unanimous-council-instigated referendum proposals. - pub EnactDelayPeriod get(enact_delay_period) config(): T::BlockNumber = T::BlockNumber::sa(0); - pub Proposals get(proposals) build(|_| vec![]): Vec<(T::BlockNumber, T::Hash)>; // ordered by expiry. 
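// A minimal standalone sketch of the sorted-vetoers trick used in `veto` above: on a
// sorted Vec, `binary_search` returns `Err(pos)` exactly when `who` is absent, so one
// call both rejects a double veto and yields the insertion point that keeps the list
// sorted. Plain std Rust with u64 account ids; `add_vetoer` is a hypothetical helper,
// not part of the module.
fn add_vetoer(vetoers: &mut Vec<u64>, who: u64) -> Result<(), &'static str> {
    let pos = vetoers
        .binary_search(&who)
        .err()
        .ok_or("a councillor may not veto a proposal twice")?;
    vetoers.insert(pos, who);
    Ok(())
}

fn main() {
    let mut vetoers = vec![2u64, 5];
    assert!(add_vetoer(&mut vetoers, 3).is_ok());
    assert_eq!(vetoers, vec![2, 3, 5]);
    assert!(add_vetoer(&mut vetoers, 3).is_err()); // a second veto by 3 is refused
}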
- pub ProposalOf get(proposal_of): map T::Hash => Option; - pub ProposalVoters get(proposal_voters): map T::Hash => Vec; - pub CouncilVoteOf get(vote_of): map (T::Hash, T::AccountId) => Option; - pub VetoedProposal get(veto_of): map T::Hash => Option<(T::BlockNumber, Vec)>; - } + trait Store for Module as CouncilVoting { + pub CooloffPeriod get(cooloff_period) config(): T::BlockNumber = T::BlockNumber::sa(1000); + pub VotingPeriod get(voting_period) config(): T::BlockNumber = T::BlockNumber::sa(3); + /// Number of blocks by which to delay enactment of successful, non-unanimous-council-instigated referendum proposals. + pub EnactDelayPeriod get(enact_delay_period) config(): T::BlockNumber = T::BlockNumber::sa(0); + pub Proposals get(proposals) build(|_| vec![]): Vec<(T::BlockNumber, T::Hash)>; // ordered by expiry. + pub ProposalOf get(proposal_of): map T::Hash => Option; + pub ProposalVoters get(proposal_voters): map T::Hash => Vec; + pub CouncilVoteOf get(vote_of): map (T::Hash, T::AccountId) => Option; + pub VetoedProposal get(veto_of): map T::Hash => Option<(T::BlockNumber, Vec)>; + } } decl_event!( @@ -137,358 +139,533 @@ decl_event!( ); impl Module { - pub fn is_vetoed>(proposal: B) -> bool { - Self::veto_of(proposal.borrow()) - .map(|(expiry, _): (T::BlockNumber, Vec)| >::block_number() < expiry) - .unwrap_or(false) - } - - pub fn will_still_be_councillor_at(who: &T::AccountId, n: T::BlockNumber) -> bool { - >::active_council().iter() - .find(|&&(ref a, _)| a == who) - .map(|&(_, expires)| expires > n) - .unwrap_or(false) - } - - pub fn is_councillor(who: &T::AccountId) -> bool { - >::active_council().iter() - .any(|&(ref a, _)| a == who) - } - - pub fn tally(proposal_hash: &T::Hash) -> (u32, u32, u32) { - Self::generic_tally(proposal_hash, |w: &T::AccountId, p: &T::Hash| Self::vote_of((*p, w.clone()))) - } - - // Private - fn set_veto_of(proposal: &T::Hash, expiry: T::BlockNumber, vetoers: Vec) { - >::insert(proposal, (expiry, vetoers)); - } - - fn kill_veto_of(proposal: &T::Hash) { - >::remove(proposal); - } - - fn take_tally(proposal_hash: &T::Hash) -> (u32, u32, u32) { - Self::generic_tally(proposal_hash, |w: &T::AccountId, p: &T::Hash| >::take((*p, w.clone()))) - } - - fn generic_tally Option>(proposal_hash: &T::Hash, vote_of: F) -> (u32, u32, u32) { - let c = >::active_council(); - let (approve, reject) = c.iter() - .filter_map(|&(ref a, _)| vote_of(a, proposal_hash)) - .map(|approve| if approve { (1, 0) } else { (0, 1) }) - .fold((0, 0), |(a, b), (c, d)| (a + c, b + d)); - (approve, reject, c.len() as u32 - approve - reject) - } - - fn set_proposals(p: &Vec<(T::BlockNumber, T::Hash)>) { - >::put(p); - } - - fn take_proposal_if_expiring_at(n: T::BlockNumber) -> Option<(T::Proposal, T::Hash)> { - let proposals = Self::proposals(); - match proposals.first() { - Some(&(expiry, hash)) if expiry == n => { - // yes this is horrible, but fixing it will need substantial work in storage. 
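// A small self-contained sketch of the counting in `generic_tally` above: fold each
// councillor's optional vote into (approve, reject) and count everyone who returned
// `None` as an abstention. Plain std Rust with u64 account ids; `tally_votes` is a
// hypothetical helper, not the module's API.
fn tally_votes(council: &[u64], vote_of: impl Fn(u64) -> Option<bool>) -> (u32, u32, u32) {
    let (approve, reject) = council
        .iter()
        .filter_map(|&who| vote_of(who))
        .map(|aye| if aye { (1, 0) } else { (0, 1) })
        .fold((0, 0), |(a, b), (c, d)| (a + c, b + d));
    (approve, reject, council.len() as u32 - approve - reject)
}

fn main() {
    // councillors 1 and 2 vote aye, councillor 3 never votes -> one abstention
    let t = tally_votes(&[1, 2, 3], |who| if who < 3 { Some(true) } else { None });
    assert_eq!(t, (2, 0, 1));
}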
- Self::set_proposals(&proposals[1..].to_vec()); - >::take(hash).map(|p| (p, hash)) /* defensive only: all queued proposal hashes must have associated proposals*/ - } - _ => None, - } - } - - fn end_block(now: T::BlockNumber) -> Result { - while let Some((proposal, proposal_hash)) = Self::take_proposal_if_expiring_at(now) { - let tally = Self::take_tally(&proposal_hash); - if let Some(&democracy::Call::cancel_referendum(ref_index)) = IsSubType::>::is_aux_sub_type(&proposal) { - Self::deposit_event(RawEvent::TallyCancelation(proposal_hash, tally.0, tally.1, tally.2)); - if let (_, 0, 0) = tally { - >::internal_cancel_referendum(ref_index.into()); - } - } else { - Self::deposit_event(RawEvent::TallyReferendum(proposal_hash.clone(), tally.0, tally.1, tally.2)); - if tally.0 > tally.1 + tally.2 { - Self::kill_veto_of(&proposal_hash); - // If there were no nay-votes from the council, then it's weakly uncontroversial; we enact immediately. - let period = match tally.1 { - 0 => Zero::zero(), - _ => Self::enact_delay_period(), - }; - // If all council members voted yes, then it's strongly uncontroversial; we require a negative - // super-majority at referendum in order to defeat it. - let threshold = match tally { - (_, 0, 0) => democracy::VoteThreshold::SuperMajorityAgainst, - _ => democracy::VoteThreshold::SimpleMajority, - }; - >::internal_start_referendum(proposal, threshold, period).map(|_| ())?; - } - } - } - Ok(()) - } + pub fn is_vetoed>(proposal: B) -> bool { + Self::veto_of(proposal.borrow()) + .map(|(expiry, _): (T::BlockNumber, Vec)| { + >::block_number() < expiry + }) + .unwrap_or(false) + } + + pub fn will_still_be_councillor_at(who: &T::AccountId, n: T::BlockNumber) -> bool { + >::active_council() + .iter() + .find(|&&(ref a, _)| a == who) + .map(|&(_, expires)| expires > n) + .unwrap_or(false) + } + + pub fn is_councillor(who: &T::AccountId) -> bool { + >::active_council() + .iter() + .any(|&(ref a, _)| a == who) + } + + pub fn tally(proposal_hash: &T::Hash) -> (u32, u32, u32) { + Self::generic_tally(proposal_hash, |w: &T::AccountId, p: &T::Hash| { + Self::vote_of((*p, w.clone())) + }) + } + + // Private + fn set_veto_of(proposal: &T::Hash, expiry: T::BlockNumber, vetoers: Vec) { + >::insert(proposal, (expiry, vetoers)); + } + + fn kill_veto_of(proposal: &T::Hash) { + >::remove(proposal); + } + + fn take_tally(proposal_hash: &T::Hash) -> (u32, u32, u32) { + Self::generic_tally(proposal_hash, |w: &T::AccountId, p: &T::Hash| { + >::take((*p, w.clone())) + }) + } + + fn generic_tally Option>( + proposal_hash: &T::Hash, + vote_of: F, + ) -> (u32, u32, u32) { + let c = >::active_council(); + let (approve, reject) = c + .iter() + .filter_map(|&(ref a, _)| vote_of(a, proposal_hash)) + .map(|approve| if approve { (1, 0) } else { (0, 1) }) + .fold((0, 0), |(a, b), (c, d)| (a + c, b + d)); + (approve, reject, c.len() as u32 - approve - reject) + } + + fn set_proposals(p: &Vec<(T::BlockNumber, T::Hash)>) { + >::put(p); + } + + fn take_proposal_if_expiring_at(n: T::BlockNumber) -> Option<(T::Proposal, T::Hash)> { + let proposals = Self::proposals(); + match proposals.first() { + Some(&(expiry, hash)) if expiry == n => { + // yes this is horrible, but fixing it will need substantial work in storage. 
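// A minimal standalone sketch of the referendum parameters chosen in `end_block` above,
// for a council tally of (ayes, nays, abstentions): any nay vote delays enactment, and
// a fully unanimous council biases the resulting referendum so that only a
// super-majority against can defeat it. Plain std Rust; the local enum mirrors
// `democracy::VoteThreshold` for the sketch only.
#[derive(Debug, PartialEq)]
enum Threshold {
    SuperMajorityAgainst,
    SimpleMajority,
}

fn referendum_params(tally: (u32, u32, u32), enact_delay: u64) -> (u64, Threshold) {
    let period = match tally.1 {
        0 => 0, // no nays: weakly uncontroversial, enact immediately
        _ => enact_delay,
    };
    let threshold = match tally {
        (_, 0, 0) => Threshold::SuperMajorityAgainst, // unanimous council
        _ => Threshold::SimpleMajority,
    };
    (period, threshold)
}

fn main() {
    assert_eq!(referendum_params((3, 0, 0), 10), (0, Threshold::SuperMajorityAgainst));
    assert_eq!(referendum_params((2, 0, 1), 10), (0, Threshold::SimpleMajority));
    assert_eq!(referendum_params((2, 1, 0), 10), (10, Threshold::SimpleMajority));
}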
+ Self::set_proposals(&proposals[1..].to_vec()); + >::take(hash).map(|p| (p, hash)) /* defensive only: all queued proposal hashes must have associated proposals*/ + } + _ => None, + } + } + + fn end_block(now: T::BlockNumber) -> Result { + while let Some((proposal, proposal_hash)) = Self::take_proposal_if_expiring_at(now) { + let tally = Self::take_tally(&proposal_hash); + if let Some(&democracy::Call::cancel_referendum(ref_index)) = + IsSubType::>::is_aux_sub_type(&proposal) + { + Self::deposit_event(RawEvent::TallyCancelation( + proposal_hash, + tally.0, + tally.1, + tally.2, + )); + if let (_, 0, 0) = tally { + >::internal_cancel_referendum(ref_index.into()); + } + } else { + Self::deposit_event(RawEvent::TallyReferendum( + proposal_hash.clone(), + tally.0, + tally.1, + tally.2, + )); + if tally.0 > tally.1 + tally.2 { + Self::kill_veto_of(&proposal_hash); + // If there were no nay-votes from the council, then it's weakly uncontroversial; we enact immediately. + let period = match tally.1 { + 0 => Zero::zero(), + _ => Self::enact_delay_period(), + }; + // If all council members voted yes, then it's strongly uncontroversial; we require a negative + // super-majority at referendum in order to defeat it. + let threshold = match tally { + (_, 0, 0) => democracy::VoteThreshold::SuperMajorityAgainst, + _ => democracy::VoteThreshold::SimpleMajority, + }; + >::internal_start_referendum(proposal, threshold, period) + .map(|_| ())?; + } + } + } + Ok(()) + } } #[cfg(test)] mod tests { - use super::*; - use crate::tests::*; - use crate::tests::{Call, Origin}; - use srml_support::{Hashable, assert_ok, assert_noop}; - use democracy::{ReferendumInfo, VoteThreshold}; - - #[test] - fn basic_environment_works() { - with_externalities(&mut new_test_ext(true), || { - System::set_block_number(1); - assert_eq!(Balances::free_balance(&42), 0); - assert_eq!(CouncilVoting::cooloff_period(), 2); - assert_eq!(CouncilVoting::voting_period(), 1); - assert_eq!(CouncilVoting::will_still_be_councillor_at(&1, 1), true); - assert_eq!(CouncilVoting::will_still_be_councillor_at(&1, 10), false); - assert_eq!(CouncilVoting::will_still_be_councillor_at(&4, 10), false); - assert_eq!(CouncilVoting::is_councillor(&1), true); - assert_eq!(CouncilVoting::is_councillor(&4), false); - assert_eq!(CouncilVoting::proposals(), Vec::<(u64, H256)>::new()); - assert_eq!(CouncilVoting::proposal_voters(H256::default()), Vec::::new()); - assert_eq!(CouncilVoting::is_vetoed(&H256::default()), false); - assert_eq!(CouncilVoting::vote_of((H256::default(), 1)), None); - assert_eq!(CouncilVoting::tally(&H256::default()), (0, 0, 3)); - }); - } - - fn set_balance_proposal(value: u64) -> Call { - Call::Balances(balances::Call::set_balance(42, value.into(), 0)) - } - - fn cancel_referendum_proposal(id: u32) -> Call { - Call::Democracy(democracy::Call::cancel_referendum(id.into())) - } - - #[test] - fn referendum_cancellation_should_work_when_unanimous() { - with_externalities(&mut new_test_ext(true), || { - System::set_block_number(1); - let proposal = set_balance_proposal(42); - assert_ok!(Democracy::internal_start_referendum(proposal.clone(), VoteThreshold::SuperMajorityApprove, 0), 0); - assert_eq!(Democracy::active_referendums(), vec![(0, ReferendumInfo::new(4, proposal, VoteThreshold::SuperMajorityApprove, 0))]); - - let cancellation = cancel_referendum_proposal(0); - let hash = cancellation.blake2_256().into(); - assert_ok!(CouncilVoting::propose(Origin::signed(1), Box::new(cancellation))); - assert_ok!(CouncilVoting::vote(Origin::signed(2), 
hash, true)); - assert_ok!(CouncilVoting::vote(Origin::signed(3), hash, true)); - assert_eq!(CouncilVoting::proposals(), vec![(2, hash)]); - assert_ok!(CouncilVoting::end_block(System::block_number())); - - System::set_block_number(2); - assert_ok!(CouncilVoting::end_block(System::block_number())); - assert_eq!(Democracy::active_referendums(), vec![]); - assert_eq!(Balances::free_balance(&42), 0); - }); - } - - #[test] - fn referendum_cancellation_should_fail_when_not_unanimous() { - with_externalities(&mut new_test_ext(true), || { - System::set_block_number(1); - let proposal = set_balance_proposal(42); - assert_ok!(Democracy::internal_start_referendum(proposal.clone(), VoteThreshold::SuperMajorityApprove, 0), 0); - - let cancellation = cancel_referendum_proposal(0); - let hash = cancellation.blake2_256().into(); - assert_ok!(CouncilVoting::propose(Origin::signed(1), Box::new(cancellation))); - assert_ok!(CouncilVoting::vote(Origin::signed(2), hash, true)); - assert_ok!(CouncilVoting::vote(Origin::signed(3), hash, false)); - assert_ok!(CouncilVoting::end_block(System::block_number())); - - System::set_block_number(2); - assert_ok!(CouncilVoting::end_block(System::block_number())); - assert_eq!(Democracy::active_referendums(), vec![(0, ReferendumInfo::new(4, proposal, VoteThreshold::SuperMajorityApprove, 0))]); - }); - } - - #[test] - fn referendum_cancellation_should_fail_when_abstentions() { - with_externalities(&mut new_test_ext(true), || { - System::set_block_number(1); - let proposal = set_balance_proposal(42); - assert_ok!(Democracy::internal_start_referendum(proposal.clone(), VoteThreshold::SuperMajorityApprove, 0), 0); - - let cancellation = cancel_referendum_proposal(0); - let hash = cancellation.blake2_256().into(); - assert_ok!(CouncilVoting::propose(Origin::signed(1), Box::new(cancellation))); - assert_ok!(CouncilVoting::vote(Origin::signed(2), hash, true)); - assert_ok!(CouncilVoting::end_block(System::block_number())); - - System::set_block_number(2); - assert_ok!(CouncilVoting::end_block(System::block_number())); - assert_eq!(Democracy::active_referendums(), vec![(0, ReferendumInfo::new(4, proposal, VoteThreshold::SuperMajorityApprove, 0))]); - }); - } - - #[test] - fn veto_should_work() { - with_externalities(&mut new_test_ext(true), || { - System::set_block_number(1); - let proposal = set_balance_proposal(42); - let hash = proposal.blake2_256().into(); - assert_ok!(CouncilVoting::propose(Origin::signed(1), Box::new(proposal.clone()))); - assert_ok!(CouncilVoting::veto(Origin::signed(2), hash)); - assert_eq!(CouncilVoting::proposals().len(), 0); - assert_eq!(Democracy::active_referendums().len(), 0); - }); - } - - #[test] - fn double_veto_should_not_work() { - with_externalities(&mut new_test_ext(true), || { - System::set_block_number(1); - let proposal = set_balance_proposal(42); - let hash = proposal.blake2_256().into(); - assert_ok!(CouncilVoting::propose(Origin::signed(1), Box::new(proposal.clone()))); - assert_ok!(CouncilVoting::veto(Origin::signed(2), hash)); - - System::set_block_number(3); - assert_ok!(CouncilVoting::propose(Origin::signed(1), Box::new(proposal.clone()))); - assert_noop!(CouncilVoting::veto(Origin::signed(2), hash), "a councillor may not veto a proposal twice"); - }); - } - - #[test] - fn retry_in_cooloff_should_not_work() { - with_externalities(&mut new_test_ext(true), || { - System::set_block_number(1); - let proposal = set_balance_proposal(42); - let hash = proposal.blake2_256().into(); - assert_ok!(CouncilVoting::propose(Origin::signed(1), 
Box::new(proposal.clone()))); - assert_ok!(CouncilVoting::veto(Origin::signed(2), hash)); - - System::set_block_number(2); - assert_noop!(CouncilVoting::propose(Origin::signed(1), Box::new(proposal.clone())), "proposal is vetoed"); - }); - } - - #[test] - fn retry_after_cooloff_should_work() { - with_externalities(&mut new_test_ext(true), || { - System::set_block_number(1); - let proposal = set_balance_proposal(42); - let hash = proposal.blake2_256().into(); - assert_ok!(CouncilVoting::propose(Origin::signed(1), Box::new(proposal.clone()))); - assert_ok!(CouncilVoting::veto(Origin::signed(2), hash)); - - System::set_block_number(3); - assert_ok!(CouncilVoting::propose(Origin::signed(1), Box::new(proposal.clone()))); - assert_ok!(CouncilVoting::vote(Origin::signed(2), hash, false)); - assert_ok!(CouncilVoting::vote(Origin::signed(3), hash, true)); - assert_ok!(CouncilVoting::end_block(System::block_number())); - - System::set_block_number(4); - assert_ok!(CouncilVoting::end_block(System::block_number())); - assert_eq!(CouncilVoting::proposals().len(), 0); - assert_eq!(Democracy::active_referendums(), vec![(0, ReferendumInfo::new(7, set_balance_proposal(42), VoteThreshold::SimpleMajority, 0))]); - }); - } - - #[test] - fn alternative_double_veto_should_work() { - with_externalities(&mut new_test_ext(true), || { - System::set_block_number(1); - let proposal = set_balance_proposal(42); - let hash = proposal.blake2_256().into(); - assert_ok!(CouncilVoting::propose(Origin::signed(1), Box::new(proposal.clone()))); - assert_ok!(CouncilVoting::veto(Origin::signed(2), hash)); - - System::set_block_number(3); - assert_ok!(CouncilVoting::propose(Origin::signed(1), Box::new(proposal.clone()))); - assert_ok!(CouncilVoting::veto(Origin::signed(3), hash)); - assert_eq!(CouncilVoting::proposals().len(), 0); - assert_eq!(Democracy::active_referendums().len(), 0); - }); - } - - #[test] - fn simple_propose_should_work() { - with_externalities(&mut new_test_ext(true), || { - System::set_block_number(1); - let proposal = set_balance_proposal(42); - let hash = proposal.blake2_256().into(); - assert_ok!(CouncilVoting::propose(Origin::signed(1), Box::new(proposal.clone()))); - assert_eq!(CouncilVoting::proposals().len(), 1); - assert_eq!(CouncilVoting::proposal_voters(&hash), vec![1]); - assert_eq!(CouncilVoting::vote_of((hash, 1)), Some(true)); - assert_eq!(CouncilVoting::tally(&hash), (1, 0, 2)); - }); - } - - #[test] - fn unvoted_proposal_should_expire_without_action() { - with_externalities(&mut new_test_ext(true), || { - System::set_block_number(1); - let proposal = set_balance_proposal(42); - assert_ok!(CouncilVoting::propose(Origin::signed(1), Box::new(proposal.clone()))); - assert_eq!(CouncilVoting::tally(&proposal.blake2_256().into()), (1, 0, 2)); - assert_ok!(CouncilVoting::end_block(System::block_number())); - - System::set_block_number(2); - assert_ok!(CouncilVoting::end_block(System::block_number())); - assert_eq!(CouncilVoting::proposals().len(), 0); - assert_eq!(Democracy::active_referendums().len(), 0); - }); - } - - #[test] - fn unanimous_proposal_should_expire_with_biased_referendum() { - with_externalities(&mut new_test_ext(true), || { - System::set_block_number(1); - let proposal = set_balance_proposal(42); - assert_ok!(CouncilVoting::propose(Origin::signed(1), Box::new(proposal.clone()))); - assert_ok!(CouncilVoting::vote(Origin::signed(2), proposal.blake2_256().into(), true)); - assert_ok!(CouncilVoting::vote(Origin::signed(3), proposal.blake2_256().into(), true)); - 
assert_eq!(CouncilVoting::tally(&proposal.blake2_256().into()), (3, 0, 0)); - assert_ok!(CouncilVoting::end_block(System::block_number())); - - System::set_block_number(2); - assert_ok!(CouncilVoting::end_block(System::block_number())); - assert_eq!(CouncilVoting::proposals().len(), 0); - assert_eq!(Democracy::active_referendums(), vec![(0, ReferendumInfo::new(5, proposal, VoteThreshold::SuperMajorityAgainst, 0))]); - }); - } - - #[test] - fn majority_proposal_should_expire_with_unbiased_referendum() { - with_externalities(&mut new_test_ext(true), || { - System::set_block_number(1); - let proposal = set_balance_proposal(42); - assert_ok!(CouncilVoting::propose(Origin::signed(1), Box::new(proposal.clone()))); - assert_ok!(CouncilVoting::vote(Origin::signed(2), proposal.blake2_256().into(), true)); - assert_ok!(CouncilVoting::vote(Origin::signed(3), proposal.blake2_256().into(), false)); - assert_eq!(CouncilVoting::tally(&proposal.blake2_256().into()), (2, 1, 0)); - assert_ok!(CouncilVoting::end_block(System::block_number())); - - System::set_block_number(2); - assert_ok!(CouncilVoting::end_block(System::block_number())); - assert_eq!(CouncilVoting::proposals().len(), 0); - assert_eq!(Democracy::active_referendums(), vec![(0, ReferendumInfo::new(5, proposal, VoteThreshold::SimpleMajority, 0))]); - }); - } - - #[test] - fn propose_by_public_should_not_work() { - with_externalities(&mut new_test_ext(true), || { - System::set_block_number(1); - let proposal = set_balance_proposal(42); - assert_noop!(CouncilVoting::propose(Origin::signed(4), Box::new(proposal)), "proposer would not be on council"); - }); - } - - #[test] - fn vote_by_public_should_not_work() { - with_externalities(&mut new_test_ext(true), || { - System::set_block_number(1); - let proposal = set_balance_proposal(42); - assert_ok!(CouncilVoting::propose(Origin::signed(1), Box::new(proposal.clone()))); - assert_noop!(CouncilVoting::vote(Origin::signed(4), proposal.blake2_256().into(), true), "only councillors may vote on council proposals"); - }); - } + use super::*; + use crate::tests::*; + use crate::tests::{Call, Origin}; + use democracy::{ReferendumInfo, VoteThreshold}; + use srml_support::{assert_noop, assert_ok, Hashable}; + + #[test] + fn basic_environment_works() { + with_externalities(&mut new_test_ext(true), || { + System::set_block_number(1); + assert_eq!(Balances::free_balance(&42), 0); + assert_eq!(CouncilVoting::cooloff_period(), 2); + assert_eq!(CouncilVoting::voting_period(), 1); + assert_eq!(CouncilVoting::will_still_be_councillor_at(&1, 1), true); + assert_eq!(CouncilVoting::will_still_be_councillor_at(&1, 10), false); + assert_eq!(CouncilVoting::will_still_be_councillor_at(&4, 10), false); + assert_eq!(CouncilVoting::is_councillor(&1), true); + assert_eq!(CouncilVoting::is_councillor(&4), false); + assert_eq!(CouncilVoting::proposals(), Vec::<(u64, H256)>::new()); + assert_eq!( + CouncilVoting::proposal_voters(H256::default()), + Vec::::new() + ); + assert_eq!(CouncilVoting::is_vetoed(&H256::default()), false); + assert_eq!(CouncilVoting::vote_of((H256::default(), 1)), None); + assert_eq!(CouncilVoting::tally(&H256::default()), (0, 0, 3)); + }); + } + + fn set_balance_proposal(value: u64) -> Call { + Call::Balances(balances::Call::set_balance(42, value.into(), 0)) + } + + fn cancel_referendum_proposal(id: u32) -> Call { + Call::Democracy(democracy::Call::cancel_referendum(id.into())) + } + + #[test] + fn referendum_cancellation_should_work_when_unanimous() { + with_externalities(&mut new_test_ext(true), || { + 
System::set_block_number(1); + let proposal = set_balance_proposal(42); + assert_ok!( + Democracy::internal_start_referendum( + proposal.clone(), + VoteThreshold::SuperMajorityApprove, + 0 + ), + 0 + ); + assert_eq!( + Democracy::active_referendums(), + vec![( + 0, + ReferendumInfo::new(4, proposal, VoteThreshold::SuperMajorityApprove, 0) + )] + ); + + let cancellation = cancel_referendum_proposal(0); + let hash = cancellation.blake2_256().into(); + assert_ok!(CouncilVoting::propose( + Origin::signed(1), + Box::new(cancellation) + )); + assert_ok!(CouncilVoting::vote(Origin::signed(2), hash, true)); + assert_ok!(CouncilVoting::vote(Origin::signed(3), hash, true)); + assert_eq!(CouncilVoting::proposals(), vec![(2, hash)]); + assert_ok!(CouncilVoting::end_block(System::block_number())); + + System::set_block_number(2); + assert_ok!(CouncilVoting::end_block(System::block_number())); + assert_eq!(Democracy::active_referendums(), vec![]); + assert_eq!(Balances::free_balance(&42), 0); + }); + } + + #[test] + fn referendum_cancellation_should_fail_when_not_unanimous() { + with_externalities(&mut new_test_ext(true), || { + System::set_block_number(1); + let proposal = set_balance_proposal(42); + assert_ok!( + Democracy::internal_start_referendum( + proposal.clone(), + VoteThreshold::SuperMajorityApprove, + 0 + ), + 0 + ); + + let cancellation = cancel_referendum_proposal(0); + let hash = cancellation.blake2_256().into(); + assert_ok!(CouncilVoting::propose( + Origin::signed(1), + Box::new(cancellation) + )); + assert_ok!(CouncilVoting::vote(Origin::signed(2), hash, true)); + assert_ok!(CouncilVoting::vote(Origin::signed(3), hash, false)); + assert_ok!(CouncilVoting::end_block(System::block_number())); + + System::set_block_number(2); + assert_ok!(CouncilVoting::end_block(System::block_number())); + assert_eq!( + Democracy::active_referendums(), + vec![( + 0, + ReferendumInfo::new(4, proposal, VoteThreshold::SuperMajorityApprove, 0) + )] + ); + }); + } + + #[test] + fn referendum_cancellation_should_fail_when_abstentions() { + with_externalities(&mut new_test_ext(true), || { + System::set_block_number(1); + let proposal = set_balance_proposal(42); + assert_ok!( + Democracy::internal_start_referendum( + proposal.clone(), + VoteThreshold::SuperMajorityApprove, + 0 + ), + 0 + ); + + let cancellation = cancel_referendum_proposal(0); + let hash = cancellation.blake2_256().into(); + assert_ok!(CouncilVoting::propose( + Origin::signed(1), + Box::new(cancellation) + )); + assert_ok!(CouncilVoting::vote(Origin::signed(2), hash, true)); + assert_ok!(CouncilVoting::end_block(System::block_number())); + + System::set_block_number(2); + assert_ok!(CouncilVoting::end_block(System::block_number())); + assert_eq!( + Democracy::active_referendums(), + vec![( + 0, + ReferendumInfo::new(4, proposal, VoteThreshold::SuperMajorityApprove, 0) + )] + ); + }); + } + + #[test] + fn veto_should_work() { + with_externalities(&mut new_test_ext(true), || { + System::set_block_number(1); + let proposal = set_balance_proposal(42); + let hash = proposal.blake2_256().into(); + assert_ok!(CouncilVoting::propose( + Origin::signed(1), + Box::new(proposal.clone()) + )); + assert_ok!(CouncilVoting::veto(Origin::signed(2), hash)); + assert_eq!(CouncilVoting::proposals().len(), 0); + assert_eq!(Democracy::active_referendums().len(), 0); + }); + } + + #[test] + fn double_veto_should_not_work() { + with_externalities(&mut new_test_ext(true), || { + System::set_block_number(1); + let proposal = set_balance_proposal(42); + let hash = 
proposal.blake2_256().into(); + assert_ok!(CouncilVoting::propose( + Origin::signed(1), + Box::new(proposal.clone()) + )); + assert_ok!(CouncilVoting::veto(Origin::signed(2), hash)); + + System::set_block_number(3); + assert_ok!(CouncilVoting::propose( + Origin::signed(1), + Box::new(proposal.clone()) + )); + assert_noop!( + CouncilVoting::veto(Origin::signed(2), hash), + "a councillor may not veto a proposal twice" + ); + }); + } + + #[test] + fn retry_in_cooloff_should_not_work() { + with_externalities(&mut new_test_ext(true), || { + System::set_block_number(1); + let proposal = set_balance_proposal(42); + let hash = proposal.blake2_256().into(); + assert_ok!(CouncilVoting::propose( + Origin::signed(1), + Box::new(proposal.clone()) + )); + assert_ok!(CouncilVoting::veto(Origin::signed(2), hash)); + + System::set_block_number(2); + assert_noop!( + CouncilVoting::propose(Origin::signed(1), Box::new(proposal.clone())), + "proposal is vetoed" + ); + }); + } + + #[test] + fn retry_after_cooloff_should_work() { + with_externalities(&mut new_test_ext(true), || { + System::set_block_number(1); + let proposal = set_balance_proposal(42); + let hash = proposal.blake2_256().into(); + assert_ok!(CouncilVoting::propose( + Origin::signed(1), + Box::new(proposal.clone()) + )); + assert_ok!(CouncilVoting::veto(Origin::signed(2), hash)); + + System::set_block_number(3); + assert_ok!(CouncilVoting::propose( + Origin::signed(1), + Box::new(proposal.clone()) + )); + assert_ok!(CouncilVoting::vote(Origin::signed(2), hash, false)); + assert_ok!(CouncilVoting::vote(Origin::signed(3), hash, true)); + assert_ok!(CouncilVoting::end_block(System::block_number())); + + System::set_block_number(4); + assert_ok!(CouncilVoting::end_block(System::block_number())); + assert_eq!(CouncilVoting::proposals().len(), 0); + assert_eq!( + Democracy::active_referendums(), + vec![( + 0, + ReferendumInfo::new( + 7, + set_balance_proposal(42), + VoteThreshold::SimpleMajority, + 0 + ) + )] + ); + }); + } + + #[test] + fn alternative_double_veto_should_work() { + with_externalities(&mut new_test_ext(true), || { + System::set_block_number(1); + let proposal = set_balance_proposal(42); + let hash = proposal.blake2_256().into(); + assert_ok!(CouncilVoting::propose( + Origin::signed(1), + Box::new(proposal.clone()) + )); + assert_ok!(CouncilVoting::veto(Origin::signed(2), hash)); + + System::set_block_number(3); + assert_ok!(CouncilVoting::propose( + Origin::signed(1), + Box::new(proposal.clone()) + )); + assert_ok!(CouncilVoting::veto(Origin::signed(3), hash)); + assert_eq!(CouncilVoting::proposals().len(), 0); + assert_eq!(Democracy::active_referendums().len(), 0); + }); + } + + #[test] + fn simple_propose_should_work() { + with_externalities(&mut new_test_ext(true), || { + System::set_block_number(1); + let proposal = set_balance_proposal(42); + let hash = proposal.blake2_256().into(); + assert_ok!(CouncilVoting::propose( + Origin::signed(1), + Box::new(proposal.clone()) + )); + assert_eq!(CouncilVoting::proposals().len(), 1); + assert_eq!(CouncilVoting::proposal_voters(&hash), vec![1]); + assert_eq!(CouncilVoting::vote_of((hash, 1)), Some(true)); + assert_eq!(CouncilVoting::tally(&hash), (1, 0, 2)); + }); + } + + #[test] + fn unvoted_proposal_should_expire_without_action() { + with_externalities(&mut new_test_ext(true), || { + System::set_block_number(1); + let proposal = set_balance_proposal(42); + assert_ok!(CouncilVoting::propose( + Origin::signed(1), + Box::new(proposal.clone()) + )); + assert_eq!( + 
CouncilVoting::tally(&proposal.blake2_256().into()), + (1, 0, 2) + ); + assert_ok!(CouncilVoting::end_block(System::block_number())); + + System::set_block_number(2); + assert_ok!(CouncilVoting::end_block(System::block_number())); + assert_eq!(CouncilVoting::proposals().len(), 0); + assert_eq!(Democracy::active_referendums().len(), 0); + }); + } + + #[test] + fn unanimous_proposal_should_expire_with_biased_referendum() { + with_externalities(&mut new_test_ext(true), || { + System::set_block_number(1); + let proposal = set_balance_proposal(42); + assert_ok!(CouncilVoting::propose( + Origin::signed(1), + Box::new(proposal.clone()) + )); + assert_ok!(CouncilVoting::vote( + Origin::signed(2), + proposal.blake2_256().into(), + true + )); + assert_ok!(CouncilVoting::vote( + Origin::signed(3), + proposal.blake2_256().into(), + true + )); + assert_eq!( + CouncilVoting::tally(&proposal.blake2_256().into()), + (3, 0, 0) + ); + assert_ok!(CouncilVoting::end_block(System::block_number())); + + System::set_block_number(2); + assert_ok!(CouncilVoting::end_block(System::block_number())); + assert_eq!(CouncilVoting::proposals().len(), 0); + assert_eq!( + Democracy::active_referendums(), + vec![( + 0, + ReferendumInfo::new(5, proposal, VoteThreshold::SuperMajorityAgainst, 0) + )] + ); + }); + } + + #[test] + fn majority_proposal_should_expire_with_unbiased_referendum() { + with_externalities(&mut new_test_ext(true), || { + System::set_block_number(1); + let proposal = set_balance_proposal(42); + assert_ok!(CouncilVoting::propose( + Origin::signed(1), + Box::new(proposal.clone()) + )); + assert_ok!(CouncilVoting::vote( + Origin::signed(2), + proposal.blake2_256().into(), + true + )); + assert_ok!(CouncilVoting::vote( + Origin::signed(3), + proposal.blake2_256().into(), + false + )); + assert_eq!( + CouncilVoting::tally(&proposal.blake2_256().into()), + (2, 1, 0) + ); + assert_ok!(CouncilVoting::end_block(System::block_number())); + + System::set_block_number(2); + assert_ok!(CouncilVoting::end_block(System::block_number())); + assert_eq!(CouncilVoting::proposals().len(), 0); + assert_eq!( + Democracy::active_referendums(), + vec![( + 0, + ReferendumInfo::new(5, proposal, VoteThreshold::SimpleMajority, 0) + )] + ); + }); + } + + #[test] + fn propose_by_public_should_not_work() { + with_externalities(&mut new_test_ext(true), || { + System::set_block_number(1); + let proposal = set_balance_proposal(42); + assert_noop!( + CouncilVoting::propose(Origin::signed(4), Box::new(proposal)), + "proposer would not be on council" + ); + }); + } + + #[test] + fn vote_by_public_should_not_work() { + with_externalities(&mut new_test_ext(true), || { + System::set_block_number(1); + let proposal = set_balance_proposal(42); + assert_ok!(CouncilVoting::propose( + Origin::signed(1), + Box::new(proposal.clone()) + )); + assert_noop!( + CouncilVoting::vote(Origin::signed(4), proposal.blake2_256().into(), true), + "only councillors may vote on council proposals" + ); + }); + } } diff --git a/srml/democracy/src/lib.rs b/srml/democracy/src/lib.rs index 4bd7ad60aa..6001a36334 100644 --- a/srml/democracy/src/lib.rs +++ b/srml/democracy/src/lib.rs @@ -18,15 +18,19 @@ #![cfg_attr(not(feature = "std"), no_std)] +use parity_codec::{Decode, Encode}; +use primitives::traits::{As, Bounded, Zero}; use rstd::prelude::*; use rstd::result; -use primitives::traits::{Zero, As, Bounded}; -use parity_codec::{Encode, Decode}; -use srml_support::{StorageValue, StorageMap, Parameter, Dispatchable, IsSubType, EnumerableStorageMap}; -use 
srml_support::{decl_module, decl_storage, decl_event, ensure}; -use srml_support::traits::{Currency, ReservableCurrency, LockableCurrency, WithdrawReason, LockIdentifier, - OnFreeBalanceZero}; use srml_support::dispatch::Result; +use srml_support::traits::{ + Currency, LockIdentifier, LockableCurrency, OnFreeBalanceZero, ReservableCurrency, + WithdrawReason, +}; +use srml_support::{decl_event, decl_module, decl_storage, ensure}; +use srml_support::{ + Dispatchable, EnumerableStorageMap, IsSubType, Parameter, StorageMap, StorageValue, +}; use system::ensure_signed; mod vote_threshold; @@ -49,225 +53,232 @@ const MAX_RECURSION_LIMIT: u32 = 16; pub struct Vote(i8); impl Vote { - /// Create a new instance. - pub fn new(aye: bool, multiplier: LockPeriods) -> Self { - let m = multiplier.max(1) - 1; - Vote(if aye { - -1 - m - } else { - m - }) - } - - /// Is this an aye vote? - pub fn is_aye(self) -> bool { - self.0 < 0 - } - - /// The strength (measured in lock periods). - pub fn multiplier(self) -> LockPeriods { - 1 + if self.0 < 0 { -(self.0 + 1) } else { self.0 } - } + /// Create a new instance. + pub fn new(aye: bool, multiplier: LockPeriods) -> Self { + let m = multiplier.max(1) - 1; + Vote(if aye { -1 - m } else { m }) + } + + /// Is this an aye vote? + pub fn is_aye(self) -> bool { + self.0 < 0 + } + + /// The strength (measured in lock periods). + pub fn multiplier(self) -> LockPeriods { + 1 + if self.0 < 0 { -(self.0 + 1) } else { self.0 } + } } type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; pub trait Trait: system::Trait + Sized { - type Currency: ReservableCurrency + LockableCurrency; + type Currency: ReservableCurrency + + LockableCurrency; - type Proposal: Parameter + Dispatchable + IsSubType>; + type Proposal: Parameter + Dispatchable + IsSubType>; - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; } decl_module! { - pub struct Module for enum Call where origin: T::Origin { - fn deposit_event() = default; - - /// Propose a sensitive action to be taken. - fn propose( - origin, - proposal: Box, - #[compact] value: BalanceOf - ) { - let who = ensure_signed(origin)?; - - ensure!(value >= Self::minimum_deposit(), "value too low"); - T::Currency::reserve(&who, value) - .map_err(|_| "proposer's balance too low")?; - - let index = Self::public_prop_count(); - >::put(index + 1); - >::insert(index, (value, vec![who.clone()])); - - let mut props = Self::public_props(); - props.push((index, (*proposal).clone(), who)); - >::put(props); - - Self::deposit_event(RawEvent::Proposed(index, value)); - } - - /// Propose a sensitive action to be taken. - fn second(origin, #[compact] proposal: PropIndex) { - let who = ensure_signed(origin)?; - let mut deposit = Self::deposit_of(proposal) - .ok_or("can only second an existing proposal")?; - T::Currency::reserve(&who, deposit.0) - .map_err(|_| "seconder's balance too low")?; - deposit.1.push(who); - >::insert(proposal, deposit); - } - - /// Vote in a referendum. If `vote.is_aye()`, the vote is to enact the proposal; - /// otherwise it is a vote to keep the status quo. - fn vote(origin, #[compact] ref_index: ReferendumIndex, vote: Vote) -> Result { - let who = ensure_signed(origin)?; - Self::do_vote(who, ref_index, vote) - } - - /// Vote in a referendum on behalf of a stash. If `vote.is_aye()`, the vote is to enact the proposal; - /// otherwise it is a vote to keep the status quo. 
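// A minimal standalone sketch of the `Vote(i8)` packing above: the sign encodes the
// direction (ayes occupy the negative range as -1 - m) and the magnitude encodes the
// lock-period multiplier, so direction and strength round-trip through a single byte.
// Plain std Rust; `i8` stands in for `LockPeriods`.
#[derive(Clone, Copy)]
struct Vote(i8);

impl Vote {
    fn new(aye: bool, multiplier: i8) -> Self {
        let m = multiplier.max(1) - 1;
        Vote(if aye { -1 - m } else { m })
    }
    fn is_aye(self) -> bool {
        self.0 < 0
    }
    fn multiplier(self) -> i8 {
        1 + if self.0 < 0 { -(self.0 + 1) } else { self.0 }
    }
}

fn main() {
    let v = Vote::new(true, 3); // aye with 3 lock periods -> Vote(-3)
    assert!(v.is_aye() && v.multiplier() == 3);
    let w = Vote::new(false, 2); // nay with 2 lock periods -> Vote(1)
    assert!(!w.is_aye() && w.multiplier() == 2);
}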
- fn proxy_vote(origin, #[compact] ref_index: ReferendumIndex, vote: Vote) -> Result { - let who = Self::proxy(ensure_signed(origin)?).ok_or("not a proxy")?; - Self::do_vote(who, ref_index, vote) - } - - /// Start a referendum. - fn start_referendum(proposal: Box, threshold: VoteThreshold, delay: T::BlockNumber) -> Result { - Self::inject_referendum( - >::block_number() + Self::voting_period(), - *proposal, - threshold, - delay, - ).map(|_| ()) - } - - /// Remove a referendum. - fn cancel_referendum(#[compact] ref_index: ReferendumIndex) { - Self::clear_referendum(ref_index); - } - - /// Cancel a proposal queued for enactment. - pub fn cancel_queued(#[compact] when: T::BlockNumber, #[compact] which: u32) { - let which = which as usize; - >::mutate(when, |items| if items.len() > which { items[which] = None }); - } - - fn on_finalize(n: T::BlockNumber) { - if let Err(e) = Self::end_block(n) { - runtime_io::print(e); - } - } - - /// Specify a proxy. Called by the stash. - fn set_proxy(origin, proxy: T::AccountId) { - let who = ensure_signed(origin)?; - ensure!(!>::exists(&proxy), "already a proxy"); - >::insert(proxy, who) - } - - /// Clear the proxy. Called by the proxy. - fn resign_proxy(origin) { - let who = ensure_signed(origin)?; - >::remove(who); - } - - /// Clear the proxy. Called by the stash. - fn remove_proxy(origin, proxy: T::AccountId) { - let who = ensure_signed(origin)?; - ensure!(&Self::proxy(&proxy).ok_or("not a proxy")? == &who, "wrong proxy"); - >::remove(proxy); - } - - /// Delegate vote. - pub fn delegate(origin, to: T::AccountId, lock_periods: LockPeriods) { - let who = ensure_signed(origin)?; - >::insert(who.clone(), (to.clone(), lock_periods.clone())); - // Currency is locked indefinitely as long as it's delegated. - T::Currency::extend_lock(DEMOCRACY_ID, &who, Bounded::max_value(), T::BlockNumber::max_value(), WithdrawReason::Transfer.into()); - Self::deposit_event(RawEvent::Delegated(who, to)); - } - - /// Undelegate vote. - fn undelegate(origin) { - let who = ensure_signed(origin)?; - ensure!(>::exists(&who), "not delegated"); - let d = >::take(&who); - // Indefinite lock is reduced to the maximum voting lock that could be possible. - let lock_period = Self::public_delay(); - let now = >::block_number(); - let locked_until = now + lock_period * T::BlockNumber::sa(d.1 as u64); - T::Currency::set_lock(DEMOCRACY_ID, &who, Bounded::max_value(), locked_until, WithdrawReason::Transfer.into()); - Self::deposit_event(RawEvent::Undelegated(who)); - } - } + pub struct Module for enum Call where origin: T::Origin { + fn deposit_event() = default; + + /// Propose a sensitive action to be taken. + fn propose( + origin, + proposal: Box, + #[compact] value: BalanceOf + ) { + let who = ensure_signed(origin)?; + + ensure!(value >= Self::minimum_deposit(), "value too low"); + T::Currency::reserve(&who, value) + .map_err(|_| "proposer's balance too low")?; + + let index = Self::public_prop_count(); + >::put(index + 1); + >::insert(index, (value, vec![who.clone()])); + + let mut props = Self::public_props(); + props.push((index, (*proposal).clone(), who)); + >::put(props); + + Self::deposit_event(RawEvent::Proposed(index, value)); + } + + /// Propose a sensitive action to be taken. 
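// A compact standalone sketch of proposal deposits as used by `propose` above and
// `second` just below: the proposer's deposit is recorded as (value, [proposer]), each
// seconder reserves the same value and is appended, so the total locked behind a
// proposal is value * backers.len() (cf. `locked_for`). Plain std Rust with u64
// balances; `second` here is a hypothetical free function and the subtraction stands
// in for `T::Currency::reserve`.
fn second(deposit: &mut (u64, Vec<u64>), who: u64, free: &mut u64) -> Result<(), &'static str> {
    if *free < deposit.0 {
        return Err("seconder's balance too low");
    }
    *free -= deposit.0;
    deposit.1.push(who);
    Ok(())
}

fn main() {
    let mut deposit = (10u64, vec![1u64]); // proposer 1 put down 10
    let mut free = 25u64;
    second(&mut deposit, 2, &mut free).unwrap();
    assert_eq!(deposit.1, vec![1, 2]);
    assert_eq!(deposit.0 * deposit.1.len() as u64, 20); // total locked for the proposal
}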
+ fn second(origin, #[compact] proposal: PropIndex) { + let who = ensure_signed(origin)?; + let mut deposit = Self::deposit_of(proposal) + .ok_or("can only second an existing proposal")?; + T::Currency::reserve(&who, deposit.0) + .map_err(|_| "seconder's balance too low")?; + deposit.1.push(who); + >::insert(proposal, deposit); + } + + /// Vote in a referendum. If `vote.is_aye()`, the vote is to enact the proposal; + /// otherwise it is a vote to keep the status quo. + fn vote(origin, #[compact] ref_index: ReferendumIndex, vote: Vote) -> Result { + let who = ensure_signed(origin)?; + Self::do_vote(who, ref_index, vote) + } + + /// Vote in a referendum on behalf of a stash. If `vote.is_aye()`, the vote is to enact the proposal; + /// otherwise it is a vote to keep the status quo. + fn proxy_vote(origin, #[compact] ref_index: ReferendumIndex, vote: Vote) -> Result { + let who = Self::proxy(ensure_signed(origin)?).ok_or("not a proxy")?; + Self::do_vote(who, ref_index, vote) + } + + /// Start a referendum. + fn start_referendum(proposal: Box, threshold: VoteThreshold, delay: T::BlockNumber) -> Result { + Self::inject_referendum( + >::block_number() + Self::voting_period(), + *proposal, + threshold, + delay, + ).map(|_| ()) + } + + /// Remove a referendum. + fn cancel_referendum(#[compact] ref_index: ReferendumIndex) { + Self::clear_referendum(ref_index); + } + + /// Cancel a proposal queued for enactment. + pub fn cancel_queued(#[compact] when: T::BlockNumber, #[compact] which: u32) { + let which = which as usize; + >::mutate(when, |items| if items.len() > which { items[which] = None }); + } + + fn on_finalize(n: T::BlockNumber) { + if let Err(e) = Self::end_block(n) { + runtime_io::print(e); + } + } + + /// Specify a proxy. Called by the stash. + fn set_proxy(origin, proxy: T::AccountId) { + let who = ensure_signed(origin)?; + ensure!(!>::exists(&proxy), "already a proxy"); + >::insert(proxy, who) + } + + /// Clear the proxy. Called by the proxy. + fn resign_proxy(origin) { + let who = ensure_signed(origin)?; + >::remove(who); + } + + /// Clear the proxy. Called by the stash. + fn remove_proxy(origin, proxy: T::AccountId) { + let who = ensure_signed(origin)?; + ensure!(&Self::proxy(&proxy).ok_or("not a proxy")? == &who, "wrong proxy"); + >::remove(proxy); + } + + /// Delegate vote. + pub fn delegate(origin, to: T::AccountId, lock_periods: LockPeriods) { + let who = ensure_signed(origin)?; + >::insert(who.clone(), (to.clone(), lock_periods.clone())); + // Currency is locked indefinitely as long as it's delegated. + T::Currency::extend_lock(DEMOCRACY_ID, &who, Bounded::max_value(), T::BlockNumber::max_value(), WithdrawReason::Transfer.into()); + Self::deposit_event(RawEvent::Delegated(who, to)); + } + + /// Undelegate vote. + fn undelegate(origin) { + let who = ensure_signed(origin)?; + ensure!(>::exists(&who), "not delegated"); + let d = >::take(&who); + // Indefinite lock is reduced to the maximum voting lock that could be possible. + let lock_period = Self::public_delay(); + let now = >::block_number(); + let locked_until = now + lock_period * T::BlockNumber::sa(d.1 as u64); + T::Currency::set_lock(DEMOCRACY_ID, &who, Bounded::max_value(), locked_until, WithdrawReason::Transfer.into()); + Self::deposit_event(RawEvent::Undelegated(who)); + } + } } /// Info regarding an ongoing referendum. #[derive(Encode, Decode, Clone, PartialEq, Eq)] #[cfg_attr(feature = "std", derive(Debug))] pub struct ReferendumInfo { - /// When voting on this referendum will end. 
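// A one-line sketch of the lock arithmetic in `undelegate` above: the indefinite
// delegation lock is reduced to now + public_delay * lock_periods, the longest voting
// lock the delegation could still have earned. Plain std Rust with u64 block numbers.
fn locked_until(now: u64, public_delay: u64, lock_periods: u64) -> u64 {
    now + public_delay * lock_periods
}

fn main() {
    // undelegating at block 100 with a 10-block public delay and 3 lock periods
    assert_eq!(locked_until(100, 10, 3), 130);
}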
- end: BlockNumber, - /// The proposal being voted on. - proposal: Proposal, - /// The thresholding mechanism to determine whether it passed. - threshold: VoteThreshold, - /// The delay (in blocks) to wait after a successful referendum before deploying. - delay: BlockNumber, + /// When voting on this referendum will end. + end: BlockNumber, + /// The proposal being voted on. + proposal: Proposal, + /// The thresholding mechanism to determine whether it passed. + threshold: VoteThreshold, + /// The delay (in blocks) to wait after a successful referendum before deploying. + delay: BlockNumber, } impl ReferendumInfo { - /// Create a new instance. - pub fn new(end: BlockNumber, proposal: Proposal, threshold: VoteThreshold, delay: BlockNumber) -> Self { - ReferendumInfo { end, proposal, threshold, delay } - } + /// Create a new instance. + pub fn new( + end: BlockNumber, + proposal: Proposal, + threshold: VoteThreshold, + delay: BlockNumber, + ) -> Self { + ReferendumInfo { + end, + proposal, + threshold, + delay, + } + } } decl_storage! { - trait Store for Module as Democracy { - - /// The number of (public) proposals that have been made so far. - pub PublicPropCount get(public_prop_count) build(|_| 0 as PropIndex) : PropIndex; - /// The public proposals. Unsorted. - pub PublicProps get(public_props): Vec<(PropIndex, T::Proposal, T::AccountId)>; - /// Those who have locked a deposit. - pub DepositOf get(deposit_of): map PropIndex => Option<(BalanceOf, Vec)>; - /// How often (in blocks) new public referenda are launched. - pub LaunchPeriod get(launch_period) config(): T::BlockNumber = T::BlockNumber::sa(1000); - /// The minimum amount to be used as a deposit for a public referendum proposal. - pub MinimumDeposit get(minimum_deposit) config(): BalanceOf; - /// The delay before enactment for all public referenda. - pub PublicDelay get(public_delay) config(): T::BlockNumber; - /// The maximum number of additional lock periods a voter may offer to strengthen their vote. Multiples of `PublicDelay`. - pub MaxLockPeriods get(max_lock_periods) config(): LockPeriods; - - /// How often (in blocks) to check for new votes. - pub VotingPeriod get(voting_period) config(): T::BlockNumber = T::BlockNumber::sa(1000); - - /// The next free referendum index, aka the number of referendums started so far. - pub ReferendumCount get(referendum_count) build(|_| 0 as ReferendumIndex): ReferendumIndex; - /// The next referendum index that should be tallied. - pub NextTally get(next_tally) build(|_| 0 as ReferendumIndex): ReferendumIndex; - /// Information concerning any given referendum. - pub ReferendumInfoOf get(referendum_info): map ReferendumIndex => Option<(ReferendumInfo)>; - /// Queue of successful referenda to be dispatched. - pub DispatchQueue get(dispatch_queue): map T::BlockNumber => Vec>; - - /// Get the voters for the current proposal. - pub VotersFor get(voters_for): map ReferendumIndex => Vec; - - /// Get the vote in a given referendum of a particular voter. The result is meaningful only if `voters_for` includes the - /// voter when called with the referendum (you'll get the default `Vote` value otherwise). If you don't want to check - /// `voters_for`, then you can also check for simple existence with `VoteOf::exists` first. - pub VoteOf get(vote_of): map (ReferendumIndex, T::AccountId) => Vote; - - /// Who is able to vote for whom. Value is the fund-holding account, key is the vote-transaction-sending account. 
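// A minimal standalone sketch of why `vote_of` alone is ambiguous, as the `VoteOf`
// comment above warns: an absent entry decodes to the default `Vote(0)`, which reads
// as a nay with one lock period, so callers must first check membership in
// `voters_for` (or use `VoteOf::exists`). Plain std Rust reusing the `Vote(i8)`
// packing.
#[derive(Clone, Copy, Default)]
struct Vote(i8);

impl Vote {
    fn is_aye(self) -> bool {
        self.0 < 0
    }
    fn multiplier(self) -> i8 {
        1 + if self.0 < 0 { -(self.0 + 1) } else { self.0 }
    }
}

fn main() {
    let missing = Vote::default(); // what the storage map yields for an absent voter
    assert!(!missing.is_aye());
    assert_eq!(missing.multiplier(), 1); // indistinguishable from a genuine nay
}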
- pub Proxy get(proxy): map T::AccountId => Option; - - /// Get the account (and lock periods) to which another account is delegating vote. - pub Delegations get(delegations): linked_map T::AccountId => (T::AccountId, LockPeriods); - } + trait Store for Module as Democracy { + + /// The number of (public) proposals that have been made so far. + pub PublicPropCount get(public_prop_count) build(|_| 0 as PropIndex) : PropIndex; + /// The public proposals. Unsorted. + pub PublicProps get(public_props): Vec<(PropIndex, T::Proposal, T::AccountId)>; + /// Those who have locked a deposit. + pub DepositOf get(deposit_of): map PropIndex => Option<(BalanceOf, Vec)>; + /// How often (in blocks) new public referenda are launched. + pub LaunchPeriod get(launch_period) config(): T::BlockNumber = T::BlockNumber::sa(1000); + /// The minimum amount to be used as a deposit for a public referendum proposal. + pub MinimumDeposit get(minimum_deposit) config(): BalanceOf; + /// The delay before enactment for all public referenda. + pub PublicDelay get(public_delay) config(): T::BlockNumber; + /// The maximum number of additional lock periods a voter may offer to strengthen their vote. Multiples of `PublicDelay`. + pub MaxLockPeriods get(max_lock_periods) config(): LockPeriods; + + /// How often (in blocks) to check for new votes. + pub VotingPeriod get(voting_period) config(): T::BlockNumber = T::BlockNumber::sa(1000); + + /// The next free referendum index, aka the number of referendums started so far. + pub ReferendumCount get(referendum_count) build(|_| 0 as ReferendumIndex): ReferendumIndex; + /// The next referendum index that should be tallied. + pub NextTally get(next_tally) build(|_| 0 as ReferendumIndex): ReferendumIndex; + /// Information concerning any given referendum. + pub ReferendumInfoOf get(referendum_info): map ReferendumIndex => Option<(ReferendumInfo)>; + /// Queue of successful referenda to be dispatched. + pub DispatchQueue get(dispatch_queue): map T::BlockNumber => Vec>; + + /// Get the voters for the current proposal. + pub VotersFor get(voters_for): map ReferendumIndex => Vec; + + /// Get the vote in a given referendum of a particular voter. The result is meaningful only if `voters_for` includes the + /// voter when called with the referendum (you'll get the default `Vote` value otherwise). If you don't want to check + /// `voters_for`, then you can also check for simple existence with `VoteOf::exists` first. + pub VoteOf get(vote_of): map (ReferendumIndex, T::AccountId) => Vote; + + /// Who is able to vote for whom. Value is the fund-holding account, key is the vote-transaction-sending account. + pub Proxy get(proxy): map T::AccountId => Option; + + /// Get the account (and lock periods) to which another account is delegating vote. + pub Delegations get(delegations): linked_map T::AccountId => (T::AccountId, LockPeriods); + } } decl_event!( @@ -285,155 +296,235 @@ decl_event!( ); impl Module { - // exposed immutables. - - /// Get the amount locked in support of `proposal`; `None` if proposal isn't a valid proposal - /// index. - pub fn locked_for(proposal: PropIndex) -> Option> { - Self::deposit_of(proposal).map(|(d, l)| d * BalanceOf::::sa(l.len() as u64)) - } - - /// Return true if `ref_index` is an on-going referendum. - pub fn is_active_referendum(ref_index: ReferendumIndex) -> bool { - >::exists(ref_index) - } - - /// Get all referendums currently active. 
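// A small self-contained sketch of the index window that `active_referendums` (whose
// body follows) walks: scan from the next untallied referendum up to the total count,
// keeping only indices whose info is still stored, since cancelled referenda have had
// theirs removed. Plain std Rust; the &'static str payload stands in for
// `ReferendumInfo`.
fn active(next_tally: u32, count: u32, info: impl Fn(u32) -> Option<&'static str>) -> Vec<(u32, &'static str)> {
    (next_tally..count)
        .filter_map(|i| info(i).map(|inf| (i, inf)))
        .collect()
}

fn main() {
    // referendum 1 was cancelled; 0 and 2 are still live
    let live = active(0, 3, |i| if i == 1 { None } else { Some("info") });
    assert_eq!(live, vec![(0, "info"), (2, "info")]);
}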
- pub fn active_referendums() -> Vec<(ReferendumIndex, ReferendumInfo)> { - let next = Self::next_tally(); - let last = Self::referendum_count(); - (next..last).into_iter() - .filter_map(|i| Self::referendum_info(i).map(|info| (i, info))) - .collect() - } - - /// Get all referendums ready for tally at block `n`. - pub fn maturing_referendums_at(n: T::BlockNumber) -> Vec<(ReferendumIndex, ReferendumInfo)> { - let next = Self::next_tally(); - let last = Self::referendum_count(); - (next..last).into_iter() - .filter_map(|i| Self::referendum_info(i).map(|info| (i, info))) - .take_while(|&(_, ref info)| info.end == n) - .collect() - } - - /// Get the voters for the current proposal. - pub fn tally(ref_index: ReferendumIndex) -> (BalanceOf, BalanceOf, BalanceOf) { - let (approve, against, capital): (BalanceOf, BalanceOf, BalanceOf) = Self::voters_for(ref_index).iter() - .map(|voter| ( - T::Currency::total_balance(voter), Self::vote_of((ref_index, voter.clone())) - )) - .map(|(bal, vote)| - if vote.is_aye() { - (bal * BalanceOf::::sa(vote.multiplier() as u64), Zero::zero(), bal) - } else { - (Zero::zero(), bal * BalanceOf::::sa(vote.multiplier() as u64), bal) - } - ).fold((Zero::zero(), Zero::zero(), Zero::zero()), |(a, b, c), (d, e, f)| (a + d, b + e, c + f)); - let (del_approve, del_against, del_capital) = Self::tally_delegation(ref_index); - (approve + del_approve, against + del_against, capital + del_capital) - } - - /// Get the delegated voters for the current proposal. - /// I think this goes into a worker once https://github.com/paritytech/substrate/issues/1458 is done. - fn tally_delegation(ref_index: ReferendumIndex) -> (BalanceOf, BalanceOf, BalanceOf) { - Self::voters_for(ref_index).iter() - .fold((Zero::zero(), Zero::zero(), Zero::zero()), |(approve_acc, against_acc, capital_acc), voter| { - let vote = Self::vote_of((ref_index, voter.clone())); - let (votes, balance) = Self::delegated_votes(ref_index, voter.clone(), vote.multiplier(), MAX_RECURSION_LIMIT); - if vote.is_aye() { - (approve_acc + votes, against_acc, capital_acc + balance) - } else { - (approve_acc, against_acc + votes, capital_acc + balance) - } - }) - } - - fn delegated_votes( - ref_index: ReferendumIndex, - to: T::AccountId, - min_lock_periods: LockPeriods, - recursion_limit: u32, - ) -> (BalanceOf, BalanceOf) { - if recursion_limit == 0 { return (Zero::zero(), Zero::zero()); } - >::enumerate() - .filter(|(delegator, (delegate, _))| *delegate == to && !>::exists(&(ref_index, delegator.clone()))) - .fold((Zero::zero(), Zero::zero()), |(votes_acc, balance_acc), (delegator, (_delegate, periods))| { - let lock_periods = if min_lock_periods <= periods { min_lock_periods } else { periods }; - let balance = T::Currency::total_balance(&delegator); - let votes = T::Currency::total_balance(&delegator) * BalanceOf::::sa(lock_periods as u64); - let (del_votes, del_balance) = Self::delegated_votes(ref_index, delegator, lock_periods, recursion_limit - 1); - (votes_acc + votes + del_votes, balance_acc + balance + del_balance) - }) - } - - // Exposed mutables. - - #[cfg(feature = "std")] - pub fn force_proxy(stash: T::AccountId, proxy: T::AccountId) { - >::insert(proxy, stash) - } - - /// Start a referendum. Can be called directly by the council. - pub fn internal_start_referendum(proposal: T::Proposal, threshold: VoteThreshold, delay: T::BlockNumber) -> result::Result { - >::inject_referendum(>::block_number() + >::voting_period(), proposal, threshold, delay) - } - - /// Remove a referendum. Can be called directly by the council. 
- pub fn internal_cancel_referendum(ref_index: ReferendumIndex) { - Self::deposit_event(RawEvent::Cancelled(ref_index)); - >::clear_referendum(ref_index); - } - - // private. - - /// Actually enact a vote, if legit. - fn do_vote(who: T::AccountId, ref_index: ReferendumIndex, vote: Vote) -> Result { - ensure!(vote.multiplier() <= Self::max_lock_periods(), "vote has too great a strength"); - ensure!(Self::is_active_referendum(ref_index), "vote given for invalid referendum."); - if !>::exists(&(ref_index, who.clone())) { - >::mutate(ref_index, |voters| voters.push(who.clone())); - } - >::insert(&(ref_index, who), vote); - Ok(()) - } - - /// Start a referendum - fn inject_referendum( - end: T::BlockNumber, - proposal: T::Proposal, - threshold: VoteThreshold, - delay: T::BlockNumber, - ) -> result::Result { - let ref_index = Self::referendum_count(); - if ref_index > 0 && Self::referendum_info(ref_index - 1).map(|i| i.end > end).unwrap_or(false) { - Err("Cannot inject a referendum that ends earlier than preceeding referendum")? - } - - >::put(ref_index + 1); - >::insert(ref_index, ReferendumInfo { end, proposal, threshold, delay }); - Self::deposit_event(RawEvent::Started(ref_index, threshold)); - Ok(ref_index) - } - - /// Remove all info on a referendum. - fn clear_referendum(ref_index: ReferendumIndex) { - >::remove(ref_index); - >::remove(ref_index); - for v in Self::voters_for(ref_index) { - >::remove((ref_index, v)); - } - } - - /// Enact a proposal from a referendum. - fn enact_proposal(proposal: T::Proposal, index: ReferendumIndex) { - let ok = proposal.dispatch(system::RawOrigin::Root.into()).is_ok(); - Self::deposit_event(RawEvent::Executed(index, ok)); - } - - fn launch_next(now: T::BlockNumber) -> Result { - let mut public_props = Self::public_props(); - if let Some((winner_index, _)) = public_props.iter() + // exposed immutables. + + /// Get the amount locked in support of `proposal`; `None` if proposal isn't a valid proposal + /// index. + pub fn locked_for(proposal: PropIndex) -> Option> { + Self::deposit_of(proposal).map(|(d, l)| d * BalanceOf::::sa(l.len() as u64)) + } + + /// Return true if `ref_index` is an on-going referendum. + pub fn is_active_referendum(ref_index: ReferendumIndex) -> bool { + >::exists(ref_index) + } + + /// Get all referendums currently active. + pub fn active_referendums( + ) -> Vec<(ReferendumIndex, ReferendumInfo)> { + let next = Self::next_tally(); + let last = Self::referendum_count(); + (next..last) + .into_iter() + .filter_map(|i| Self::referendum_info(i).map(|info| (i, info))) + .collect() + } + + /// Get all referendums ready for tally at block `n`. + pub fn maturing_referendums_at( + n: T::BlockNumber, + ) -> Vec<(ReferendumIndex, ReferendumInfo)> { + let next = Self::next_tally(); + let last = Self::referendum_count(); + (next..last) + .into_iter() + .filter_map(|i| Self::referendum_info(i).map(|info| (i, info))) + .take_while(|&(_, ref info)| info.end == n) + .collect() + } + + /// Get the voters for the current proposal. 
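// [Editor's note] A sketch (illustration only, not part of this patch) of the
// fold that `tally` performs below: each voter contributes
// `balance * multiplier` to the aye or nay column and their raw balance to the
// capital column. Over plain integers, with hypothetical
// (balance, is_aye, multiplier) entries:
//
//     fn tally_plain(votes: &[(u64, bool, u64)]) -> (u64, u64, u64) {
//         votes.iter().fold((0, 0, 0), |(ayes, nays, capital), &(bal, aye, m)| {
//             if aye {
//                 (ayes + bal * m, nays, capital + bal)
//             } else {
//                 (ayes, nays + bal * m, capital + bal)
//             }
//         })
//     }
//
// e.g. tally_plain(&[(10, true, 1), (20, false, 2)]) == (10, 40, 30).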
+ pub fn tally(ref_index: ReferendumIndex) -> (BalanceOf, BalanceOf, BalanceOf) { + let (approve, against, capital): (BalanceOf, BalanceOf, BalanceOf) = + Self::voters_for(ref_index) + .iter() + .map(|voter| { + ( + T::Currency::total_balance(voter), + Self::vote_of((ref_index, voter.clone())), + ) + }) + .map(|(bal, vote)| { + if vote.is_aye() { + ( + bal * BalanceOf::::sa(vote.multiplier() as u64), + Zero::zero(), + bal, + ) + } else { + ( + Zero::zero(), + bal * BalanceOf::::sa(vote.multiplier() as u64), + bal, + ) + } + }) + .fold( + (Zero::zero(), Zero::zero(), Zero::zero()), + |(a, b, c), (d, e, f)| (a + d, b + e, c + f), + ); + let (del_approve, del_against, del_capital) = Self::tally_delegation(ref_index); + ( + approve + del_approve, + against + del_against, + capital + del_capital, + ) + } + + /// Get the delegated voters for the current proposal. + /// I think this goes into a worker once https://github.com/paritytech/substrate/issues/1458 is done. + fn tally_delegation(ref_index: ReferendumIndex) -> (BalanceOf, BalanceOf, BalanceOf) { + Self::voters_for(ref_index).iter().fold( + (Zero::zero(), Zero::zero(), Zero::zero()), + |(approve_acc, against_acc, capital_acc), voter| { + let vote = Self::vote_of((ref_index, voter.clone())); + let (votes, balance) = Self::delegated_votes( + ref_index, + voter.clone(), + vote.multiplier(), + MAX_RECURSION_LIMIT, + ); + if vote.is_aye() { + (approve_acc + votes, against_acc, capital_acc + balance) + } else { + (approve_acc, against_acc + votes, capital_acc + balance) + } + }, + ) + } + + fn delegated_votes( + ref_index: ReferendumIndex, + to: T::AccountId, + min_lock_periods: LockPeriods, + recursion_limit: u32, + ) -> (BalanceOf, BalanceOf) { + if recursion_limit == 0 { + return (Zero::zero(), Zero::zero()); + } + >::enumerate() + .filter(|(delegator, (delegate, _))| { + *delegate == to && !>::exists(&(ref_index, delegator.clone())) + }) + .fold( + (Zero::zero(), Zero::zero()), + |(votes_acc, balance_acc), (delegator, (_delegate, periods))| { + let lock_periods = if min_lock_periods <= periods { + min_lock_periods + } else { + periods + }; + let balance = T::Currency::total_balance(&delegator); + let votes = T::Currency::total_balance(&delegator) + * BalanceOf::::sa(lock_periods as u64); + let (del_votes, del_balance) = Self::delegated_votes( + ref_index, + delegator, + lock_periods, + recursion_limit - 1, + ); + ( + votes_acc + votes + del_votes, + balance_acc + balance + del_balance, + ) + }, + ) + } + + // Exposed mutables. + + #[cfg(feature = "std")] + pub fn force_proxy(stash: T::AccountId, proxy: T::AccountId) { + >::insert(proxy, stash) + } + + /// Start a referendum. Can be called directly by the council. + pub fn internal_start_referendum( + proposal: T::Proposal, + threshold: VoteThreshold, + delay: T::BlockNumber, + ) -> result::Result { + >::inject_referendum( + >::block_number() + >::voting_period(), + proposal, + threshold, + delay, + ) + } + + /// Remove a referendum. Can be called directly by the council. + pub fn internal_cancel_referendum(ref_index: ReferendumIndex) { + Self::deposit_event(RawEvent::Cancelled(ref_index)); + >::clear_referendum(ref_index); + } + + // private. + + /// Actually enact a vote, if legit. + fn do_vote(who: T::AccountId, ref_index: ReferendumIndex, vote: Vote) -> Result { + ensure!( + vote.multiplier() <= Self::max_lock_periods(), + "vote has too great a strength" + ); + ensure!( + Self::is_active_referendum(ref_index), + "vote given for invalid referendum." 
+        );
+        if !<VoteOf<T>>::exists(&(ref_index, who.clone())) {
+            <VotersFor<T>>::mutate(ref_index, |voters| voters.push(who.clone()));
+        }
+        <VoteOf<T>>::insert(&(ref_index, who), vote);
+        Ok(())
+    }
+
+    /// Start a referendum
+    fn inject_referendum(
+        end: T::BlockNumber,
+        proposal: T::Proposal,
+        threshold: VoteThreshold,
+        delay: T::BlockNumber,
+    ) -> result::Result<ReferendumIndex, &'static str> {
+        let ref_index = Self::referendum_count();
+        if ref_index > 0
+            && Self::referendum_info(ref_index - 1)
+                .map(|i| i.end > end)
+                .unwrap_or(false)
+        {
+            Err("Cannot inject a referendum that ends earlier than preceding referendum")?
+        }
+
+        <ReferendumCount<T>>::put(ref_index + 1);
+        <ReferendumInfoOf<T>>::insert(
+            ref_index,
+            ReferendumInfo {
+                end,
+                proposal,
+                threshold,
+                delay,
+            },
+        );
+        Self::deposit_event(RawEvent::Started(ref_index, threshold));
+        Ok(ref_index)
+    }
+
+    /// Remove all info on a referendum.
+    fn clear_referendum(ref_index: ReferendumIndex) {
+        <ReferendumInfoOf<T>>::remove(ref_index);
+        <VotersFor<T>>::remove(ref_index);
+        for v in Self::voters_for(ref_index) {
+            <VoteOf<T>>::remove((ref_index, v));
+        }
+    }
+
+    /// Enact a proposal from a referendum.
+    fn enact_proposal(proposal: T::Proposal, index: ReferendumIndex) {
+        let ok = proposal.dispatch(system::RawOrigin::Root.into()).is_ok();
+        Self::deposit_event(RawEvent::Executed(index, ok));
+    }
+
+    fn launch_next(now: T::BlockNumber) -> Result {
+        let mut public_props = Self::public_props();
+        if let Some((winner_index, _)) = public_props.iter()
            .enumerate()
            .max_by_key(|x| Self::locked_for((x.1).0).unwrap_or_else(Zero::zero)/*defensive only: All current public proposals have an amount locked*/)
        {
@@ -450,702 +541,844 @@ impl<T: Trait> Module<T> {
        }
    }
-        Ok(())
-    }
-
-    fn bake_referendum(now: T::BlockNumber, index: ReferendumIndex, info: ReferendumInfo<T::BlockNumber, T::Proposal>) -> Result {
-        let (approve, against, capital) = Self::tally(index);
-        let total_issuance = T::Currency::total_issuance();
-        let approved = info.threshold.approved(approve, against, capital, total_issuance);
-        let lock_period = Self::public_delay();
-
-        // Logic defined in https://www.slideshare.net/gavofyork/governance-in-polkadot-poc3
-        // Essentially, we extend the lock-period of the coins behind the winning votes to be the
-        // vote strength times the public delay period from now.
-        for (a, vote) in Self::voters_for(index).into_iter()
-            .map(|a| (a.clone(), Self::vote_of((index, a))))
-            // ^^^ defensive only: all items come from `voters`; for an item to be in `voters` there must be a vote registered; qed
-            .filter(|&(_, vote)| vote.is_aye() == approved) // Just the winning coins
-        {
-            // now plus: the base lock period multiplied by the number of periods this voter offered to
-            // lock should they win...
-            let locked_until = now + lock_period * T::BlockNumber::sa((vote.multiplier()) as u64);
-            // ...extend their bondage until at least then.
-            T::Currency::extend_lock(DEMOCRACY_ID, &a, Bounded::max_value(), locked_until, WithdrawReason::Transfer.into());
-        }
-
-        Self::clear_referendum(index);
-        if approved {
-            Self::deposit_event(RawEvent::Passed(index));
-            if info.delay.is_zero() {
-                Self::enact_proposal(info.proposal, index);
-            } else {
-                <DispatchQueue<T>>::mutate(now + info.delay, |q| q.push(Some((info.proposal, index))));
-            }
-        } else {
-            Self::deposit_event(RawEvent::NotPassed(index));
-        }
-        <NextTally<T>>::put(index + 1);
-
-        Ok(())
-    }
-
-    /// Current era is ending; we should finish up any proposals.
-    fn end_block(now: T::BlockNumber) -> Result {
-        // pick out another public referendum if it's time.
-        if (now % Self::launch_period()).is_zero() {
-            Self::launch_next(now.clone())?;
-        }
-
-        // tally up votes for any expiring referenda.
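// [Editor's note] The lock arithmetic in `bake_referendum` above, as a
// minimal stand-alone sketch (editor's illustration, not part of this patch):
//
//     fn locked_until(now: u64, lock_period: u64, multiplier: u64) -> u64 {
//         // base lock period times the number of periods the voter offered
//         now + lock_period * multiplier
//     }
//
// With `public_delay = 1` and `now = 1`, a winning multiplier-5 vote stays
// locked until block 6 -- exactly what `lock_voting_should_work` asserts
// further below.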
- for (index, info) in Self::maturing_referendums_at(now).into_iter() { - Self::bake_referendum(now.clone(), index, info)?; - } - - for (proposal, index) in >::take(now).into_iter().filter_map(|x| x) { - Self::enact_proposal(proposal, index); - } - Ok(()) - } + Ok(()) + } + + fn bake_referendum( + now: T::BlockNumber, + index: ReferendumIndex, + info: ReferendumInfo, + ) -> Result { + let (approve, against, capital) = Self::tally(index); + let total_issuance = T::Currency::total_issuance(); + let approved = info + .threshold + .approved(approve, against, capital, total_issuance); + let lock_period = Self::public_delay(); + + // Logic defined in https://www.slideshare.net/gavofyork/governance-in-polkadot-poc3 + // Essentially, we extend the lock-period of the coins behind the winning votes to be the + // vote strength times the public delay period from now. + for (a, vote) in Self::voters_for(index) + .into_iter() + .map(|a| (a.clone(), Self::vote_of((index, a)))) + // ^^^ defensive only: all items come from `voters`; for an item to be in `voters` there must be a vote registered; qed + .filter(|&(_, vote)| vote.is_aye() == approved) + // Just the winning coins + { + // now plus: the base lock period multiplied by the number of periods this voter offered to + // lock should they win... + let locked_until = now + lock_period * T::BlockNumber::sa((vote.multiplier()) as u64); + // ...extend their bondage until at least then. + T::Currency::extend_lock( + DEMOCRACY_ID, + &a, + Bounded::max_value(), + locked_until, + WithdrawReason::Transfer.into(), + ); + } + + Self::clear_referendum(index); + if approved { + Self::deposit_event(RawEvent::Passed(index)); + if info.delay.is_zero() { + Self::enact_proposal(info.proposal, index); + } else { + >::mutate(now + info.delay, |q| { + q.push(Some((info.proposal, index))) + }); + } + } else { + Self::deposit_event(RawEvent::NotPassed(index)); + } + >::put(index + 1); + + Ok(()) + } + + /// Current era is ending; we should finish up any proposals. + fn end_block(now: T::BlockNumber) -> Result { + // pick out another public referendum if it's time. + if (now % Self::launch_period()).is_zero() { + Self::launch_next(now.clone())?; + } + + // tally up votes for any expiring referenda. + for (index, info) in Self::maturing_referendums_at(now).into_iter() { + Self::bake_referendum(now.clone(), index, info)?; + } + + for (proposal, index) in >::take(now).into_iter().filter_map(|x| x) { + Self::enact_proposal(proposal, index); + } + Ok(()) + } } impl OnFreeBalanceZero for Module { - fn on_free_balance_zero(who: &T::AccountId) { - >::remove(who) - } + fn on_free_balance_zero(who: &T::AccountId) { + >::remove(who) + } } #[cfg(test)] mod tests { - use super::*; - use runtime_io::with_externalities; - use srml_support::{impl_outer_origin, impl_outer_dispatch, assert_noop, assert_ok}; - use substrate_primitives::{H256, Blake2Hasher}; - use primitives::BuildStorage; - use primitives::traits::{BlakeTwo256, IdentityLookup}; - use primitives::testing::{Digest, DigestItem, Header}; - use balances::BalanceLock; - - const AYE: Vote = Vote(-1); - const NAY: Vote = Vote(0); - - impl_outer_origin! 
{ - pub enum Origin for Test {} - } + use super::*; + use balances::BalanceLock; + use primitives::testing::{Digest, DigestItem, Header}; + use primitives::traits::{BlakeTwo256, IdentityLookup}; + use primitives::BuildStorage; + use runtime_io::with_externalities; + use srml_support::{assert_noop, assert_ok, impl_outer_dispatch, impl_outer_origin}; + use substrate_primitives::{Blake2Hasher, H256}; + + const AYE: Vote = Vote(-1); + const NAY: Vote = Vote(0); + + impl_outer_origin! { + pub enum Origin for Test {} + } + + impl_outer_dispatch! { + pub enum Call for Test where origin: Origin { + balances::Balances, + democracy::Democracy, + } + } + + // Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted. + #[derive(Clone, Eq, PartialEq, Debug)] + pub struct Test; + impl system::Trait for Test { + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type Digest = Digest; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = (); + type Log = DigestItem; + } + impl balances::Trait for Test { + type Balance = u64; + type OnFreeBalanceZero = (); + type OnNewAccount = (); + type Event = (); + type TransactionPayment = (); + type TransferPayment = (); + type DustRemoval = (); + } + impl Trait for Test { + type Currency = balances::Module; + type Proposal = Call; + type Event = (); + } + + fn new_test_ext() -> runtime_io::TestExternalities { + new_test_ext_with_public_delay(0) + } + + fn new_test_ext_with_public_delay( + public_delay: u64, + ) -> runtime_io::TestExternalities { + let mut t = system::GenesisConfig::::default() + .build_storage() + .unwrap() + .0; + t.extend( + balances::GenesisConfig:: { + transaction_base_fee: 0, + transaction_byte_fee: 0, + balances: vec![(1, 10), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)], + existential_deposit: 0, + transfer_fee: 0, + creation_fee: 0, + vesting: vec![], + } + .build_storage() + .unwrap() + .0, + ); + t.extend( + GenesisConfig:: { + launch_period: 1, + voting_period: 1, + minimum_deposit: 1, + public_delay, + max_lock_periods: 6, + } + .build_storage() + .unwrap() + .0, + ); + runtime_io::TestExternalities::new(t) + } + + type System = system::Module; + type Balances = balances::Module; + type Democracy = Module; + + #[test] + fn params_should_work() { + with_externalities(&mut new_test_ext(), || { + assert_eq!(Democracy::launch_period(), 1); + assert_eq!(Democracy::voting_period(), 1); + assert_eq!(Democracy::minimum_deposit(), 1); + assert_eq!(Democracy::referendum_count(), 0); + assert_eq!(Balances::free_balance(&42), 0); + assert_eq!(Balances::total_issuance(), 210); + assert_eq!(Democracy::public_delay(), 0); + assert_eq!(Democracy::max_lock_periods(), 6); + }); + } + + #[test] + fn vote_should_work() { + assert_eq!(Vote::new(true, 0).multiplier(), 1); + assert_eq!(Vote::new(true, 1).multiplier(), 1); + assert_eq!(Vote::new(true, 2).multiplier(), 2); + assert_eq!(Vote::new(true, 0).is_aye(), true); + assert_eq!(Vote::new(true, 1).is_aye(), true); + assert_eq!(Vote::new(true, 2).is_aye(), true); + assert_eq!(Vote::new(false, 0).multiplier(), 1); + assert_eq!(Vote::new(false, 1).multiplier(), 1); + assert_eq!(Vote::new(false, 2).multiplier(), 2); + assert_eq!(Vote::new(false, 0).is_aye(), false); + assert_eq!(Vote::new(false, 1).is_aye(), false); + assert_eq!(Vote::new(false, 2).is_aye(), false); + } + + #[test] + fn invalid_vote_strength_should_not_work() { + with_externalities(&mut new_test_ext(), || { + 
System::set_block_number(1); + let r = Democracy::inject_referendum( + 1, + set_balance_proposal(2), + VoteThreshold::SuperMajorityApprove, + 0, + ) + .unwrap(); + assert_noop!( + Democracy::vote(Origin::signed(1), r, Vote::new(true, 7)), + "vote has too great a strength" + ); + assert_noop!( + Democracy::vote(Origin::signed(1), r, Vote::new(false, 7)), + "vote has too great a strength" + ); + }); + } + + fn set_balance_proposal(value: u64) -> Call { + Call::Balances(balances::Call::set_balance(42, value.into(), 0)) + } + + fn propose_set_balance(who: u64, value: u64, locked: u64) -> super::Result { + Democracy::propose( + Origin::signed(who), + Box::new(set_balance_proposal(value)), + locked.into(), + ) + } + + #[test] + fn locked_for_should_work() { + with_externalities(&mut new_test_ext(), || { + System::set_block_number(1); + assert_ok!(propose_set_balance(1, 2, 2)); + assert_ok!(propose_set_balance(1, 4, 4)); + assert_ok!(propose_set_balance(1, 3, 3)); + assert_eq!(Democracy::locked_for(0), Some(2)); + assert_eq!(Democracy::locked_for(1), Some(4)); + assert_eq!(Democracy::locked_for(2), Some(3)); + }); + } + + #[test] + fn single_proposal_should_work() { + with_externalities(&mut new_test_ext(), || { + System::set_block_number(1); + assert_ok!(propose_set_balance(1, 2, 1)); + assert_eq!(Democracy::end_block(System::block_number()), Ok(())); + + System::set_block_number(2); + let r = 0; + assert_ok!(Democracy::vote(Origin::signed(1), r, AYE)); + + assert_eq!(Democracy::referendum_count(), 1); + assert_eq!(Democracy::voters_for(r), vec![1]); + assert_eq!(Democracy::vote_of((r, 1)), AYE); + assert_eq!(Democracy::tally(r), (10, 0, 10)); + + assert_eq!(Democracy::end_block(System::block_number()), Ok(())); + assert_eq!(Balances::free_balance(&42), 2); + }); + } + + #[test] + fn proxy_should_work() { + with_externalities(&mut new_test_ext(), || { + assert_eq!(Democracy::proxy(10), None); + assert_ok!(Democracy::set_proxy(Origin::signed(1), 10)); + assert_eq!(Democracy::proxy(10), Some(1)); + + // Can't set when already set. + assert_noop!( + Democracy::set_proxy(Origin::signed(2), 10), + "already a proxy" + ); + + // But this works because 11 isn't proxying. 
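// [Editor's note] The `Proxy` storage map is keyed by the *proxying*
// (vote-sending) account, with the stash as the value, so `Democracy::proxy(10)`
// returns `Some(1)` once account 1 appoints 10. A plain-HashMap sketch of the
// "one stash per proxy" rule exercised in this test (hypothetical helper, not
// the module's API):
//
//     use std::collections::HashMap;
//
//     fn set_proxy(map: &mut HashMap<u64, u64>, stash: u64, proxy: u64) -> Result<(), &'static str> {
//         // refuse while an entry for this proxy already exists
//         if map.contains_key(&proxy) { return Err("already a proxy"); }
//         map.insert(proxy, stash);
//         Ok(())
//     }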
+ assert_ok!(Democracy::set_proxy(Origin::signed(2), 11)); + assert_eq!(Democracy::proxy(10), Some(1)); + assert_eq!(Democracy::proxy(11), Some(2)); + + // 2 cannot fire 1's proxy: + assert_noop!( + Democracy::remove_proxy(Origin::signed(2), 10), + "wrong proxy" + ); + + // 1 fires his proxy: + assert_ok!(Democracy::remove_proxy(Origin::signed(1), 10)); + assert_eq!(Democracy::proxy(10), None); + assert_eq!(Democracy::proxy(11), Some(2)); + + // 11 resigns: + assert_ok!(Democracy::resign_proxy(Origin::signed(11))); + assert_eq!(Democracy::proxy(10), None); + assert_eq!(Democracy::proxy(11), None); + }); + } + + #[test] + fn single_proposal_should_work_with_proxy() { + with_externalities(&mut new_test_ext(), || { + System::set_block_number(1); + assert_ok!(propose_set_balance(1, 2, 1)); + assert_eq!(Democracy::end_block(System::block_number()), Ok(())); + + System::set_block_number(2); + let r = 0; + assert_ok!(Democracy::set_proxy(Origin::signed(1), 10)); + assert_ok!(Democracy::proxy_vote(Origin::signed(10), r, AYE)); + + assert_eq!(Democracy::referendum_count(), 1); + assert_eq!(Democracy::voters_for(r), vec![1]); + assert_eq!(Democracy::vote_of((r, 1)), AYE); + assert_eq!(Democracy::tally(r), (10, 0, 10)); + + assert_eq!(Democracy::end_block(System::block_number()), Ok(())); + assert_eq!(Balances::free_balance(&42), 2); + }); + } + + #[test] + fn single_proposal_should_work_with_delegation() { + with_externalities(&mut new_test_ext(), || { + System::set_block_number(1); + + assert_ok!(propose_set_balance(1, 2, 1)); + + assert_eq!(Democracy::end_block(System::block_number()), Ok(())); + System::set_block_number(2); + let r = 0; + + // Delegate vote. + assert_ok!(Democracy::delegate(Origin::signed(2), 1, 100)); + + assert_ok!(Democracy::vote(Origin::signed(1), r, AYE)); + + assert_eq!(Democracy::referendum_count(), 1); + assert_eq!(Democracy::voters_for(r), vec![1]); + assert_eq!(Democracy::vote_of((r, 1)), AYE); - impl_outer_dispatch! { - pub enum Call for Test where origin: Origin { - balances::Balances, - democracy::Democracy, - } - } + // Delegated vote is counted. + assert_eq!(Democracy::tally(r), (30, 0, 30)); + + assert_eq!(Democracy::end_block(System::block_number()), Ok(())); + + assert_eq!(Balances::free_balance(&42), 2); + }); + } - // Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted. 
- #[derive(Clone, Eq, PartialEq, Debug)] - pub struct Test; - impl system::Trait for Test { - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type Digest = Digest; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type Log = DigestItem; - } - impl balances::Trait for Test { - type Balance = u64; - type OnFreeBalanceZero = (); - type OnNewAccount = (); - type Event = (); - type TransactionPayment = (); - type TransferPayment = (); - type DustRemoval = (); - } - impl Trait for Test { - type Currency = balances::Module; - type Proposal = Call; - type Event = (); - } + #[test] + fn single_proposal_should_work_with_cyclic_delegation() { + with_externalities(&mut new_test_ext(), || { + System::set_block_number(1); - fn new_test_ext() -> runtime_io::TestExternalities { - new_test_ext_with_public_delay(0) - } + assert_ok!(propose_set_balance(1, 2, 1)); - fn new_test_ext_with_public_delay(public_delay: u64) -> runtime_io::TestExternalities { - let mut t = system::GenesisConfig::::default().build_storage().unwrap().0; - t.extend(balances::GenesisConfig::{ - transaction_base_fee: 0, - transaction_byte_fee: 0, - balances: vec![(1, 10), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)], - existential_deposit: 0, - transfer_fee: 0, - creation_fee: 0, - vesting: vec![], - }.build_storage().unwrap().0); - t.extend(GenesisConfig::{ - launch_period: 1, - voting_period: 1, - minimum_deposit: 1, - public_delay, - max_lock_periods: 6, - }.build_storage().unwrap().0); - runtime_io::TestExternalities::new(t) - } + assert_eq!(Democracy::end_block(System::block_number()), Ok(())); + System::set_block_number(2); + let r = 0; - type System = system::Module; - type Balances = balances::Module; - type Democracy = Module; - - #[test] - fn params_should_work() { - with_externalities(&mut new_test_ext(), || { - assert_eq!(Democracy::launch_period(), 1); - assert_eq!(Democracy::voting_period(), 1); - assert_eq!(Democracy::minimum_deposit(), 1); - assert_eq!(Democracy::referendum_count(), 0); - assert_eq!(Balances::free_balance(&42), 0); - assert_eq!(Balances::total_issuance(), 210); - assert_eq!(Democracy::public_delay(), 0); - assert_eq!(Democracy::max_lock_periods(), 6); - }); - } + // Check behavior with cycle. 
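// [Editor's note] Why the cycle below terminates: `delegated_votes` skips any
// delegator who has voted directly (the `VoteOf::exists` filter) and is also
// capped by MAX_RECURSION_LIMIT. Here account 1 votes with balance 10,
// pulling in 2 (20) via 2 -> 1 and 3 (30) via 3 -> 2; the 1 -> 3 edge is
// ignored because 1 already voted, giving the asserted tally of (60, 0, 60).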
+            assert_ok!(Democracy::delegate(Origin::signed(2), 1, 100));
+            assert_ok!(Democracy::delegate(Origin::signed(3), 2, 100));
+            assert_ok!(Democracy::delegate(Origin::signed(1), 3, 100));

-    #[test]
-    fn vote_should_work() {
-        assert_eq!(Vote::new(true, 0).multiplier(), 1);
-        assert_eq!(Vote::new(true, 1).multiplier(), 1);
-        assert_eq!(Vote::new(true, 2).multiplier(), 2);
-        assert_eq!(Vote::new(true, 0).is_aye(), true);
-        assert_eq!(Vote::new(true, 1).is_aye(), true);
-        assert_eq!(Vote::new(true, 2).is_aye(), true);
-        assert_eq!(Vote::new(false, 0).multiplier(), 1);
-        assert_eq!(Vote::new(false, 1).multiplier(), 1);
-        assert_eq!(Vote::new(false, 2).multiplier(), 2);
-        assert_eq!(Vote::new(false, 0).is_aye(), false);
-        assert_eq!(Vote::new(false, 1).is_aye(), false);
-        assert_eq!(Vote::new(false, 2).is_aye(), false);
-    }
+            assert_ok!(Democracy::vote(Origin::signed(1), r, AYE));

-    #[test]
-    fn invalid_vote_strength_should_not_work() {
-        with_externalities(&mut new_test_ext(), || {
-            System::set_block_number(1);
-            let r = Democracy::inject_referendum(1, set_balance_proposal(2), VoteThreshold::SuperMajorityApprove, 0).unwrap();
-            assert_noop!(Democracy::vote(Origin::signed(1), r, Vote::new(true, 7)), "vote has too great a strength");
-            assert_noop!(Democracy::vote(Origin::signed(1), r, Vote::new(false, 7)), "vote has too great a strength");
-        });
-    }
+            assert_eq!(Democracy::referendum_count(), 1);
+            assert_eq!(Democracy::voters_for(r), vec![1]);

-    fn set_balance_proposal(value: u64) -> Call {
-        Call::Balances(balances::Call::set_balance(42, value.into(), 0))
-    }
+            // Delegated vote is counted.
+            assert_eq!(Democracy::tally(r), (60, 0, 60));
+            assert_eq!(Democracy::end_block(System::block_number()), Ok(()));

-    fn propose_set_balance(who: u64, value: u64, locked: u64) -> super::Result {
-        Democracy::propose(Origin::signed(who), Box::new(set_balance_proposal(value)), locked.into())
-    }
+            assert_eq!(Balances::free_balance(&42), 2);
+        });
+    }

-    #[test]
-    fn locked_for_should_work() {
-        with_externalities(&mut new_test_ext(), || {
-            System::set_block_number(1);
-            assert_ok!(propose_set_balance(1, 2, 2));
-            assert_ok!(propose_set_balance(1, 4, 4));
-            assert_ok!(propose_set_balance(1, 3, 3));
-            assert_eq!(Democracy::locked_for(0), Some(2));
-            assert_eq!(Democracy::locked_for(1), Some(4));
-            assert_eq!(Democracy::locked_for(2), Some(3));
-        });
-    }
+    #[test]
+    /// If the transactor already voted, the delegated vote is overwritten.
+ fn single_proposal_should_work_with_vote_and_delegation() { + with_externalities(&mut new_test_ext(), || { + System::set_block_number(1); - #[test] - fn single_proposal_should_work() { - with_externalities(&mut new_test_ext(), || { - System::set_block_number(1); - assert_ok!(propose_set_balance(1, 2, 1)); - assert_eq!(Democracy::end_block(System::block_number()), Ok(())); - - System::set_block_number(2); - let r = 0; - assert_ok!(Democracy::vote(Origin::signed(1), r, AYE)); - - assert_eq!(Democracy::referendum_count(), 1); - assert_eq!(Democracy::voters_for(r), vec![1]); - assert_eq!(Democracy::vote_of((r, 1)), AYE); - assert_eq!(Democracy::tally(r), (10, 0, 10)); - - assert_eq!(Democracy::end_block(System::block_number()), Ok(())); - assert_eq!(Balances::free_balance(&42), 2); - }); - } + assert_ok!(propose_set_balance(1, 2, 1)); - #[test] - fn proxy_should_work() { - with_externalities(&mut new_test_ext(), || { - assert_eq!(Democracy::proxy(10), None); - assert_ok!(Democracy::set_proxy(Origin::signed(1), 10)); - assert_eq!(Democracy::proxy(10), Some(1)); - - // Can't set when already set. - assert_noop!(Democracy::set_proxy(Origin::signed(2), 10), "already a proxy"); - - // But this works because 11 isn't proxying. - assert_ok!(Democracy::set_proxy(Origin::signed(2), 11)); - assert_eq!(Democracy::proxy(10), Some(1)); - assert_eq!(Democracy::proxy(11), Some(2)); - - // 2 cannot fire 1's proxy: - assert_noop!(Democracy::remove_proxy(Origin::signed(2), 10), "wrong proxy"); - - // 1 fires his proxy: - assert_ok!(Democracy::remove_proxy(Origin::signed(1), 10)); - assert_eq!(Democracy::proxy(10), None); - assert_eq!(Democracy::proxy(11), Some(2)); - - // 11 resigns: - assert_ok!(Democracy::resign_proxy(Origin::signed(11))); - assert_eq!(Democracy::proxy(10), None); - assert_eq!(Democracy::proxy(11), None); - }); - } - - #[test] - fn single_proposal_should_work_with_proxy() { - with_externalities(&mut new_test_ext(), || { - System::set_block_number(1); - assert_ok!(propose_set_balance(1, 2, 1)); - assert_eq!(Democracy::end_block(System::block_number()), Ok(())); - - System::set_block_number(2); - let r = 0; - assert_ok!(Democracy::set_proxy(Origin::signed(1), 10)); - assert_ok!(Democracy::proxy_vote(Origin::signed(10), r, AYE)); - - assert_eq!(Democracy::referendum_count(), 1); - assert_eq!(Democracy::voters_for(r), vec![1]); - assert_eq!(Democracy::vote_of((r, 1)), AYE); - assert_eq!(Democracy::tally(r), (10, 0, 10)); - - assert_eq!(Democracy::end_block(System::block_number()), Ok(())); - assert_eq!(Balances::free_balance(&42), 2); - }); - } - - #[test] - fn single_proposal_should_work_with_delegation() { - with_externalities(&mut new_test_ext(), || { - System::set_block_number(1); - - assert_ok!(propose_set_balance(1, 2, 1)); - - assert_eq!(Democracy::end_block(System::block_number()), Ok(())); - System::set_block_number(2); - let r = 0; - - // Delegate vote. - assert_ok!(Democracy::delegate(Origin::signed(2), 1, 100)); - - assert_ok!(Democracy::vote(Origin::signed(1), r, AYE)); - - assert_eq!(Democracy::referendum_count(), 1); - assert_eq!(Democracy::voters_for(r), vec![1]); - assert_eq!(Democracy::vote_of((r, 1)), AYE); - - // Delegated vote is counted. 
- assert_eq!(Democracy::tally(r), (30, 0, 30)); - - assert_eq!(Democracy::end_block(System::block_number()), Ok(())); - - assert_eq!(Balances::free_balance(&42), 2); - }); - } - - #[test] - fn single_proposal_should_work_with_cyclic_delegation() { - with_externalities(&mut new_test_ext(), || { - System::set_block_number(1); - - assert_ok!(propose_set_balance(1, 2, 1)); - - assert_eq!(Democracy::end_block(System::block_number()), Ok(())); - System::set_block_number(2); - let r = 0; - - // Check behavior with cycle. - assert_ok!(Democracy::delegate(Origin::signed(2), 1, 100)); - assert_ok!(Democracy::delegate(Origin::signed(3), 2, 100)); - assert_ok!(Democracy::delegate(Origin::signed(1), 3, 100)); - - assert_ok!(Democracy::vote(Origin::signed(1), r, AYE)); - - assert_eq!(Democracy::referendum_count(), 1); - assert_eq!(Democracy::voters_for(r), vec![1]); - - // Delegated vote is counted. - assert_eq!(Democracy::tally(r), (60, 0, 60)); - assert_eq!(Democracy::end_block(System::block_number()), Ok(())); - - assert_eq!(Balances::free_balance(&42), 2); - }); - } - - #[test] - /// If transactor already voted, delegated vote is overwriten. - fn single_proposal_should_work_with_vote_and_delegation() { - with_externalities(&mut new_test_ext(), || { - System::set_block_number(1); - - assert_ok!(propose_set_balance(1, 2, 1)); - - assert_eq!(Democracy::end_block(System::block_number()), Ok(())); - System::set_block_number(2); - let r = 0; - - assert_ok!(Democracy::vote(Origin::signed(1), r, AYE)); - - // Vote. - assert_ok!(Democracy::vote(Origin::signed(2), r, AYE)); - - // Delegate vote. - assert_ok!(Democracy::delegate(Origin::signed(2), 1, 100)); - - assert_eq!(Democracy::referendum_count(), 1); - assert_eq!(Democracy::voters_for(r), vec![1, 2]); - assert_eq!(Democracy::vote_of((r, 1)), AYE); - - // Delegated vote is not counted. - assert_eq!(Democracy::tally(r), (30, 0, 30)); - - assert_eq!(Democracy::end_block(System::block_number()), Ok(())); - - assert_eq!(Balances::free_balance(&42), 2); - }); - } - - #[test] - fn single_proposal_should_work_with_undelegation() { - with_externalities(&mut new_test_ext(), || { - System::set_block_number(1); - - assert_ok!(propose_set_balance(1, 2, 1)); - - // Delegate and undelegate vote. - assert_ok!(Democracy::delegate(Origin::signed(2), 1, 100)); - assert_ok!(Democracy::undelegate(Origin::signed(2))); - - assert_eq!(Democracy::end_block(System::block_number()), Ok(())); - System::set_block_number(2); - let r = 0; - assert_ok!(Democracy::vote(Origin::signed(1), r, AYE)); - - assert_eq!(Democracy::referendum_count(), 1); - assert_eq!(Democracy::voters_for(r), vec![1]); - assert_eq!(Democracy::vote_of((r, 1)), AYE); - - // Delegated vote is not counted. - assert_eq!(Democracy::tally(r), (10, 0, 10)); - - assert_eq!(Democracy::end_block(System::block_number()), Ok(())); - - assert_eq!(Balances::free_balance(&42), 2); - }); - } - - #[test] - /// If transactor voted, delegated vote is overwriten. - fn single_proposal_should_work_with_delegation_and_vote() { - with_externalities(&mut new_test_ext(), || { - System::set_block_number(1); - - assert_ok!(propose_set_balance(1, 2, 1)); - - assert_eq!(Democracy::end_block(System::block_number()), Ok(())); - System::set_block_number(2); - let r = 0; - - assert_ok!(Democracy::vote(Origin::signed(1), r, AYE)); - - // Delegate vote. - assert_ok!(Democracy::delegate(Origin::signed(2), 1, 100)); - - // Vote. 
- assert_ok!(Democracy::vote(Origin::signed(2), r, AYE)); - - assert_eq!(Democracy::referendum_count(), 1); - assert_eq!(Democracy::voters_for(r), vec![1, 2]); - assert_eq!(Democracy::vote_of((r, 1)), AYE); - - // Delegated vote is not counted. - assert_eq!(Democracy::tally(r), (30, 0, 30)); - - assert_eq!(Democracy::end_block(System::block_number()), Ok(())); + assert_eq!(Democracy::end_block(System::block_number()), Ok(())); + System::set_block_number(2); + let r = 0; - assert_eq!(Balances::free_balance(&42), 2); - }); - } - - #[test] - fn deposit_for_proposals_should_be_taken() { - with_externalities(&mut new_test_ext(), || { - System::set_block_number(1); - assert_ok!(propose_set_balance(1, 2, 5)); - assert_ok!(Democracy::second(Origin::signed(2), 0)); - assert_ok!(Democracy::second(Origin::signed(5), 0)); - assert_ok!(Democracy::second(Origin::signed(5), 0)); - assert_ok!(Democracy::second(Origin::signed(5), 0)); - assert_eq!(Balances::free_balance(&1), 5); - assert_eq!(Balances::free_balance(&2), 15); - assert_eq!(Balances::free_balance(&5), 35); - }); - } - - #[test] - fn deposit_for_proposals_should_be_returned() { - with_externalities(&mut new_test_ext(), || { - System::set_block_number(1); - assert_ok!(propose_set_balance(1, 2, 5)); - assert_ok!(Democracy::second(Origin::signed(2), 0)); - assert_ok!(Democracy::second(Origin::signed(5), 0)); - assert_ok!(Democracy::second(Origin::signed(5), 0)); - assert_ok!(Democracy::second(Origin::signed(5), 0)); - assert_eq!(Democracy::end_block(System::block_number()), Ok(())); - assert_eq!(Balances::free_balance(&1), 10); - assert_eq!(Balances::free_balance(&2), 20); - assert_eq!(Balances::free_balance(&5), 50); - }); - } - - #[test] - fn proposal_with_deposit_below_minimum_should_not_work() { - with_externalities(&mut new_test_ext(), || { - System::set_block_number(1); - assert_noop!(propose_set_balance(1, 2, 0), "value too low"); - }); - } - - #[test] - fn poor_proposer_should_not_work() { - with_externalities(&mut new_test_ext(), || { - System::set_block_number(1); - assert_noop!(propose_set_balance(1, 2, 11), "proposer\'s balance too low"); - }); - } - - #[test] - fn poor_seconder_should_not_work() { - with_externalities(&mut new_test_ext(), || { - System::set_block_number(1); - assert_ok!(propose_set_balance(2, 2, 11)); - assert_noop!(Democracy::second(Origin::signed(1), 0), "seconder\'s balance too low"); - }); - } - - #[test] - fn runners_up_should_come_after() { - with_externalities(&mut new_test_ext(), || { - System::set_block_number(0); - assert_ok!(propose_set_balance(1, 2, 2)); - assert_ok!(propose_set_balance(1, 4, 4)); - assert_ok!(propose_set_balance(1, 3, 3)); - assert_eq!(Democracy::end_block(System::block_number()), Ok(())); - - System::set_block_number(1); - assert_ok!(Democracy::vote(Origin::signed(1), 0, AYE)); - assert_eq!(Democracy::end_block(System::block_number()), Ok(())); - assert_eq!(Balances::free_balance(&42), 4); - - System::set_block_number(2); - assert_ok!(Democracy::vote(Origin::signed(1), 1, AYE)); - assert_eq!(Democracy::end_block(System::block_number()), Ok(())); - assert_eq!(Balances::free_balance(&42), 3); - - System::set_block_number(3); - assert_ok!(Democracy::vote(Origin::signed(1), 2, AYE)); - assert_eq!(Democracy::end_block(System::block_number()), Ok(())); - }); - } + assert_ok!(Democracy::vote(Origin::signed(1), r, AYE)); - #[test] - fn simple_passing_should_work() { - with_externalities(&mut new_test_ext(), || { - System::set_block_number(1); - let r = Democracy::inject_referendum(1, 
set_balance_proposal(2), VoteThreshold::SuperMajorityApprove, 0).unwrap(); - assert_ok!(Democracy::vote(Origin::signed(1), r, AYE)); + // Vote. + assert_ok!(Democracy::vote(Origin::signed(2), r, AYE)); - assert_eq!(Democracy::voters_for(r), vec![1]); - assert_eq!(Democracy::vote_of((r, 1)), AYE); - assert_eq!(Democracy::tally(r), (10, 0, 10)); + // Delegate vote. + assert_ok!(Democracy::delegate(Origin::signed(2), 1, 100)); - assert_eq!(Democracy::end_block(System::block_number()), Ok(())); + assert_eq!(Democracy::referendum_count(), 1); + assert_eq!(Democracy::voters_for(r), vec![1, 2]); + assert_eq!(Democracy::vote_of((r, 1)), AYE); - assert_eq!(Balances::free_balance(&42), 2); - }); - } - - #[test] - fn cancel_referendum_should_work() { - with_externalities(&mut new_test_ext(), || { - System::set_block_number(1); - let r = Democracy::inject_referendum(1, set_balance_proposal(2), VoteThreshold::SuperMajorityApprove, 0).unwrap(); - assert_ok!(Democracy::vote(Origin::signed(1), r, AYE)); - assert_ok!(Democracy::cancel_referendum(r.into())); - - assert_eq!(Democracy::end_block(System::block_number()), Ok(())); - - assert_eq!(Balances::free_balance(&42), 0); - }); - } - - #[test] - fn simple_failing_should_work() { - with_externalities(&mut new_test_ext(), || { - System::set_block_number(1); - let r = Democracy::inject_referendum(1, set_balance_proposal(2), VoteThreshold::SuperMajorityApprove, 0).unwrap(); - assert_ok!(Democracy::vote(Origin::signed(1), r, NAY)); - - assert_eq!(Democracy::voters_for(r), vec![1]); - assert_eq!(Democracy::vote_of((r, 1)), NAY); - assert_eq!(Democracy::tally(r), (0, 10, 10)); + // Delegated vote is not counted. + assert_eq!(Democracy::tally(r), (30, 0, 30)); - assert_eq!(Democracy::end_block(System::block_number()), Ok(())); + assert_eq!(Democracy::end_block(System::block_number()), Ok(())); - assert_eq!(Balances::free_balance(&42), 0); - }); - } - - #[test] - fn controversial_voting_should_work() { - with_externalities(&mut new_test_ext(), || { - System::set_block_number(1); - let r = Democracy::inject_referendum(1, set_balance_proposal(2), VoteThreshold::SuperMajorityApprove, 0).unwrap(); - assert_ok!(Democracy::vote(Origin::signed(1), r, AYE)); - assert_ok!(Democracy::vote(Origin::signed(2), r, NAY)); - assert_ok!(Democracy::vote(Origin::signed(3), r, NAY)); - assert_ok!(Democracy::vote(Origin::signed(4), r, AYE)); - assert_ok!(Democracy::vote(Origin::signed(5), r, NAY)); - assert_ok!(Democracy::vote(Origin::signed(6), r, AYE)); - - assert_eq!(Democracy::tally(r), (110, 100, 210)); - - assert_eq!(Democracy::end_block(System::block_number()), Ok(())); - - assert_eq!(Balances::free_balance(&42), 2); - }); - } - - #[test] - fn delayed_enactment_should_work() { - with_externalities(&mut new_test_ext(), || { - System::set_block_number(1); - let r = Democracy::inject_referendum(1, set_balance_proposal(2), VoteThreshold::SuperMajorityApprove, 1).unwrap(); - assert_ok!(Democracy::vote(Origin::signed(1), r, AYE)); - assert_ok!(Democracy::vote(Origin::signed(2), r, AYE)); - assert_ok!(Democracy::vote(Origin::signed(3), r, AYE)); - assert_ok!(Democracy::vote(Origin::signed(4), r, AYE)); - assert_ok!(Democracy::vote(Origin::signed(5), r, AYE)); - assert_ok!(Democracy::vote(Origin::signed(6), r, AYE)); - - assert_eq!(Democracy::tally(r), (210, 0, 210)); - - assert_eq!(Democracy::end_block(System::block_number()), Ok(())); - assert_eq!(Balances::free_balance(&42), 0); - - System::set_block_number(2); - assert_eq!(Democracy::end_block(System::block_number()), Ok(())); - - 
assert_eq!(Balances::free_balance(&42), 2); - }); - } - - #[test] - fn lock_voting_should_work() { - with_externalities(&mut new_test_ext_with_public_delay(1), || { - System::set_block_number(1); - let r = Democracy::inject_referendum(1, set_balance_proposal(2), VoteThreshold::SuperMajorityApprove, 0).unwrap(); - assert_ok!(Democracy::vote(Origin::signed(1), r, Vote::new(false, 6))); - assert_ok!(Democracy::vote(Origin::signed(2), r, Vote::new(true, 5))); - assert_ok!(Democracy::vote(Origin::signed(3), r, Vote::new(true, 4))); - assert_ok!(Democracy::vote(Origin::signed(4), r, Vote::new(true, 3))); - assert_ok!(Democracy::vote(Origin::signed(5), r, Vote::new(true, 2))); - assert_ok!(Democracy::vote(Origin::signed(6), r, Vote::new(false, 1))); - - assert_eq!(Democracy::tally(r), (440, 120, 210)); - - assert_eq!(Democracy::end_block(System::block_number()), Ok(())); - - assert_eq!(Balances::locks(1), vec![]); - assert_eq!(Balances::locks(2), vec![BalanceLock { id: DEMOCRACY_ID, amount: u64::max_value(), until: 6, reasons: WithdrawReason::Transfer.into() }]); - assert_eq!(Balances::locks(3), vec![BalanceLock { id: DEMOCRACY_ID, amount: u64::max_value(), until: 5, reasons: WithdrawReason::Transfer.into() }]); - assert_eq!(Balances::locks(4), vec![BalanceLock { id: DEMOCRACY_ID, amount: u64::max_value(), until: 4, reasons: WithdrawReason::Transfer.into() }]); - assert_eq!(Balances::locks(5), vec![BalanceLock { id: DEMOCRACY_ID, amount: u64::max_value(), until: 3, reasons: WithdrawReason::Transfer.into() }]); - assert_eq!(Balances::locks(6), vec![]); - - System::set_block_number(2); - assert_eq!(Democracy::end_block(System::block_number()), Ok(())); - - assert_eq!(Balances::free_balance(&42), 2); - }); - } - - #[test] - fn lock_voting_should_work_with_delegation() { - with_externalities(&mut new_test_ext_with_public_delay(1), || { - System::set_block_number(1); - let r = Democracy::inject_referendum(1, set_balance_proposal(2), VoteThreshold::SuperMajorityApprove, 0).unwrap(); - assert_ok!(Democracy::vote(Origin::signed(1), r, Vote::new(false, 6))); - assert_ok!(Democracy::vote(Origin::signed(2), r, Vote::new(true, 5))); - assert_ok!(Democracy::vote(Origin::signed(3), r, Vote::new(true, 4))); - assert_ok!(Democracy::vote(Origin::signed(4), r, Vote::new(true, 3))); - assert_ok!(Democracy::delegate(Origin::signed(5), 2, 2)); - assert_ok!(Democracy::vote(Origin::signed(6), r, Vote::new(false, 1))); - - assert_eq!(Democracy::tally(r), (440, 120, 210)); - - assert_eq!(Democracy::end_block(System::block_number()), Ok(())); - - System::set_block_number(2); - assert_eq!(Democracy::end_block(System::block_number()), Ok(())); - - assert_eq!(Balances::free_balance(&42), 2); - }); - } - - #[test] - fn controversial_low_turnout_voting_should_work() { - with_externalities(&mut new_test_ext(), || { - System::set_block_number(1); - let r = Democracy::inject_referendum(1, set_balance_proposal(2), VoteThreshold::SuperMajorityApprove, 0).unwrap(); - assert_ok!(Democracy::vote(Origin::signed(5), r, NAY)); - assert_ok!(Democracy::vote(Origin::signed(6), r, AYE)); - - assert_eq!(Democracy::tally(r), (60, 50, 110)); - - assert_eq!(Democracy::end_block(System::block_number()), Ok(())); - - assert_eq!(Balances::free_balance(&42), 0); - }); - } - - #[test] - fn passing_low_turnout_voting_should_work() { - with_externalities(&mut new_test_ext(), || { - assert_eq!(Balances::free_balance(&42), 0); - assert_eq!(Balances::total_issuance(), 210); - - System::set_block_number(1); - let r = Democracy::inject_referendum(1, 
set_balance_proposal(2), VoteThreshold::SuperMajorityApprove, 0).unwrap();
-            assert_ok!(Democracy::vote(Origin::signed(4), r, AYE));
-            assert_ok!(Democracy::vote(Origin::signed(5), r, NAY));
-            assert_ok!(Democracy::vote(Origin::signed(6), r, AYE));
-
-            assert_eq!(Democracy::tally(r), (100, 50, 150));
-
-            assert_eq!(Democracy::end_block(System::block_number()), Ok(()));
-
-            assert_eq!(Balances::free_balance(&42), 2);
-        });
-    }
+            assert_eq!(Balances::free_balance(&42), 2);
+        });
+    }
+
+    #[test]
+    fn single_proposal_should_work_with_undelegation() {
+        with_externalities(&mut new_test_ext(), || {
+            System::set_block_number(1);
+
+            assert_ok!(propose_set_balance(1, 2, 1));
+
+            // Delegate and undelegate vote.
+            assert_ok!(Democracy::delegate(Origin::signed(2), 1, 100));
+            assert_ok!(Democracy::undelegate(Origin::signed(2)));
+
+            assert_eq!(Democracy::end_block(System::block_number()), Ok(()));
+            System::set_block_number(2);
+            let r = 0;
+            assert_ok!(Democracy::vote(Origin::signed(1), r, AYE));
+
+            assert_eq!(Democracy::referendum_count(), 1);
+            assert_eq!(Democracy::voters_for(r), vec![1]);
+            assert_eq!(Democracy::vote_of((r, 1)), AYE);
+
+            // Delegated vote is not counted.
+            assert_eq!(Democracy::tally(r), (10, 0, 10));
+
+            assert_eq!(Democracy::end_block(System::block_number()), Ok(()));
+
+            assert_eq!(Balances::free_balance(&42), 2);
+        });
+    }
+
+    #[test]
+    /// If the transactor voted, the delegated vote is overwritten.
+    fn single_proposal_should_work_with_delegation_and_vote() {
+        with_externalities(&mut new_test_ext(), || {
+            System::set_block_number(1);
+
+            assert_ok!(propose_set_balance(1, 2, 1));
+
+            assert_eq!(Democracy::end_block(System::block_number()), Ok(()));
+            System::set_block_number(2);
+            let r = 0;
+
+            assert_ok!(Democracy::vote(Origin::signed(1), r, AYE));
+
+            // Delegate vote.
+            assert_ok!(Democracy::delegate(Origin::signed(2), 1, 100));
+
+            // Vote.
+            assert_ok!(Democracy::vote(Origin::signed(2), r, AYE));
+
+            assert_eq!(Democracy::referendum_count(), 1);
+            assert_eq!(Democracy::voters_for(r), vec![1, 2]);
+            assert_eq!(Democracy::vote_of((r, 1)), AYE);
+
+            // Delegated vote is not counted.
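// [Editor's note] Worked numbers for the assertion that follows: voters 1
// (balance 10) and 2 (balance 20) both voted directly with multiplier 1, so
// the direct tally is 10 + 20 = 30; 2's delegation to 1 is skipped by the
// `VoteOf::exists` filter because 2 already voted, hence (30, 0, 30).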
+ assert_eq!(Democracy::tally(r), (30, 0, 30)); + + assert_eq!(Democracy::end_block(System::block_number()), Ok(())); + + assert_eq!(Balances::free_balance(&42), 2); + }); + } + + #[test] + fn deposit_for_proposals_should_be_taken() { + with_externalities(&mut new_test_ext(), || { + System::set_block_number(1); + assert_ok!(propose_set_balance(1, 2, 5)); + assert_ok!(Democracy::second(Origin::signed(2), 0)); + assert_ok!(Democracy::second(Origin::signed(5), 0)); + assert_ok!(Democracy::second(Origin::signed(5), 0)); + assert_ok!(Democracy::second(Origin::signed(5), 0)); + assert_eq!(Balances::free_balance(&1), 5); + assert_eq!(Balances::free_balance(&2), 15); + assert_eq!(Balances::free_balance(&5), 35); + }); + } + + #[test] + fn deposit_for_proposals_should_be_returned() { + with_externalities(&mut new_test_ext(), || { + System::set_block_number(1); + assert_ok!(propose_set_balance(1, 2, 5)); + assert_ok!(Democracy::second(Origin::signed(2), 0)); + assert_ok!(Democracy::second(Origin::signed(5), 0)); + assert_ok!(Democracy::second(Origin::signed(5), 0)); + assert_ok!(Democracy::second(Origin::signed(5), 0)); + assert_eq!(Democracy::end_block(System::block_number()), Ok(())); + assert_eq!(Balances::free_balance(&1), 10); + assert_eq!(Balances::free_balance(&2), 20); + assert_eq!(Balances::free_balance(&5), 50); + }); + } + + #[test] + fn proposal_with_deposit_below_minimum_should_not_work() { + with_externalities(&mut new_test_ext(), || { + System::set_block_number(1); + assert_noop!(propose_set_balance(1, 2, 0), "value too low"); + }); + } + + #[test] + fn poor_proposer_should_not_work() { + with_externalities(&mut new_test_ext(), || { + System::set_block_number(1); + assert_noop!(propose_set_balance(1, 2, 11), "proposer\'s balance too low"); + }); + } + + #[test] + fn poor_seconder_should_not_work() { + with_externalities(&mut new_test_ext(), || { + System::set_block_number(1); + assert_ok!(propose_set_balance(2, 2, 11)); + assert_noop!( + Democracy::second(Origin::signed(1), 0), + "seconder\'s balance too low" + ); + }); + } + + #[test] + fn runners_up_should_come_after() { + with_externalities(&mut new_test_ext(), || { + System::set_block_number(0); + assert_ok!(propose_set_balance(1, 2, 2)); + assert_ok!(propose_set_balance(1, 4, 4)); + assert_ok!(propose_set_balance(1, 3, 3)); + assert_eq!(Democracy::end_block(System::block_number()), Ok(())); + + System::set_block_number(1); + assert_ok!(Democracy::vote(Origin::signed(1), 0, AYE)); + assert_eq!(Democracy::end_block(System::block_number()), Ok(())); + assert_eq!(Balances::free_balance(&42), 4); + + System::set_block_number(2); + assert_ok!(Democracy::vote(Origin::signed(1), 1, AYE)); + assert_eq!(Democracy::end_block(System::block_number()), Ok(())); + assert_eq!(Balances::free_balance(&42), 3); + + System::set_block_number(3); + assert_ok!(Democracy::vote(Origin::signed(1), 2, AYE)); + assert_eq!(Democracy::end_block(System::block_number()), Ok(())); + }); + } + + #[test] + fn simple_passing_should_work() { + with_externalities(&mut new_test_ext(), || { + System::set_block_number(1); + let r = Democracy::inject_referendum( + 1, + set_balance_proposal(2), + VoteThreshold::SuperMajorityApprove, + 0, + ) + .unwrap(); + assert_ok!(Democracy::vote(Origin::signed(1), r, AYE)); + + assert_eq!(Democracy::voters_for(r), vec![1]); + assert_eq!(Democracy::vote_of((r, 1)), AYE); + assert_eq!(Democracy::tally(r), (10, 0, 10)); + + assert_eq!(Democracy::end_block(System::block_number()), Ok(())); + + assert_eq!(Balances::free_balance(&42), 2); + 
}); + } + + #[test] + fn cancel_referendum_should_work() { + with_externalities(&mut new_test_ext(), || { + System::set_block_number(1); + let r = Democracy::inject_referendum( + 1, + set_balance_proposal(2), + VoteThreshold::SuperMajorityApprove, + 0, + ) + .unwrap(); + assert_ok!(Democracy::vote(Origin::signed(1), r, AYE)); + assert_ok!(Democracy::cancel_referendum(r.into())); + + assert_eq!(Democracy::end_block(System::block_number()), Ok(())); + + assert_eq!(Balances::free_balance(&42), 0); + }); + } + + #[test] + fn simple_failing_should_work() { + with_externalities(&mut new_test_ext(), || { + System::set_block_number(1); + let r = Democracy::inject_referendum( + 1, + set_balance_proposal(2), + VoteThreshold::SuperMajorityApprove, + 0, + ) + .unwrap(); + assert_ok!(Democracy::vote(Origin::signed(1), r, NAY)); + + assert_eq!(Democracy::voters_for(r), vec![1]); + assert_eq!(Democracy::vote_of((r, 1)), NAY); + assert_eq!(Democracy::tally(r), (0, 10, 10)); + + assert_eq!(Democracy::end_block(System::block_number()), Ok(())); + + assert_eq!(Balances::free_balance(&42), 0); + }); + } + + #[test] + fn controversial_voting_should_work() { + with_externalities(&mut new_test_ext(), || { + System::set_block_number(1); + let r = Democracy::inject_referendum( + 1, + set_balance_proposal(2), + VoteThreshold::SuperMajorityApprove, + 0, + ) + .unwrap(); + assert_ok!(Democracy::vote(Origin::signed(1), r, AYE)); + assert_ok!(Democracy::vote(Origin::signed(2), r, NAY)); + assert_ok!(Democracy::vote(Origin::signed(3), r, NAY)); + assert_ok!(Democracy::vote(Origin::signed(4), r, AYE)); + assert_ok!(Democracy::vote(Origin::signed(5), r, NAY)); + assert_ok!(Democracy::vote(Origin::signed(6), r, AYE)); + + assert_eq!(Democracy::tally(r), (110, 100, 210)); + + assert_eq!(Democracy::end_block(System::block_number()), Ok(())); + + assert_eq!(Balances::free_balance(&42), 2); + }); + } + + #[test] + fn delayed_enactment_should_work() { + with_externalities(&mut new_test_ext(), || { + System::set_block_number(1); + let r = Democracy::inject_referendum( + 1, + set_balance_proposal(2), + VoteThreshold::SuperMajorityApprove, + 1, + ) + .unwrap(); + assert_ok!(Democracy::vote(Origin::signed(1), r, AYE)); + assert_ok!(Democracy::vote(Origin::signed(2), r, AYE)); + assert_ok!(Democracy::vote(Origin::signed(3), r, AYE)); + assert_ok!(Democracy::vote(Origin::signed(4), r, AYE)); + assert_ok!(Democracy::vote(Origin::signed(5), r, AYE)); + assert_ok!(Democracy::vote(Origin::signed(6), r, AYE)); + + assert_eq!(Democracy::tally(r), (210, 0, 210)); + + assert_eq!(Democracy::end_block(System::block_number()), Ok(())); + assert_eq!(Balances::free_balance(&42), 0); + + System::set_block_number(2); + assert_eq!(Democracy::end_block(System::block_number()), Ok(())); + + assert_eq!(Balances::free_balance(&42), 2); + }); + } + + #[test] + fn lock_voting_should_work() { + with_externalities(&mut new_test_ext_with_public_delay(1), || { + System::set_block_number(1); + let r = Democracy::inject_referendum( + 1, + set_balance_proposal(2), + VoteThreshold::SuperMajorityApprove, + 0, + ) + .unwrap(); + assert_ok!(Democracy::vote(Origin::signed(1), r, Vote::new(false, 6))); + assert_ok!(Democracy::vote(Origin::signed(2), r, Vote::new(true, 5))); + assert_ok!(Democracy::vote(Origin::signed(3), r, Vote::new(true, 4))); + assert_ok!(Democracy::vote(Origin::signed(4), r, Vote::new(true, 3))); + assert_ok!(Democracy::vote(Origin::signed(5), r, Vote::new(true, 2))); + assert_ok!(Democracy::vote(Origin::signed(6), r, Vote::new(false, 1))); + + 
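// [Editor's note] Worked numbers for the tally asserted on the next line:
// ayes are 20*5 + 30*4 + 40*3 + 50*2 = 440, nays are 10*6 + 60*1 = 120, and
// capital is the plain sum of balances 10+20+30+40+50+60 = 210.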
assert_eq!(Democracy::tally(r), (440, 120, 210)); + + assert_eq!(Democracy::end_block(System::block_number()), Ok(())); + + assert_eq!(Balances::locks(1), vec![]); + assert_eq!( + Balances::locks(2), + vec![BalanceLock { + id: DEMOCRACY_ID, + amount: u64::max_value(), + until: 6, + reasons: WithdrawReason::Transfer.into() + }] + ); + assert_eq!( + Balances::locks(3), + vec![BalanceLock { + id: DEMOCRACY_ID, + amount: u64::max_value(), + until: 5, + reasons: WithdrawReason::Transfer.into() + }] + ); + assert_eq!( + Balances::locks(4), + vec![BalanceLock { + id: DEMOCRACY_ID, + amount: u64::max_value(), + until: 4, + reasons: WithdrawReason::Transfer.into() + }] + ); + assert_eq!( + Balances::locks(5), + vec![BalanceLock { + id: DEMOCRACY_ID, + amount: u64::max_value(), + until: 3, + reasons: WithdrawReason::Transfer.into() + }] + ); + assert_eq!(Balances::locks(6), vec![]); + + System::set_block_number(2); + assert_eq!(Democracy::end_block(System::block_number()), Ok(())); + + assert_eq!(Balances::free_balance(&42), 2); + }); + } + + #[test] + fn lock_voting_should_work_with_delegation() { + with_externalities(&mut new_test_ext_with_public_delay(1), || { + System::set_block_number(1); + let r = Democracy::inject_referendum( + 1, + set_balance_proposal(2), + VoteThreshold::SuperMajorityApprove, + 0, + ) + .unwrap(); + assert_ok!(Democracy::vote(Origin::signed(1), r, Vote::new(false, 6))); + assert_ok!(Democracy::vote(Origin::signed(2), r, Vote::new(true, 5))); + assert_ok!(Democracy::vote(Origin::signed(3), r, Vote::new(true, 4))); + assert_ok!(Democracy::vote(Origin::signed(4), r, Vote::new(true, 3))); + assert_ok!(Democracy::delegate(Origin::signed(5), 2, 2)); + assert_ok!(Democracy::vote(Origin::signed(6), r, Vote::new(false, 1))); + + assert_eq!(Democracy::tally(r), (440, 120, 210)); + + assert_eq!(Democracy::end_block(System::block_number()), Ok(())); + + System::set_block_number(2); + assert_eq!(Democracy::end_block(System::block_number()), Ok(())); + + assert_eq!(Balances::free_balance(&42), 2); + }); + } + + #[test] + fn controversial_low_turnout_voting_should_work() { + with_externalities(&mut new_test_ext(), || { + System::set_block_number(1); + let r = Democracy::inject_referendum( + 1, + set_balance_proposal(2), + VoteThreshold::SuperMajorityApprove, + 0, + ) + .unwrap(); + assert_ok!(Democracy::vote(Origin::signed(5), r, NAY)); + assert_ok!(Democracy::vote(Origin::signed(6), r, AYE)); + + assert_eq!(Democracy::tally(r), (60, 50, 110)); + + assert_eq!(Democracy::end_block(System::block_number()), Ok(())); + + assert_eq!(Balances::free_balance(&42), 0); + }); + } + + #[test] + fn passing_low_turnout_voting_should_work() { + with_externalities(&mut new_test_ext(), || { + assert_eq!(Balances::free_balance(&42), 0); + assert_eq!(Balances::total_issuance(), 210); + + System::set_block_number(1); + let r = Democracy::inject_referendum( + 1, + set_balance_proposal(2), + VoteThreshold::SuperMajorityApprove, + 0, + ) + .unwrap(); + assert_ok!(Democracy::vote(Origin::signed(4), r, AYE)); + assert_ok!(Democracy::vote(Origin::signed(5), r, NAY)); + assert_ok!(Democracy::vote(Origin::signed(6), r, AYE)); + + assert_eq!(Democracy::tally(r), (100, 50, 150)); + + assert_eq!(Democracy::end_block(System::block_number()), Ok(())); + + assert_eq!(Balances::free_balance(&42), 2); + }); + } } diff --git a/srml/democracy/src/vote_threshold.rs b/srml/democracy/src/vote_threshold.rs index 5d9b2b742e..817e9eab87 100644 --- a/srml/democracy/src/vote_threshold.rs +++ b/srml/democracy/src/vote_threshold.rs 
@@ -16,87 +16,126 @@ //! Voting thresholds. +use parity_codec::{Decode, Encode}; +use primitives::traits::{IntegerSquareRoot, Zero}; +use rstd::ops::{Add, Div, Mul, Rem}; #[cfg(feature = "std")] -use serde_derive::{Serialize, Deserialize}; -use parity_codec::{Encode, Decode}; -use primitives::traits::{Zero, IntegerSquareRoot}; -use rstd::ops::{Add, Mul, Div, Rem}; +use serde_derive::{Deserialize, Serialize}; /// A means of determining if a vote is past pass threshold. #[derive(Clone, Copy, PartialEq, Eq, Encode, Decode)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize, Debug))] pub enum VoteThreshold { - /// A supermajority of approvals is needed to pass this vote. - SuperMajorityApprove, - /// A supermajority of rejects is needed to fail this vote. - SuperMajorityAgainst, - /// A simple majority of approvals is needed to pass this vote. - SimpleMajority, + /// A supermajority of approvals is needed to pass this vote. + SuperMajorityApprove, + /// A supermajority of rejects is needed to fail this vote. + SuperMajorityAgainst, + /// A simple majority of approvals is needed to pass this vote. + SimpleMajority, } pub trait Approved { - /// Given `approve` votes for and `against` votes against from a total electorate size of - /// `electorate` (`electorate - (approve + against)` are abstainers), then returns true if the - /// overall outcome is in favor of approval. - fn approved(&self, approve: Balance, against: Balance, voters: Balance, electorate: Balance) -> bool; + /// Given `approve` votes for and `against` votes against from a total electorate size of + /// `electorate` (`electorate - (approve + against)` are abstainers), then returns true if the + /// overall outcome is in favor of approval. + fn approved( + &self, + approve: Balance, + against: Balance, + voters: Balance, + electorate: Balance, + ) -> bool; } /// Return `true` iff `n1 / d1 < n2 / d2`. `d1` and `d2` may not be zero. -fn compare_rationals + Div + Rem + Ord + Copy>(mut n1: T, mut d1: T, mut n2: T, mut d2: T) -> bool { - // Uses a continued fractional representation for a non-overflowing compare. - // Detailed at https://janmr.com/blog/2014/05/comparing-rational-numbers-without-overflow/. - loop { - let q1 = n1 / d1; - let q2 = n2 / d2; - if q1 < q2 { - return true; - } - if q2 < q1 { - return false; - } - let r1 = n1 % d1; - let r2 = n2 % d2; - if r2.is_zero() { - return false; - } - if r1.is_zero() { - return true; - } - n1 = d2; - n2 = d1; - d1 = r2; - d2 = r1; - } +fn compare_rationals< + T: Zero + Mul + Div + Rem + Ord + Copy, +>( + mut n1: T, + mut d1: T, + mut n2: T, + mut d2: T, +) -> bool { + // Uses a continued fractional representation for a non-overflowing compare. + // Detailed at https://janmr.com/blog/2014/05/comparing-rational-numbers-without-overflow/. + loop { + let q1 = n1 / d1; + let q2 = n2 / d2; + if q1 < q2 { + return true; + } + if q2 < q1 { + return false; + } + let r1 = n1 % d1; + let r2 = n2 % d2; + if r2.is_zero() { + return false; + } + if r1.is_zero() { + return true; + } + n1 = d2; + n2 = d1; + d1 = r2; + d2 = r1; + } } -impl + Mul + Div + Rem + Copy> Approved for VoteThreshold { - /// Given `approve` votes for and `against` votes against from a total electorate size of - /// `electorate` of whom `voters` voted (`electorate - voters` are abstainers) then returns true if the - /// overall outcome is in favor of approval. - /// - /// We assume each *voter* may cast more than one *vote*, hence `voters` is not necessarily equal to - /// `approve + against`. 
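Since the hunk above is easier to follow with concrete numbers, here is a minimal standalone sketch of the same continued-fraction comparison. The `compare_rationals_u64` name and the `u64` specialization are mine for illustration; the real function is generic over the runtime's `Balance`. The two asserts mirror the test vectors at the bottom of this file:

/// Returns true iff n1/d1 < n2/d2, without overflowing intermediate products.
fn compare_rationals_u64(mut n1: u64, mut d1: u64, mut n2: u64, mut d2: u64) -> bool {
    loop {
        // Compare integer parts first; only descend into the fractional
        // parts (as a continued fraction) when the integer parts tie.
        let (q1, q2) = (n1 / d1, n2 / d2);
        if q1 != q2 {
            return q1 < q2;
        }
        let (r1, r2) = (n1 % d1, n2 % d2);
        if r2 == 0 {
            return false; // rhs has no fractional part left, so lhs >= rhs
        }
        if r1 == 0 {
            return true; // lhs terminated first, so lhs < rhs
        }
        // Comparing the remainders r1/d1 < r2/d2 is equivalent to comparing
        // the reciprocals flipped: d2/r2 < d1/r1. Swap and continue.
        n1 = d2;
        n2 = d1;
        d1 = r2;
        d2 = r1;
    }
}

fn main() {
    // SuperMajorityApprove passes iff against/sqrt(voters) < approve/sqrt(electorate).
    // Integer square roots: sqrt(110) = 10, sqrt(150) = 12, sqrt(210) = 14.
    assert!(!compare_rationals_u64(50, 10, 60, 14)); // approved(60, 50, 110, 210) == false
    assert!(compare_rationals_u64(50, 12, 100, 14)); // approved(100, 50, 150, 210) == true
    println!("threshold checks match the tests below");
}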
- fn approved(&self, approve: Balance, against: Balance, voters: Balance, electorate: Balance) -> bool { - let sqrt_voters = voters.integer_sqrt(); - let sqrt_electorate = electorate.integer_sqrt(); - if sqrt_voters.is_zero() { return false; } - match *self { - VoteThreshold::SuperMajorityApprove => - compare_rationals(against, sqrt_voters, approve, sqrt_electorate), - VoteThreshold::SuperMajorityAgainst => - compare_rationals(against, sqrt_electorate, approve, sqrt_voters), - VoteThreshold::SimpleMajority => approve > against, - } - } +impl< + Balance: IntegerSquareRoot + + Zero + + Ord + + Add + + Mul + + Div + + Rem + + Copy, + > Approved for VoteThreshold +{ + /// Given `approve` votes for and `against` votes against from a total electorate size of + /// `electorate` of whom `voters` voted (`electorate - voters` are abstainers) then returns true if the + /// overall outcome is in favor of approval. + /// + /// We assume each *voter* may cast more than one *vote*, hence `voters` is not necessarily equal to + /// `approve + against`. + fn approved( + &self, + approve: Balance, + against: Balance, + voters: Balance, + electorate: Balance, + ) -> bool { + let sqrt_voters = voters.integer_sqrt(); + let sqrt_electorate = electorate.integer_sqrt(); + if sqrt_voters.is_zero() { + return false; + } + match *self { + VoteThreshold::SuperMajorityApprove => { + compare_rationals(against, sqrt_voters, approve, sqrt_electorate) + } + VoteThreshold::SuperMajorityAgainst => { + compare_rationals(against, sqrt_electorate, approve, sqrt_voters) + } + VoteThreshold::SimpleMajority => approve > against, + } + } } #[cfg(test)] mod tests { - use super::*; + use super::*; - #[test] - fn should_work() { - assert_eq!(VoteThreshold::SuperMajorityApprove.approved(60, 50, 110, 210), false); - assert_eq!(VoteThreshold::SuperMajorityApprove.approved(100, 50, 150, 210), true); - } + #[test] + fn should_work() { + assert_eq!( + VoteThreshold::SuperMajorityApprove.approved(60, 50, 110, 210), + false + ); + assert_eq!( + VoteThreshold::SuperMajorityApprove.approved(100, 50, 150, 210), + true + ); + } } diff --git a/srml/example/src/lib.rs b/srml/example/src/lib.rs index 8ba83bfd88..94fcd63c37 100644 --- a/srml/example/src/lib.rs +++ b/srml/example/src/lib.rs @@ -20,7 +20,7 @@ // Ensure we're `no_std` when compiling for Wasm. #![cfg_attr(not(feature = "std"), no_std)] -use srml_support::{StorageValue, dispatch::Result, decl_module, decl_storage, decl_event}; +use srml_support::{decl_event, decl_module, decl_storage, dispatch::Result, StorageValue}; use system::ensure_signed; /// Our module's configuration trait. All our types and consts go in here. If the @@ -29,57 +29,60 @@ use system::ensure_signed; /// /// `system::Trait` should always be included in our implied traits. pub trait Trait: balances::Trait { - /// The overarching event type. - type Event: From> + Into<::Event>; + /// The overarching event type. + type Event: From> + Into<::Event>; } decl_storage! { - // A macro for the Storage trait, and its implementation, for this module. - // This allows for type-safe usage of the Substrate storage database, so you can - // keep things around between blocks. - trait Store for Module as Example { - // Any storage declarations of the form: - // `pub? Name get(getter_name)? [config()|config(myname)] [build(|_| {...})] : (= )?;` - // where `` is either: - // - `Type` (a basic value item); or - // - `map KeyType => ValueType` (a map item). 
- // - // Note that there are two optional modifiers for the storage type declaration. - // - `Foo: Option`: - // - `Foo::put(1); Foo::get()` returns `Some(1)`; - // - `Foo::kill(); Foo::get()` returns `None`. - // - `Foo: u32`: - // - `Foo::put(1); Foo::get()` returns `1`; - // - `Foo::kill(); Foo::get()` returns `0` (u32::default()). - // e.g. Foo: u32; - // e.g. pub Bar get(bar): map T::AccountId => Vec<(T::Balance, u64)>; - // - // For basic value items, you'll get a type which implements - // `support::StorageValue`. For map items, you'll get a type which - // implements `support::StorageMap`. - // - // If they have a getter (`get(getter_name)`), then your module will come - // equipped with `fn getter_name() -> Type` for basic value items or - // `fn getter_name(key: KeyType) -> ValueType` for map items. - Dummy get(dummy) config(): Option; - - // A map that has enumerable entries. - Bar get(bar) config(): linked_map T::AccountId => T::Balance; - - // this one uses the default, we'll demonstrate the usage of 'mutate' API. - Foo get(foo) config(): T::Balance; - } + // A macro for the Storage trait, and its implementation, for this module. + // This allows for type-safe usage of the Substrate storage database, so you can + // keep things around between blocks. + trait Store for Module as Example { + // Any storage declarations of the form: + // `pub? Name get(getter_name)? [config()|config(myname)] [build(|_| {...})] : (= )?;` + // where `` is either: + // - `Type` (a basic value item); or + // - `map KeyType => ValueType` (a map item). + // + // Note that there are two optional modifiers for the storage type declaration. + // - `Foo: Option`: + // - `Foo::put(1); Foo::get()` returns `Some(1)`; + // - `Foo::kill(); Foo::get()` returns `None`. + // - `Foo: u32`: + // - `Foo::put(1); Foo::get()` returns `1`; + // - `Foo::kill(); Foo::get()` returns `0` (u32::default()). + // e.g. Foo: u32; + // e.g. pub Bar get(bar): map T::AccountId => Vec<(T::Balance, u64)>; + // + // For basic value items, you'll get a type which implements + // `support::StorageValue`. For map items, you'll get a type which + // implements `support::StorageMap`. + // + // If they have a getter (`get(getter_name)`), then your module will come + // equipped with `fn getter_name() -> Type` for basic value items or + // `fn getter_name(key: KeyType) -> ValueType` for map items. + Dummy get(dummy) config(): Option; + + // A map that has enumerable entries. + Bar get(bar) config(): linked_map T::AccountId => T::Balance; + + // this one uses the default, we'll demonstrate the usage of 'mutate' API. + Foo get(foo) config(): T::Balance; + } } decl_event!( - /// Events are a simple means of reporting specific conditions and - /// circumstances that have happened that users, Dapps and/or chain explorers would find - /// interesting and otherwise difficult to detect. - pub enum Event where B = ::Balance { - // Just a normal `enum`, here's a dummy event to ensure it compiles. - /// Dummy event, just here so there's a generic type that's used. - Dummy(B), - } + /// Events are a simple means of reporting specific conditions and + /// circumstances that have happened that users, Dapps and/or chain explorers would find + /// interesting and otherwise difficult to detect. + pub enum Event + where + B = ::Balance, + { + // Just a normal `enum`, here's a dummy event to ensure it compiles. + /// Dummy event, just here so there's a generic type that's used. + Dummy(B), + } ); // The module declaration. 
This states the entry points that we handle. The @@ -113,116 +116,116 @@ decl_event!( // in system that do the matching for you and return a convenient result: `ensure_signed`, // `ensure_root` and `ensure_inherent`. decl_module! { - // Simple declaration of the `Module` type. Lets the macro know what its working on. - pub struct Module for enum Call where origin: T::Origin { - /// Deposit one of this module's events by using the default implementation. - /// It is also possible to provide a custom implementation. - /// For non-generic events, the generic parameter just needs to be dropped, so that it - /// looks like: `fn deposit_event() = default;`. - fn deposit_event() = default; - /// This is your public interface. Be extremely careful. - /// This is just a simple example of how to interact with the module from the external - /// world. - // This just increases the value of `Dummy` by `increase_by`. - // - // Since this is a dispatched function there are two extremely important things to - // remember: - // - // - MUST NOT PANIC: Under no circumstances (save, perhaps, storage getting into an - // irreparably damaged state) must this function panic. - // - NO SIDE-EFFECTS ON ERROR: This function must either complete totally (and return - // `Ok(())` or it must have no side-effects on storage and return `Err('Some reason')`. - // - // The first is relatively easy to audit for - just ensure all panickers are removed from - // logic that executes in production (which you do anyway, right?!). To ensure the second - // is followed, you should do all tests for validity at the top of your function. This - // is stuff like checking the sender (`origin`) or that state is such that the operation - // makes sense. - // - // Once you've determined that it's all good, then enact the operation and change storage. - // If you can't be certain that the operation will succeed without substantial computation - // then you have a classic blockchain attack scenario. The normal way of managing this is - // to attach a bond to the operation. As the first major alteration of storage, reserve - // some value from the sender's account (`Balances` module has a `reserve` function for - // exactly this scenario). This amount should be enough to cover any costs of the - // substantial execution in case it turns out that you can't proceed with the operation. - // - // If it eventually transpires that the operation is fine and, therefore, that the - // expense of the checks should be borne by the network, then you can refund the reserved - // deposit. If, however, the operation turns out to be invalid and the computation is - // wasted, then you can burn it or repatriate elsewhere. - // - // Security bonds ensure that attackers can't game it by ensuring that anyone interacting - // with the system either progresses it or pays for the trouble of faffing around with - // no progress. - // - // If you don't respect these rules, it is likely that your chain will be attackable. - fn accumulate_dummy(origin, increase_by: T::Balance) -> Result { - // This is a public call, so we ensure that the origin is some signed account. - let _sender = ensure_signed(origin)?; - - // Read the value of dummy from storage. - // let dummy = Self::dummy(); - // Will also work using the `::get` on the storage item type itself: - // let dummy = >::get(); - - // Calculate the new value. - // let new_dummy = dummy.map_or(increase_by, |dummy| dummy + increase_by); - - // Put the new value into storage. 
- // >::put(new_dummy); - // Will also work with a reference: - // >::put(&new_dummy); - - // Here's the new one of read and then modify the value. - >::mutate(|dummy| { - let new_dummy = dummy.map_or(increase_by, |dummy| dummy + increase_by); - *dummy = Some(new_dummy); - }); - - // Let's deposit an event to let the outside world know this happened. - Self::deposit_event(RawEvent::Dummy(increase_by)); - - // All good. - Ok(()) - } - - /// A privileged call; in this case it resets our dummy value to something new. - // Implementation of a privileged call. This doesn't have an `origin` parameter because - // it's not (directly) from an extrinsic, but rather the system as a whole has decided - // to execute it. Different runtimes have different reasons for allow privileged - // calls to be executed - we don't need to care why. Because it's privileged, we can - // assume it's a one-off operation and substantial processing/storage/memory can be used - // without worrying about gameability or attack scenarios. - // If you not specify `Result` explicitly as return value, it will be added automatically - // for you and `Ok(())` will be returned. - fn set_dummy(#[compact] new_value: T::Balance) { - // Put the new value into storage. - >::put(new_value); - } - - // The signature could also look like: `fn on_initialize()` - fn on_initialize(_n: T::BlockNumber) { - // Anything that needs to be done at the start of the block. - // We don't do anything here. - } - - // The signature could also look like: `fn on_finalize()` - fn on_finalize(_n: T::BlockNumber) { - // Anything that needs to be done at the end of the block. - // We just kill our dummy storage item. - >::kill(); - } - - // A runtime code run after every block and have access to extended set of APIs. - // - // For instance you can generate extrinsics for the upcoming produced block. - fn offchain_worker(_n: T::BlockNumber) { - // We don't do anything here. - // but we could dispatch extrinsic (transaction/inherent) using - // runtime_io::submit_extrinsic - } - } + // Simple declaration of the `Module` type. Lets the macro know what it's working on. + pub struct Module for enum Call where origin: T::Origin { + /// Deposit one of this module's events by using the default implementation. + /// It is also possible to provide a custom implementation. + /// For non-generic events, the generic parameter just needs to be dropped, so that it + /// looks like: `fn deposit_event() = default;`. + fn deposit_event() = default; + /// This is your public interface. Be extremely careful. + /// This is just a simple example of how to interact with the module from the external + /// world. + // This just increases the value of `Dummy` by `increase_by`. + // + // Since this is a dispatched function there are two extremely important things to + // remember: + // + // - MUST NOT PANIC: Under no circumstances (save, perhaps, storage getting into an + // irreparably damaged state) must this function panic. + // - NO SIDE-EFFECTS ON ERROR: This function must either complete totally (and return + // `Ok(())`) or it must have no side-effects on storage and return `Err('Some reason')`. + // + // The first is relatively easy to audit for - just ensure all panickers are removed from + // logic that executes in production (which you do anyway, right?!). To ensure the second + // is followed, you should do all tests for validity at the top of your function. This + // is stuff like checking the sender (`origin`) or that state is such that the operation + // makes sense.
+ // + // Once you've determined that it's all good, then enact the operation and change storage. + // If you can't be certain that the operation will succeed without substantial computation + // then you have a classic blockchain attack scenario. The normal way of managing this is + // to attach a bond to the operation. As the first major alteration of storage, reserve + // some value from the sender's account (`Balances` module has a `reserve` function for + // exactly this scenario). This amount should be enough to cover any costs of the + // substantial execution in case it turns out that you can't proceed with the operation. + // + // If it eventually transpires that the operation is fine and, therefore, that the + // expense of the checks should be borne by the network, then you can refund the reserved + // deposit. If, however, the operation turns out to be invalid and the computation is + // wasted, then you can burn it or repatriate elsewhere. + // + // Security bonds ensure that attackers can't game it by ensuring that anyone interacting + // with the system either progresses it or pays for the trouble of faffing around with + // no progress. + // + // If you don't respect these rules, it is likely that your chain will be attackable. + fn accumulate_dummy(origin, increase_by: T::Balance) -> Result { + // This is a public call, so we ensure that the origin is some signed account. + let _sender = ensure_signed(origin)?; + + // Read the value of dummy from storage. + // let dummy = Self::dummy(); + // Will also work using the `::get` on the storage item type itself: + // let dummy = >::get(); + + // Calculate the new value. + // let new_dummy = dummy.map_or(increase_by, |dummy| dummy + increase_by); + + // Put the new value into storage. + // >::put(new_dummy); + // Will also work with a reference: + // >::put(&new_dummy); + + // Here's how to read and then modify the value in a single operation. + >::mutate(|dummy| { + let new_dummy = dummy.map_or(increase_by, |dummy| dummy + increase_by); + *dummy = Some(new_dummy); + }); + + // Let's deposit an event to let the outside world know this happened. + Self::deposit_event(RawEvent::Dummy(increase_by)); + + // All good. + Ok(()) + } + + /// A privileged call; in this case it resets our dummy value to something new. + // Implementation of a privileged call. This doesn't have an `origin` parameter because + // it's not (directly) from an extrinsic, but rather the system as a whole has decided + // to execute it. Different runtimes have different reasons for allowing privileged + // calls to be executed - we don't need to care why. Because it's privileged, we can + // assume it's a one-off operation and substantial processing/storage/memory can be used + // without worrying about gameability or attack scenarios. + // If you do not specify `Result` explicitly as the return value, it will be added automatically + // for you and `Ok(())` will be returned. + fn set_dummy(#[compact] new_value: T::Balance) { + // Put the new value into storage. + >::put(new_value); + } + + // The signature could also look like: `fn on_initialize()` + fn on_initialize(_n: T::BlockNumber) { + // Anything that needs to be done at the start of the block. + // We don't do anything here. + } + + // The signature could also look like: `fn on_finalize()` + fn on_finalize(_n: T::BlockNumber) { + // Anything that needs to be done at the end of the block. + // We just kill our dummy storage item. + >::kill(); + } + + // Runtime code run after every block, with access to an extended set of APIs.
+ // + // For instance you can generate extrinsics for the upcoming produced block. + fn offchain_worker(_n: T::BlockNumber) { + // We don't do anything here. + // but we could dispatch extrinsic (transaction/inherent) using + // runtime_io::submit_extrinsic + } + } } // The main implementation block for the module. Functions here fall into three broad @@ -231,115 +234,129 @@ decl_module! { // functions that do not write to storage and operation functions that do. // - Private functions. These are your usual private utilities unavailable to other modules. impl Module { - // Add public immutables and private mutables. - #[allow(dead_code)] - fn accumulate_foo(origin: T::Origin, increase_by: T::Balance) -> Result { - let _sender = ensure_signed(origin)?; - - let prev = >::get(); - // Because Foo has 'default', the type of 'foo' in closure is the raw type instead of an Option<> type. - let result = >::mutate(|foo| { - *foo = *foo + increase_by; - *foo - }); - assert!(prev + increase_by == result); - - Ok(()) - } + // Add public immutables and private mutables. + #[allow(dead_code)] + fn accumulate_foo(origin: T::Origin, increase_by: T::Balance) -> Result { + let _sender = ensure_signed(origin)?; + + let prev = >::get(); + // Because Foo has 'default', the type of 'foo' in closure is the raw type instead of an Option<> type. + let result = >::mutate(|foo| { + *foo = *foo + increase_by; + *foo + }); + assert!(prev + increase_by == result); + + Ok(()) + } } #[cfg(test)] mod tests { - use super::*; - - use srml_support::{impl_outer_origin, assert_ok}; - use sr_io::with_externalities; - use substrate_primitives::{H256, Blake2Hasher}; - // The testing primitives are very useful for avoiding having to work with signatures - // or public keys. `u64` is used as the `AccountId` and no `Signature`s are requried. - use sr_primitives::{ - BuildStorage, traits::{BlakeTwo256, OnInitialize, OnFinalize, IdentityLookup}, - testing::{Digest, DigestItem, Header} - }; - - impl_outer_origin! { - pub enum Origin for Test {} - } - - // For testing the module, we construct most of a mock runtime. This means - // first constructing a configuration type (`Test`) which `impl`s each of the - // configuration traits of modules we want to use. - #[derive(Clone, Eq, PartialEq)] - pub struct Test; - impl system::Trait for Test { - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type Digest = Digest; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type Log = DigestItem; - } - impl balances::Trait for Test { - type Balance = u64; - type OnFreeBalanceZero = (); - type OnNewAccount = (); - type Event = (); - type TransactionPayment = (); - type TransferPayment = (); - type DustRemoval = (); - } - impl Trait for Test { - type Event = (); - } - type Example = Module; - - // This function basically just builds a genesis storage key/value store according to - // our desired mockup. - fn new_test_ext() -> sr_io::TestExternalities { - let mut t = system::GenesisConfig::::default().build_storage().unwrap().0; - // We use default for brevity, but you can configure as desired if needed. - t.extend(balances::GenesisConfig::::default().build_storage().unwrap().0); - t.extend(GenesisConfig::{ - dummy: 42, - // we configure the map with (key, value) pairs. 
- bar: vec![(1, 2), (2, 3)], - foo: 24, - }.build_storage().unwrap().0); - t.into() - } - - #[test] - fn it_works_for_optional_value() { - with_externalities(&mut new_test_ext(), || { - // Check that GenesisBuilder works properly. - assert_eq!(Example::dummy(), Some(42)); - - // Check that accumulate works when we have Some value in Dummy already. - assert_ok!(Example::accumulate_dummy(Origin::signed(1), 27)); - assert_eq!(Example::dummy(), Some(69)); - - // Check that finalizing the block removes Dummy from storage. - >::on_finalize(1); - assert_eq!(Example::dummy(), None); - - // Check that accumulate works when we Dummy has None in it. - >::on_initialize(2); - assert_ok!(Example::accumulate_dummy(Origin::signed(1), 42)); - assert_eq!(Example::dummy(), Some(42)); - }); - } - - #[test] - fn it_works_for_default_value() { - with_externalities(&mut new_test_ext(), || { - assert_eq!(Example::foo(), 24); - assert_ok!(Example::accumulate_foo(Origin::signed(1), 1)); - assert_eq!(Example::foo(), 25); - }); - } + use super::*; + + use sr_io::with_externalities; + use srml_support::{assert_ok, impl_outer_origin}; + use substrate_primitives::{Blake2Hasher, H256}; + // The testing primitives are very useful for avoiding having to work with signatures + // or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. + use sr_primitives::{ + testing::{Digest, DigestItem, Header}, + traits::{BlakeTwo256, IdentityLookup, OnFinalize, OnInitialize}, + BuildStorage, + }; + + impl_outer_origin! { + pub enum Origin for Test {} + } + + // For testing the module, we construct most of a mock runtime. This means + // first constructing a configuration type (`Test`) which `impl`s each of the + // configuration traits of modules we want to use. + #[derive(Clone, Eq, PartialEq)] + pub struct Test; + impl system::Trait for Test { + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type Digest = Digest; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = (); + type Log = DigestItem; + } + impl balances::Trait for Test { + type Balance = u64; + type OnFreeBalanceZero = (); + type OnNewAccount = (); + type Event = (); + type TransactionPayment = (); + type TransferPayment = (); + type DustRemoval = (); + } + impl Trait for Test { + type Event = (); + } + type Example = Module; + + // This function basically just builds a genesis storage key/value store according to + // our desired mockup. + fn new_test_ext() -> sr_io::TestExternalities { + let mut t = system::GenesisConfig::::default() + .build_storage() + .unwrap() + .0; + // We use default for brevity, but you can configure as desired if needed. + t.extend( + balances::GenesisConfig::::default() + .build_storage() + .unwrap() + .0, + ); + t.extend( + GenesisConfig:: { + dummy: 42, + // we configure the map with (key, value) pairs. + bar: vec![(1, 2), (2, 3)], + foo: 24, + } + .build_storage() + .unwrap() + .0, + ); + t.into() + } + + #[test] + fn it_works_for_optional_value() { + with_externalities(&mut new_test_ext(), || { + // Check that GenesisBuilder works properly. + assert_eq!(Example::dummy(), Some(42)); + + // Check that accumulate works when we have Some value in Dummy already. + assert_ok!(Example::accumulate_dummy(Origin::signed(1), 27)); + assert_eq!(Example::dummy(), Some(69)); + + // Check that finalizing the block removes Dummy from storage.
+ >::on_finalize(1); + assert_eq!(Example::dummy(), None); + + // Check that accumulate works when Dummy has None in it. + >::on_initialize(2); + assert_ok!(Example::accumulate_dummy(Origin::signed(1), 42)); + assert_eq!(Example::dummy(), Some(42)); + }); + } + + #[test] + fn it_works_for_default_value() { + with_externalities(&mut new_test_ext(), || { + assert_eq!(Example::foo(), 24); + assert_ok!(Example::accumulate_foo(Origin::signed(1), 1)); + assert_eq!(Example::foo(), 25); + }); + } } diff --git a/srml/executive/src/lib.rs b/srml/executive/src/lib.rs index 48908a01b3..fe1a3ed361 100644 --- a/srml/executive/src/lib.rs +++ b/srml/executive/src/lib.rs @@ -18,46 +18,51 @@ #![cfg_attr(not(feature = "std"), no_std)] -use rstd::prelude::*; -use rstd::marker::PhantomData; -use rstd::result; +use parity_codec::{Codec, Encode}; use primitives::traits::{ - self, Header, Zero, One, Checkable, Applyable, CheckEqual, OnFinalize, - OnInitialize, Hash, As, Digest, NumberFor, Block as BlockT, OffchainWorker + self, Applyable, As, Block as BlockT, CheckEqual, Checkable, Digest, Hash, Header, NumberFor, + OffchainWorker, OnFinalize, OnInitialize, One, Zero, }; -use srml_support::{Dispatchable, traits::MakePayment}; -use parity_codec::{Codec, Encode}; +use primitives::transaction_validity::{ + TransactionLongevity, TransactionPriority, TransactionValidity, +}; +use primitives::{ApplyError, ApplyOutcome}; +use rstd::marker::PhantomData; +use rstd::prelude::*; +use rstd::result; +use srml_support::{traits::MakePayment, Dispatchable}; use system::extrinsics_root; -use primitives::{ApplyOutcome, ApplyError}; -use primitives::transaction_validity::{TransactionValidity, TransactionPriority, TransactionLongevity}; mod internal { - pub const MAX_TRANSACTIONS_SIZE: u32 = 4 * 1024 * 1024; - - pub enum ApplyError { - BadSignature(&'static str), - Stale, - Future, - CantPay, - FullBlock, - } - - pub enum ApplyOutcome { - Success, - Fail(&'static str), - } + pub const MAX_TRANSACTIONS_SIZE: u32 = 4 * 1024 * 1024; + + pub enum ApplyError { + BadSignature(&'static str), + Stale, + Future, + CantPay, + FullBlock, + } + + pub enum ApplyOutcome { + Success, + Fail(&'static str), + } } /// Something that can be used to execute a block. pub trait ExecuteBlock { - /// Actually execute all transitioning for `block`. - fn execute_block(block: Block); - /// Execute all extrinsics like when executing a `block`, but with dropping intial and final checks. - fn execute_extrinsics_without_checks(block_number: NumberFor, extrinsics: Vec); + /// Actually execute all transitioning for `block`. + fn execute_block(block: Block); + /// Execute all extrinsics like when executing a `block`, but dropping the initial and final checks. + fn execute_extrinsics_without_checks( + block_number: NumberFor, + extrinsics: Vec, + ); } pub struct Executive( - PhantomData<(System, Block, Context, Payment, AllModules)> + PhantomData<(System, Block, Context, Payment, AllModules)>, ); impl< @@ -328,175 +333,230 @@ #[cfg(test)] mod tests { - use super::*; - use balances::Call; - use runtime_io::with_externalities; - use substrate_primitives::{H256, Blake2Hasher}; - use primitives::BuildStorage; - use primitives::traits::{Header as HeaderT, BlakeTwo256, IdentityLookup}; - use primitives::testing::{Digest, DigestItem, Header, Block}; - use srml_support::{traits::Currency, impl_outer_origin, impl_outer_event}; - use system; - use hex_literal::{hex, hex_impl}; - - impl_outer_origin!
{ - pub enum Origin for Runtime { - } - } - - impl_outer_event!{ - pub enum MetaEvent for Runtime { - balances, - } - } - - // Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted. - #[derive(Clone, Eq, PartialEq)] - pub struct Runtime; - impl system::Trait for Runtime { - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Hash = substrate_primitives::H256; - type Hashing = BlakeTwo256; - type Digest = Digest; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = MetaEvent; - type Log = DigestItem; - } - impl balances::Trait for Runtime { - type Balance = u64; - type OnFreeBalanceZero = (); - type OnNewAccount = (); - type Event = MetaEvent; - type TransactionPayment = (); - type DustRemoval = (); - type TransferPayment = (); - } - - type TestXt = primitives::testing::TestXt>; - type Executive = super::Executive, system::ChainContext, balances::Module, ()>; - - #[test] - fn balance_transfer_dispatch_works() { - let mut t = system::GenesisConfig::::default().build_storage().unwrap().0; - t.extend(balances::GenesisConfig:: { - transaction_base_fee: 10, - transaction_byte_fee: 0, - balances: vec![(1, 111)], - existential_deposit: 0, - transfer_fee: 0, - creation_fee: 0, - vesting: vec![], - }.build_storage().unwrap().0); - let xt = primitives::testing::TestXt(Some(1), 0, Call::transfer(2, 69)); - let mut t = runtime_io::TestExternalities::::new(t); - with_externalities(&mut t, || { - Executive::initialize_block(&Header::new(1, H256::default(), H256::default(), - [69u8; 32].into(), Digest::default())); - Executive::apply_extrinsic(xt).unwrap(); - assert_eq!(>::total_balance(&1), 32); - assert_eq!(>::total_balance(&2), 69); - }); - } - - fn new_test_ext() -> runtime_io::TestExternalities { - let mut t = system::GenesisConfig::::default().build_storage().unwrap().0; - t.extend(balances::GenesisConfig::::default().build_storage().unwrap().0); - t.into() - } - - #[test] - fn block_import_works() { - with_externalities(&mut new_test_ext(), || { - Executive::execute_block(Block { - header: Header { - parent_hash: [69u8; 32].into(), - number: 1, - state_root: hex!("49cd58a254ccf6abc4a023d9a22dcfc421e385527a250faec69f8ad0d8ed3e48").into(), - extrinsics_root: hex!("03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314").into(), - digest: Digest { logs: vec![], }, - }, - extrinsics: vec![], - }); - }); - } - - #[test] - #[should_panic] - fn block_import_of_bad_state_root_fails() { - with_externalities(&mut new_test_ext(), || { - Executive::execute_block(Block { - header: Header { - parent_hash: [69u8; 32].into(), - number: 1, - state_root: [0u8; 32].into(), - extrinsics_root: hex!("03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314").into(), - digest: Digest { logs: vec![], }, - }, - extrinsics: vec![], - }); - }); - } - - #[test] - #[should_panic] - fn block_import_of_bad_extrinsic_root_fails() { - with_externalities(&mut new_test_ext(), || { - Executive::execute_block(Block { - header: Header { - parent_hash: [69u8; 32].into(), - number: 1, - state_root: hex!("49cd58a254ccf6abc4a023d9a22dcfc421e385527a250faec69f8ad0d8ed3e48").into(), - extrinsics_root: [0u8; 32].into(), - digest: Digest { logs: vec![], }, - }, - extrinsics: vec![], - }); - }); - } - - #[test] - fn bad_extrinsic_not_inserted() { - let mut t = new_test_ext(); - let xt = primitives::testing::TestXt(Some(1), 42, Call::transfer(33, 69)); - with_externalities(&mut t, || { - Executive::initialize_block(&Header::new(1, 
H256::default(), H256::default(), [69u8; 32].into(), Digest::default())); - assert!(Executive::apply_extrinsic(xt).is_err()); - assert_eq!(>::extrinsic_index(), Some(0)); - }); - } - - #[test] - fn block_size_limit_enforced() { - let run_test = |should_fail: bool| { - let mut t = new_test_ext(); - let xt = primitives::testing::TestXt(Some(1), 0, Call::transfer(33, 69)); - let xt2 = primitives::testing::TestXt(Some(1), 1, Call::transfer(33, 69)); - let encoded = xt2.encode(); - let len = if should_fail { (internal::MAX_TRANSACTIONS_SIZE - 1) as usize } else { encoded.len() }; - with_externalities(&mut t, || { - Executive::initialize_block(&Header::new(1, H256::default(), H256::default(), [69u8; 32].into(), Digest::default())); - assert_eq!(>::all_extrinsics_len(), 0); - - Executive::apply_extrinsic(xt).unwrap(); - let res = Executive::apply_extrinsic_with_len(xt2, len, Some(encoded)); - - if should_fail { - assert!(res.is_err()); - assert_eq!(>::all_extrinsics_len(), 28); - assert_eq!(>::extrinsic_index(), Some(1)); - } else { - assert!(res.is_ok()); - assert_eq!(>::all_extrinsics_len(), 56); - assert_eq!(>::extrinsic_index(), Some(2)); - } - }); - }; - - run_test(false); - run_test(true); - } + use super::*; + use balances::Call; + use hex_literal::{hex, hex_impl}; + use primitives::testing::{Block, Digest, DigestItem, Header}; + use primitives::traits::{BlakeTwo256, Header as HeaderT, IdentityLookup}; + use primitives::BuildStorage; + use runtime_io::with_externalities; + use srml_support::{impl_outer_event, impl_outer_origin, traits::Currency}; + use substrate_primitives::{Blake2Hasher, H256}; + use system; + + impl_outer_origin! { + pub enum Origin for Runtime { + } + } + + impl_outer_event! { + pub enum MetaEvent for Runtime { + balances, + } + } + + // Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted. 
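// (To my understanding, rust-lang/rust#26925 is the long-standing issue about
// `#[derive]` emitting overly restrictive trait bounds for generic parameters;
// deriving on the concrete unit struct `Runtime` below sidesteps it.)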
+ #[derive(Clone, Eq, PartialEq)] + pub struct Runtime; + impl system::Trait for Runtime { + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Hash = substrate_primitives::H256; + type Hashing = BlakeTwo256; + type Digest = Digest; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = MetaEvent; + type Log = DigestItem; + } + impl balances::Trait for Runtime { + type Balance = u64; + type OnFreeBalanceZero = (); + type OnNewAccount = (); + type Event = MetaEvent; + type TransactionPayment = (); + type DustRemoval = (); + type TransferPayment = (); + } + + type TestXt = primitives::testing::TestXt>; + type Executive = super::Executive< + Runtime, + Block, + system::ChainContext, + balances::Module, + (), + >; + + #[test] + fn balance_transfer_dispatch_works() { + let mut t = system::GenesisConfig::::default() + .build_storage() + .unwrap() + .0; + t.extend( + balances::GenesisConfig:: { + transaction_base_fee: 10, + transaction_byte_fee: 0, + balances: vec![(1, 111)], + existential_deposit: 0, + transfer_fee: 0, + creation_fee: 0, + vesting: vec![], + } + .build_storage() + .unwrap() + .0, + ); + let xt = primitives::testing::TestXt(Some(1), 0, Call::transfer(2, 69)); + let mut t = runtime_io::TestExternalities::::new(t); + with_externalities(&mut t, || { + Executive::initialize_block(&Header::new( + 1, + H256::default(), + H256::default(), + [69u8; 32].into(), + Digest::default(), + )); + Executive::apply_extrinsic(xt).unwrap(); + assert_eq!(>::total_balance(&1), 32); + assert_eq!(>::total_balance(&2), 69); + }); + } + + fn new_test_ext() -> runtime_io::TestExternalities { + let mut t = system::GenesisConfig::::default() + .build_storage() + .unwrap() + .0; + t.extend( + balances::GenesisConfig::::default() + .build_storage() + .unwrap() + .0, + ); + t.into() + } + + #[test] + fn block_import_works() { + with_externalities(&mut new_test_ext(), || { + Executive::execute_block(Block { + header: Header { + parent_hash: [69u8; 32].into(), + number: 1, + state_root: hex!( + "49cd58a254ccf6abc4a023d9a22dcfc421e385527a250faec69f8ad0d8ed3e48" + ) + .into(), + extrinsics_root: hex!( + "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314" + ) + .into(), + digest: Digest { logs: vec![] }, + }, + extrinsics: vec![], + }); + }); + } + + #[test] + #[should_panic] + fn block_import_of_bad_state_root_fails() { + with_externalities(&mut new_test_ext(), || { + Executive::execute_block(Block { + header: Header { + parent_hash: [69u8; 32].into(), + number: 1, + state_root: [0u8; 32].into(), + extrinsics_root: hex!( + "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314" + ) + .into(), + digest: Digest { logs: vec![] }, + }, + extrinsics: vec![], + }); + }); + } + + #[test] + #[should_panic] + fn block_import_of_bad_extrinsic_root_fails() { + with_externalities(&mut new_test_ext(), || { + Executive::execute_block(Block { + header: Header { + parent_hash: [69u8; 32].into(), + number: 1, + state_root: hex!( + "49cd58a254ccf6abc4a023d9a22dcfc421e385527a250faec69f8ad0d8ed3e48" + ) + .into(), + extrinsics_root: [0u8; 32].into(), + digest: Digest { logs: vec![] }, + }, + extrinsics: vec![], + }); + }); + } + + #[test] + fn bad_extrinsic_not_inserted() { + let mut t = new_test_ext(); + let xt = primitives::testing::TestXt(Some(1), 42, Call::transfer(33, 69)); + with_externalities(&mut t, || { + Executive::initialize_block(&Header::new( + 1, + H256::default(), + H256::default(), + [69u8; 32].into(), + Digest::default(), + )); + 
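// (The extrinsic above carries index 42 while account 1 presumably expects
// index 0, so the apply should be rejected by the index check, leaving
// `extrinsic_index` still pointing at slot 0 as asserted below.)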
assert!(Executive::apply_extrinsic(xt).is_err()); + assert_eq!(>::extrinsic_index(), Some(0)); + }); + } + + #[test] + fn block_size_limit_enforced() { + let run_test = |should_fail: bool| { + let mut t = new_test_ext(); + let xt = primitives::testing::TestXt(Some(1), 0, Call::transfer(33, 69)); + let xt2 = primitives::testing::TestXt(Some(1), 1, Call::transfer(33, 69)); + let encoded = xt2.encode(); + let len = if should_fail { + (internal::MAX_TRANSACTIONS_SIZE - 1) as usize + } else { + encoded.len() + }; + with_externalities(&mut t, || { + Executive::initialize_block(&Header::new( + 1, + H256::default(), + H256::default(), + [69u8; 32].into(), + Digest::default(), + )); + assert_eq!(>::all_extrinsics_len(), 0); + + Executive::apply_extrinsic(xt).unwrap(); + let res = Executive::apply_extrinsic_with_len(xt2, len, Some(encoded)); + + if should_fail { + assert!(res.is_err()); + assert_eq!(>::all_extrinsics_len(), 28); + assert_eq!(>::extrinsic_index(), Some(1)); + } else { + assert!(res.is_ok()); + assert_eq!(>::all_extrinsics_len(), 56); + assert_eq!(>::extrinsic_index(), Some(2)); + } + }); + }; + + run_test(false); + run_test(true); + } } diff --git a/srml/finality-tracker/src/lib.rs b/srml/finality-tracker/src/lib.rs index 34be4ea66a..26a6597291 100644 --- a/srml/finality-tracker/src/lib.rs +++ b/srml/finality-tracker/src/lib.rs @@ -21,14 +21,11 @@ #[macro_use] extern crate srml_support; -use inherents::{ - RuntimeString, InherentIdentifier, ProvideInherent, - InherentData, MakeFatalError, -}; -use srml_support::StorageValue; -use primitives::traits::{As, One, Zero}; -use rstd::{prelude::*, result, cmp, vec}; +use inherents::{InherentData, InherentIdentifier, MakeFatalError, ProvideInherent, RuntimeString}; use parity_codec::Decode; +use primitives::traits::{As, One, Zero}; +use rstd::{cmp, prelude::*, result, vec}; +use srml_support::StorageValue; use srml_system::{ensure_inherent, Trait as SystemTrait}; #[cfg(feature = "std")] @@ -42,177 +39,185 @@ pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"finalnum"; /// Auxiliary trait to extract finalized inherent data. pub trait FinalizedInherentData { - /// Get finalized inherent data. - fn finalized_number(&self) -> Result; + /// Get finalized inherent data. + fn finalized_number(&self) -> Result; } impl FinalizedInherentData for InherentData { - fn finalized_number(&self) -> Result { - self.get_data(&INHERENT_IDENTIFIER) - .and_then(|r| r.ok_or_else(|| "Finalized number inherent data not found".into())) - } + fn finalized_number(&self) -> Result { + self.get_data(&INHERENT_IDENTIFIER) + .and_then(|r| r.ok_or_else(|| "Finalized number inherent data not found".into())) + } } /// Provider for inherent data. 
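/// Concretely: constructed with a closure that reports the node's latest finalized
/// block number, and used below to feed that number into each block's inherent data
/// under `INHERENT_IDENTIFIER`.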
#[cfg(feature = "std")] pub struct InherentDataProvider { - inner: F, - _marker: std::marker::PhantomData, + inner: F, + _marker: std::marker::PhantomData, } #[cfg(feature = "std")] impl InherentDataProvider { - pub fn new(final_oracle: F) -> Self { - InherentDataProvider { inner: final_oracle, _marker: Default::default() } - } + pub fn new(final_oracle: F) -> Self { + InherentDataProvider { + inner: final_oracle, + _marker: Default::default(), + } + } } #[cfg(feature = "std")] impl inherents::ProvideInherentData for InherentDataProvider - where F: Fn() -> Result +where + F: Fn() -> Result, { - fn inherent_identifier(&self) -> &'static InherentIdentifier { - &INHERENT_IDENTIFIER - } + fn inherent_identifier(&self) -> &'static InherentIdentifier { + &INHERENT_IDENTIFIER + } - fn provide_inherent_data(&self, inherent_data: &mut InherentData) -> Result<(), RuntimeString> { - (self.inner)() - .and_then(|n| inherent_data.put_data(INHERENT_IDENTIFIER, &n)) - } + fn provide_inherent_data(&self, inherent_data: &mut InherentData) -> Result<(), RuntimeString> { + (self.inner)().and_then(|n| inherent_data.put_data(INHERENT_IDENTIFIER, &n)) + } - fn error_to_string(&self, _error: &[u8]) -> Option { - Some(format!("no further information")) - } + fn error_to_string(&self, _error: &[u8]) -> Option { + Some(format!("no further information")) + } } - pub trait Trait: SystemTrait { - /// Something which can be notified when the timestamp is set. Set this to `()` if not needed. - type OnFinalizationStalled: OnFinalizationStalled; + /// Something which can be notified when the timestamp is set. Set this to `()` if not needed. + type OnFinalizationStalled: OnFinalizationStalled; } decl_storage! { - trait Store for Module as Timestamp { - /// Recent hints. - RecentHints get(recent_hints) build(|_| vec![T::BlockNumber::zero()]): Vec; - /// Ordered recent hints. - OrderedHints get(ordered_hints) build(|_| vec![T::BlockNumber::zero()]): Vec; - /// The median. - Median get(median) build(|_| T::BlockNumber::zero()): T::BlockNumber; - /// The number of recent samples to keep from this chain. Default is n-100 - pub WindowSize get(window_size) config(window_size): T::BlockNumber = T::BlockNumber::sa(DEFAULT_WINDOW_SIZE); - /// The delay after which point things become suspicious. - pub ReportLatency get(report_latency) config(report_latency): T::BlockNumber = T::BlockNumber::sa(DEFAULT_DELAY); - - /// Final hint to apply in the block. `None` means "same as parent". - Update: Option; - - // when initialized through config this is set in the beginning. - Initialized get(initialized) build(|_| false): bool; - } + trait Store for Module as Timestamp { + /// Recent hints. + RecentHints get(recent_hints) build(|_| vec![T::BlockNumber::zero()]): Vec; + /// Ordered recent hints. + OrderedHints get(ordered_hints) build(|_| vec![T::BlockNumber::zero()]): Vec; + /// The median. + Median get(median) build(|_| T::BlockNumber::zero()): T::BlockNumber; + /// The number of recent samples to keep from this chain. Default is n-100 + pub WindowSize get(window_size) config(window_size): T::BlockNumber = T::BlockNumber::sa(DEFAULT_WINDOW_SIZE); + /// The delay after which point things become suspicious. + pub ReportLatency get(report_latency) config(report_latency): T::BlockNumber = T::BlockNumber::sa(DEFAULT_DELAY); + + /// Final hint to apply in the block. `None` means "same as parent". + Update: Option; + + // when initialized through config this is set in the beginning. 
+ Initialized get(initialized) build(|_| false): bool; + } } decl_module! { - pub struct Module for enum Call where origin: T::Origin { - /// Hint that the author of this block thinks the best finalized - /// block is the given number. - fn final_hint(origin, #[compact] hint: T::BlockNumber) { - ensure_inherent(origin)?; - assert!(!::Update::exists(), "Final hint must be updated only once in the block"); - assert!( - srml_system::Module::::block_number() >= hint, - "Finalized height above block number", - ); - ::Update::put(hint); - } - - fn on_finalize() { - Self::update_hint(::Update::take()) - } - } + pub struct Module for enum Call where origin: T::Origin { + /// Hint that the author of this block thinks the best finalized + /// block is the given number. + fn final_hint(origin, #[compact] hint: T::BlockNumber) { + ensure_inherent(origin)?; + assert!(!::Update::exists(), "Final hint must be updated only once in the block"); + assert!( + srml_system::Module::::block_number() >= hint, + "Finalized height above block number", + ); + ::Update::put(hint); + } + + fn on_finalize() { + Self::update_hint(::Update::take()) + } + } } impl Module { - fn update_hint(hint: Option) { - if !Self::initialized() { - ::RecentHints::put(vec![T::BlockNumber::zero()]); - ::OrderedHints::put(vec![T::BlockNumber::zero()]); - ::Median::put(T::BlockNumber::zero()); - - ::Initialized::put(true); - } - - let mut recent = Self::recent_hints(); - let mut ordered = Self::ordered_hints(); - let window_size = cmp::max(T::BlockNumber::one(), Self::window_size()); - - let hint = hint.unwrap_or_else(|| recent.last() - .expect("always at least one recent sample; qed").clone() - ); - - // prune off the front of the list -- typically 1 except for when - // the sample size has just been shrunk. - { - // take into account the item we haven't pushed yet. - let to_prune = (recent.len() + 1).saturating_sub(window_size.as_() as usize); - - for drained in recent.drain(..to_prune) { - let idx = ordered.binary_search(&drained) - .expect("recent and ordered contain the same items; qed"); - - ordered.remove(idx); - } - } - - // find the position in the ordered list where the new item goes. - let ordered_idx = ordered.binary_search(&hint) - .unwrap_or_else(|idx| idx); - - ordered.insert(ordered_idx, hint); - recent.push(hint); - - let two = T::BlockNumber::one() + T::BlockNumber::one(); - - let median = { - let len = ordered.len(); - assert!(len > 0, "pruning dictated by window_size which is always saturated at 1; qed"); - - if len % 2 == 0 { - let a = ordered[len / 2]; - let b = ordered[(len / 2) - 1]; - - // compute average. - (a + b) / two - } else { - ordered[len / 2] - } - }; - - let our_window_size = recent.len(); - - ::RecentHints::put(recent); - ::OrderedHints::put(ordered); - ::Median::put(median); - - if T::BlockNumber::sa(our_window_size as u64) == window_size { - let now = srml_system::Module::::block_number(); - let latency = Self::report_latency(); - - // the delay is the latency plus half the window size. 
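// Worked example from the tests below: with `window_size = 11` and
// `report_latency = 100`, delay = 100 + 11/2 = 105 (integer division). With no
// hints ever supplied the median stays 0, so `median + delay <= now` first
// holds at block 105, and `on_stalled` fires with `further_wait = 11 - 1 = 10`,
// exactly the `StallEvent { at: 105, further_wait: 10 }` asserted in
// `notifies_when_stalled`.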
- let delay = latency + (window_size / two); - // median may be at most n - delay - if median + delay <= now { - T::OnFinalizationStalled::on_stalled(window_size - T::BlockNumber::one()); - } - } - } + fn update_hint(hint: Option) { + if !Self::initialized() { + ::RecentHints::put(vec![T::BlockNumber::zero()]); + ::OrderedHints::put(vec![T::BlockNumber::zero()]); + ::Median::put(T::BlockNumber::zero()); + + ::Initialized::put(true); + } + + let mut recent = Self::recent_hints(); + let mut ordered = Self::ordered_hints(); + let window_size = cmp::max(T::BlockNumber::one(), Self::window_size()); + + let hint = hint.unwrap_or_else(|| { + recent + .last() + .expect("always at least one recent sample; qed") + .clone() + }); + + // prune off the front of the list -- typically 1 except for when + // the sample size has just been shrunk. + { + // take into account the item we haven't pushed yet. + let to_prune = (recent.len() + 1).saturating_sub(window_size.as_() as usize); + + for drained in recent.drain(..to_prune) { + let idx = ordered + .binary_search(&drained) + .expect("recent and ordered contain the same items; qed"); + + ordered.remove(idx); + } + } + + // find the position in the ordered list where the new item goes. + let ordered_idx = ordered.binary_search(&hint).unwrap_or_else(|idx| idx); + + ordered.insert(ordered_idx, hint); + recent.push(hint); + + let two = T::BlockNumber::one() + T::BlockNumber::one(); + + let median = { + let len = ordered.len(); + assert!( + len > 0, + "pruning dictated by window_size which is always saturated at 1; qed" + ); + + if len % 2 == 0 { + let a = ordered[len / 2]; + let b = ordered[(len / 2) - 1]; + + // compute average. + (a + b) / two + } else { + ordered[len / 2] + } + }; + + let our_window_size = recent.len(); + + ::RecentHints::put(recent); + ::OrderedHints::put(ordered); + ::Median::put(median); + + if T::BlockNumber::sa(our_window_size as u64) == window_size { + let now = srml_system::Module::::block_number(); + let latency = Self::report_latency(); + + // the delay is the latency plus half the window size. + let delay = latency + (window_size / two); + // median may be at most n - delay + if median + delay <= now { + T::OnFinalizationStalled::on_stalled(window_size - T::BlockNumber::one()); + } + } + } } /// Called when finalization stalled at a given number. pub trait OnFinalizationStalled { - /// The parameter here is how many more blocks to wait before applying - /// changes triggered by finality stalling. - fn on_stalled(further_wait: N); + /// The parameter here is how many more blocks to wait before applying + /// changes triggered by finality stalling. + fn on_stalled(further_wait: N); } macro_rules! impl_on_stalled { @@ -234,152 +239,180 @@ macro_rules! impl_on_stalled { for_each_tuple!(impl_on_stalled); impl ProvideInherent for Module { - type Call = Call; - type Error = MakeFatalError<()>; - const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; - - fn create_inherent(data: &InherentData) -> Option { - let final_num = - data.finalized_number().expect("Gets and decodes final number inherent data"); - - // make hint only when not same as last to avoid bloat. 
-		Self::recent_hints().last().and_then(|last| if last == &final_num {
-			None
-		} else {
-			Some(Call::final_hint(final_num))
-		})
-	}
-
-	fn check_inherent(_call: &Self::Call, _data: &InherentData) -> result::Result<(), Self::Error> {
-		Ok(())
-	}
+    type Call = Call<T>;
+    type Error = MakeFatalError<()>;
+    const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER;
+
+    fn create_inherent(data: &InherentData) -> Option<Self::Call> {
+        let final_num = data
+            .finalized_number()
+            .expect("Gets and decodes final number inherent data");
+
+        // make hint only when not same as last to avoid bloat.
+        Self::recent_hints().last().and_then(|last| {
+            if last == &final_num {
+                None
+            } else {
+                Some(Call::final_hint(final_num))
+            }
+        })
+    }
+
+    fn check_inherent(_call: &Self::Call, _data: &InherentData) -> result::Result<(), Self::Error> {
+        Ok(())
+    }
 }

 #[cfg(test)]
 mod tests {
-	use super::*;
-
-	use sr_io::{with_externalities, TestExternalities};
-	use substrate_primitives::H256;
-	use primitives::BuildStorage;
-	use primitives::traits::{BlakeTwo256, IdentityLookup, OnFinalize, Header as HeaderT};
-	use primitives::testing::{Digest, DigestItem, Header};
-	use srml_support::impl_outer_origin;
-	use srml_system as system;
-	use lazy_static::lazy_static;
-	use parking_lot::Mutex;
-
-	#[derive(Clone, PartialEq, Debug)]
-	pub struct StallEvent {
-		at: u64,
-		further_wait: u64,
-	}
-
-	macro_rules! make_test_context {
-		() => {
-			#[derive(Clone, Eq, PartialEq)]
-			pub struct Test;
-
-			impl_outer_origin! {
-				pub enum Origin for Test {}
-			}
-
-			impl system::Trait for Test {
-				type Origin = Origin;
-				type Index = u64;
-				type BlockNumber = u64;
-				type Hash = H256;
-				type Hashing = BlakeTwo256;
-				type Digest = Digest;
-				type AccountId = u64;
-				type Lookup = IdentityLookup<u64>;
-				type Header = Header;
-				type Event = ();
-				type Log = DigestItem;
-			}
-
-			type System = system::Module<Test>;
-
-			lazy_static! {
-				static ref NOTIFICATIONS: Mutex<Vec<StallEvent>> = Mutex::new(Vec::new());
-			}
-
-			pub struct StallTracker;
-			impl OnFinalizationStalled<u64> for StallTracker {
-				fn on_stalled(further_wait: u64) {
-					let now = System::block_number();
-					NOTIFICATIONS.lock().push(StallEvent { at: now, further_wait });
-				}
-			}
-
-			impl Trait for Test {
-				type OnFinalizationStalled = StallTracker;
-			}
-
-			type FinalityTracker = Module<Test>;
-		}
-	}
-
-	#[test]
-	fn median_works() {
-		make_test_context!();
-		let t = system::GenesisConfig::<Test>::default().build_storage().unwrap().0;
-
-		with_externalities(&mut TestExternalities::new(t), || {
-			FinalityTracker::update_hint(Some(500));
-			assert_eq!(FinalityTracker::median(), 250);
-			assert!(NOTIFICATIONS.lock().is_empty());
-		});
-	}
-
-	#[test]
-	fn notifies_when_stalled() {
-		make_test_context!();
-		let mut t = system::GenesisConfig::<Test>::default().build_storage().unwrap().0;
-		t.extend(GenesisConfig::<Test> {
-			window_size: 11,
-			report_latency: 100
-		}.build_storage().unwrap().0);
-
-		with_externalities(&mut TestExternalities::new(t), || {
-			let mut parent_hash = System::parent_hash();
-			for i in 2..106 {
-				System::initialize(&i, &parent_hash, &Default::default());
-				FinalityTracker::on_finalize(i);
-				let hdr = System::finalize();
-				parent_hash = hdr.hash();
-			}
-
-			assert_eq!(
-				NOTIFICATIONS.lock().to_vec(),
-				vec![StallEvent { at: 105, further_wait: 10 }]
-			)
-		});
-	}
-
-	#[test]
-	fn recent_notifications_prevent_stalling() {
-		make_test_context!();
-		let mut t = system::GenesisConfig::<Test>::default().build_storage().unwrap().0;
-		t.extend(GenesisConfig::<Test> {
-			window_size: 11,
-			report_latency: 100
-		}.build_storage().unwrap().0);
-
-		with_externalities(&mut TestExternalities::new(t), || {
-			let mut parent_hash = System::parent_hash();
-			for i in 2..106 {
-				System::initialize(&i, &parent_hash, &Default::default());
-				assert_ok!(FinalityTracker::dispatch(
-					Call::final_hint(i-1),
-					Origin::INHERENT,
-				));
-				FinalityTracker::on_finalize(i);
-				let hdr = System::finalize();
-				parent_hash = hdr.hash();
-			}
-
-			assert!(NOTIFICATIONS.lock().is_empty());
-		});
-	}
+    use super::*;
+
+    use lazy_static::lazy_static;
+    use parking_lot::Mutex;
+    use primitives::testing::{Digest, DigestItem, Header};
+    use primitives::traits::{BlakeTwo256, Header as HeaderT, IdentityLookup, OnFinalize};
+    use primitives::BuildStorage;
+    use sr_io::{with_externalities, TestExternalities};
+    use srml_support::impl_outer_origin;
+    use srml_system as system;
+    use substrate_primitives::H256;
+
+    #[derive(Clone, PartialEq, Debug)]
+    pub struct StallEvent {
+        at: u64,
+        further_wait: u64,
+    }
+
+    macro_rules! make_test_context {
+        () => {
+            #[derive(Clone, Eq, PartialEq)]
+            pub struct Test;
+
+            impl_outer_origin! {
+                pub enum Origin for Test {}
+            }
+
+            impl system::Trait for Test {
+                type Origin = Origin;
+                type Index = u64;
+                type BlockNumber = u64;
+                type Hash = H256;
+                type Hashing = BlakeTwo256;
+                type Digest = Digest;
+                type AccountId = u64;
+                type Lookup = IdentityLookup<u64>;
+                type Header = Header;
+                type Event = ();
+                type Log = DigestItem;
+            }
+
+            type System = system::Module<Test>;
+
+            lazy_static! {
+                static ref NOTIFICATIONS: Mutex<Vec<StallEvent>> = Mutex::new(Vec::new());
+            }
+
+            pub struct StallTracker;
+            impl OnFinalizationStalled<u64> for StallTracker {
+                fn on_stalled(further_wait: u64) {
+                    let now = System::block_number();
+                    NOTIFICATIONS.lock().push(StallEvent {
+                        at: now,
+                        further_wait,
+                    });
+                }
+            }
+
+            impl Trait for Test {
+                type OnFinalizationStalled = StallTracker;
+            }
+
+            type FinalityTracker = Module<Test>;
+        };
+    }
+
+    #[test]
+    fn median_works() {
+        make_test_context!();
+        let t = system::GenesisConfig::<Test>::default()
+            .build_storage()
+            .unwrap()
+            .0;
+
+        with_externalities(&mut TestExternalities::new(t), || {
+            FinalityTracker::update_hint(Some(500));
+            assert_eq!(FinalityTracker::median(), 250);
+            assert!(NOTIFICATIONS.lock().is_empty());
+        });
+    }
+
+    #[test]
+    fn notifies_when_stalled() {
+        make_test_context!();
+        let mut t = system::GenesisConfig::<Test>::default()
+            .build_storage()
+            .unwrap()
+            .0;
+        t.extend(
+            GenesisConfig::<Test> {
+                window_size: 11,
+                report_latency: 100,
+            }
+            .build_storage()
+            .unwrap()
+            .0,
+        );
+
+        with_externalities(&mut TestExternalities::new(t), || {
+            let mut parent_hash = System::parent_hash();
+            for i in 2..106 {
+                System::initialize(&i, &parent_hash, &Default::default());
+                FinalityTracker::on_finalize(i);
+                let hdr = System::finalize();
+                parent_hash = hdr.hash();
+            }
+
+            assert_eq!(
+                NOTIFICATIONS.lock().to_vec(),
+                vec![StallEvent {
+                    at: 105,
+                    further_wait: 10
+                }]
+            )
+        });
+    }
+
+    #[test]
+    fn recent_notifications_prevent_stalling() {
+        make_test_context!();
+        let mut t = system::GenesisConfig::<Test>::default()
+            .build_storage()
+            .unwrap()
+            .0;
+        t.extend(
+            GenesisConfig::<Test> {
+                window_size: 11,
+                report_latency: 100,
+            }
+            .build_storage()
+            .unwrap()
+            .0,
+        );
+
+        with_externalities(&mut TestExternalities::new(t), || {
+            let mut parent_hash = System::parent_hash();
+            for i in 2..106 {
+                System::initialize(&i, &parent_hash, &Default::default());
+                assert_ok!(FinalityTracker::dispatch(
+                    Call::final_hint(i - 1),
+                    Origin::INHERENT,
+                ));
+                FinalityTracker::on_finalize(i);
+                let hdr = System::finalize();
+                parent_hash = hdr.hash();
+            }
+
+            assert!(NOTIFICATIONS.lock().is_empty());
+        });
+    }
 }
diff --git a/srml/grandpa/src/lib.rs b/srml/grandpa/src/lib.rs
index e9b00662d6..1edc8d8742 100644
--- a/srml/grandpa/src/lib.rs
+++ b/srml/grandpa/src/lib.rs
@@ -30,109 +30,117 @@
 // re-export since this is necessary for `impl_apis` in runtime.
 pub use substrate_finality_grandpa_primitives as fg_primitives;

+use codec::{Decode, Encode};
+use ed25519::Public as AuthorityId;
+use fg_primitives::ScheduledChange;
+use parity_codec as codec;
+use primitives::traits::CurrentHeight;
+use primitives::traits::MaybeSerializeDebug;
+use rstd::prelude::*;
 #[cfg(feature = "std")]
 use serde_derive::Serialize;
-use rstd::prelude::*;
-use parity_codec as codec;
-use codec::{Encode, Decode};
-use fg_primitives::ScheduledChange;
-use srml_support::{Parameter, decl_event, decl_storage, decl_module};
 use srml_support::dispatch::Result;
-use srml_support::storage::StorageValue;
 use srml_support::storage::unhashed::StorageVec;
-use primitives::traits::CurrentHeight;
+use srml_support::storage::StorageValue;
+use srml_support::{decl_event, decl_module, decl_storage, Parameter};
 use substrate_primitives::ed25519;
 use system::ensure_signed;
-use primitives::traits::MaybeSerializeDebug;
-use ed25519::Public as AuthorityId;

 mod mock;
 mod tests;

 struct AuthorityStorageVec<S: codec::Codec>(rstd::marker::PhantomData<S>);
 impl<S: codec::Codec + Default> StorageVec for AuthorityStorageVec<S> {
-	type Item = (S, u64);
-	const PREFIX: &'static [u8] = crate::fg_primitives::well_known_keys::AUTHORITY_PREFIX;
+    type Item = (S, u64);
+    const PREFIX: &'static [u8] = crate::fg_primitives::well_known_keys::AUTHORITY_PREFIX;
 }

 /// The log type of this crate, projected from module trait type.
-pub type Log<T> = RawLog<
-	<T as system::Trait>::BlockNumber,
-	<T as Trait>::SessionKey,
->;
+pub type Log<T> = RawLog<<T as system::Trait>::BlockNumber, <T as Trait>::SessionKey>;

 /// Logs which can be scanned by GRANDPA for authorities change events.
 pub trait GrandpaChangeSignal<N> {
-	/// Try to cast the log entry as a contained signal.
-	fn as_signal(&self) -> Option<ScheduledChange<N>>;
-	/// Try to cast the log entry as a contained forced signal.
-	fn as_forced_signal(&self) -> Option<(N, ScheduledChange<N>)>;
+    /// Try to cast the log entry as a contained signal.
+    fn as_signal(&self) -> Option<ScheduledChange<N>>;
+    /// Try to cast the log entry as a contained forced signal.
+    fn as_forced_signal(&self) -> Option<(N, ScheduledChange<N>)>;
 }

 /// A log entry in this module.
 #[cfg_attr(feature = "std", derive(Serialize, Debug))]
 #[derive(Encode, Decode, PartialEq, Eq, Clone)]
 pub enum RawLog<N, SessionKey> {
-	/// Authorities set change has been signaled. Contains the new set of authorities
-	/// and the delay in blocks _to finalize_ before applying.
-	AuthoritiesChangeSignal(N, Vec<(SessionKey, u64)>),
-	/// A forced authorities set change. Contains in this order: the median last
-	/// finalized block when the change was signaled, the delay in blocks _to import_
-	/// before applying and the new set of authorities.
-	ForcedAuthoritiesChangeSignal(N, N, Vec<(SessionKey, u64)>),
+    /// Authorities set change has been signaled. Contains the new set of authorities
+    /// and the delay in blocks _to finalize_ before applying.
+    AuthoritiesChangeSignal(N, Vec<(SessionKey, u64)>),
+    /// A forced authorities set change. Contains in this order: the median last
+    /// finalized block when the change was signaled, the delay in blocks _to import_
+    /// before applying and the new set of authorities.
+    ForcedAuthoritiesChangeSignal(N, N, Vec<(SessionKey, u64)>),
 }

 impl<N: Clone, SessionKey: Clone> RawLog<N, SessionKey> {
-	/// Try to cast the log entry as a contained signal.
-	pub fn as_signal(&self) -> Option<(N, &[(SessionKey, u64)])> {
-		match *self {
-			RawLog::AuthoritiesChangeSignal(ref delay, ref signal) => Some((delay.clone(), signal)),
-			RawLog::ForcedAuthoritiesChangeSignal(_, _, _) => None,
-		}
-	}
-
-	/// Try to cast the log entry as a contained forced signal.
- pub fn as_forced_signal(&self) -> Option<(N, N, &[(SessionKey, u64)])> { - match *self { - RawLog::ForcedAuthoritiesChangeSignal(ref median, ref delay, ref signal) => Some((median.clone(), delay.clone(), signal)), - RawLog::AuthoritiesChangeSignal(_, _) => None, - } - } + /// Try to cast the log entry as a contained signal. + pub fn as_signal(&self) -> Option<(N, &[(SessionKey, u64)])> { + match *self { + RawLog::AuthoritiesChangeSignal(ref delay, ref signal) => Some((delay.clone(), signal)), + RawLog::ForcedAuthoritiesChangeSignal(_, _, _) => None, + } + } + + /// Try to cast the log entry as a contained forced signal. + pub fn as_forced_signal(&self) -> Option<(N, N, &[(SessionKey, u64)])> { + match *self { + RawLog::ForcedAuthoritiesChangeSignal(ref median, ref delay, ref signal) => { + Some((median.clone(), delay.clone(), signal)) + } + RawLog::AuthoritiesChangeSignal(_, _) => None, + } + } } impl GrandpaChangeSignal for RawLog - where N: Clone, SessionKey: Clone + Into, +where + N: Clone, + SessionKey: Clone + Into, { - fn as_signal(&self) -> Option> { - RawLog::as_signal(self).map(|(delay, next_authorities)| ScheduledChange { - delay, - next_authorities: next_authorities.iter() - .cloned() - .map(|(k, w)| (k.into(), w)) - .collect(), - }) - } - - fn as_forced_signal(&self) -> Option<(N, ScheduledChange)> { - RawLog::as_forced_signal(self).map(|(median, delay, next_authorities)| (median, ScheduledChange { - delay, - next_authorities: next_authorities.iter() - .cloned() - .map(|(k, w)| (k.into(), w)) - .collect(), - })) - } + fn as_signal(&self) -> Option> { + RawLog::as_signal(self).map(|(delay, next_authorities)| ScheduledChange { + delay, + next_authorities: next_authorities + .iter() + .cloned() + .map(|(k, w)| (k.into(), w)) + .collect(), + }) + } + + fn as_forced_signal(&self) -> Option<(N, ScheduledChange)> { + RawLog::as_forced_signal(self).map(|(median, delay, next_authorities)| { + ( + median, + ScheduledChange { + delay, + next_authorities: next_authorities + .iter() + .cloned() + .map(|(k, w)| (k.into(), w)) + .collect(), + }, + ) + }) + } } pub trait Trait: system::Trait { - /// Type for all log entries of this module. - type Log: From> + Into>; + /// Type for all log entries of this module. + type Log: From> + Into>; - /// The session key type used by authorities. - type SessionKey: Parameter + Default + MaybeSerializeDebug; + /// The session key type used by authorities. + type SessionKey: Parameter + Default + MaybeSerializeDebug; - /// The event type of this module. - type Event: From> + Into<::Event>; + /// The event type of this module. + type Event: From> + Into<::Event>; } /// A stored pending change, old format. @@ -140,40 +148,40 @@ pub trait Trait: system::Trait { // https://github.com/paritytech/substrate/issues/1614 #[derive(Encode, Decode)] pub struct OldStoredPendingChange { - /// The block number this was scheduled at. - pub scheduled_at: N, - /// The delay in blocks until it will be applied. - pub delay: N, - /// The next authority set. - pub next_authorities: Vec<(SessionKey, u64)>, + /// The block number this was scheduled at. + pub scheduled_at: N, + /// The delay in blocks until it will be applied. + pub delay: N, + /// The next authority set. + pub next_authorities: Vec<(SessionKey, u64)>, } /// A stored pending change. #[derive(Encode)] pub struct StoredPendingChange { - /// The block number this was scheduled at. - pub scheduled_at: N, - /// The delay in blocks until it will be applied. - pub delay: N, - /// The next authority set. 
- pub next_authorities: Vec<(SessionKey, u64)>, - /// If defined it means the change was forced and the given block number - /// indicates the median last finalized block when the change was signaled. - pub forced: Option, + /// The block number this was scheduled at. + pub scheduled_at: N, + /// The delay in blocks until it will be applied. + pub delay: N, + /// The next authority set. + pub next_authorities: Vec<(SessionKey, u64)>, + /// If defined it means the change was forced and the given block number + /// indicates the median last finalized block when the change was signaled. + pub forced: Option, } impl Decode for StoredPendingChange { - fn decode(value: &mut I) -> Option { - let old = OldStoredPendingChange::decode(value)?; - let forced = >::decode(value).unwrap_or(None); - - Some(StoredPendingChange { - scheduled_at: old.scheduled_at, - delay: old.delay, - next_authorities: old.next_authorities, - forced, - }) - } + fn decode(value: &mut I) -> Option { + let old = OldStoredPendingChange::decode(value)?; + let forced = >::decode(value).unwrap_or(None); + + Some(StoredPendingChange { + scheduled_at: old.scheduled_at, + delay: old.delay, + next_authorities: old.next_authorities, + forced, + }) + } } decl_event!( @@ -184,145 +192,146 @@ decl_event!( ); decl_storage! { - trait Store for Module as GrandpaFinality { - // Pending change: (signaled at, scheduled change). - PendingChange get(pending_change): Option>; - // next block number where we can force a change. - NextForced get(next_forced): Option; - } - add_extra_genesis { - config(authorities): Vec<(T::SessionKey, u64)>; - - build(|storage: &mut primitives::StorageOverlay, _: &mut primitives::ChildrenStorageOverlay, config: &GenesisConfig| { - use codec::{Encode, KeyedVec}; - - let auth_count = config.authorities.len() as u32; - config.authorities.iter().enumerate().for_each(|(i, v)| { - storage.insert((i as u32).to_keyed_vec( - crate::fg_primitives::well_known_keys::AUTHORITY_PREFIX), - v.encode() - ); - }); - storage.insert( - crate::fg_primitives::well_known_keys::AUTHORITY_COUNT.to_vec(), - auth_count.encode(), - ); - }); - } + trait Store for Module as GrandpaFinality { + // Pending change: (signaled at, scheduled change). + PendingChange get(pending_change): Option>; + // next block number where we can force a change. + NextForced get(next_forced): Option; + } + add_extra_genesis { + config(authorities): Vec<(T::SessionKey, u64)>; + + build(|storage: &mut primitives::StorageOverlay, _: &mut primitives::ChildrenStorageOverlay, config: &GenesisConfig| { + use codec::{Encode, KeyedVec}; + + let auth_count = config.authorities.len() as u32; + config.authorities.iter().enumerate().for_each(|(i, v)| { + storage.insert((i as u32).to_keyed_vec( + crate::fg_primitives::well_known_keys::AUTHORITY_PREFIX), + v.encode() + ); + }); + storage.insert( + crate::fg_primitives::well_known_keys::AUTHORITY_COUNT.to_vec(), + auth_count.encode(), + ); + }); + } } decl_module! { - pub struct Module for enum Call where origin: T::Origin { - fn deposit_event() = default; - - /// Report some misbehavior. 
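-		/// The report is accepted from any signed origin; the payload itself is
-		/// currently ignored (see the FIXME below).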
- fn report_misbehavior(origin, _report: Vec) { - ensure_signed(origin)?; - // FIXME: https://github.com/paritytech/substrate/issues/1112 - } - - fn on_finalize(block_number: T::BlockNumber) { - if let Some(pending_change) = >::get() { - if block_number == pending_change.scheduled_at { - if let Some(median) = pending_change.forced { - Self::deposit_log(RawLog::ForcedAuthoritiesChangeSignal( - median, - pending_change.delay, - pending_change.next_authorities.clone(), - )); - } else { - Self::deposit_log(RawLog::AuthoritiesChangeSignal( - pending_change.delay, - pending_change.next_authorities.clone(), - )); - } - } - - if block_number == pending_change.scheduled_at + pending_change.delay { - Self::deposit_event( - RawEvent::NewAuthorities(pending_change.next_authorities.clone()) - ); - >::set_items(pending_change.next_authorities); - >::kill(); - } - } - } - } + pub struct Module for enum Call where origin: T::Origin { + fn deposit_event() = default; + + /// Report some misbehavior. + fn report_misbehavior(origin, _report: Vec) { + ensure_signed(origin)?; + // FIXME: https://github.com/paritytech/substrate/issues/1112 + } + + fn on_finalize(block_number: T::BlockNumber) { + if let Some(pending_change) = >::get() { + if block_number == pending_change.scheduled_at { + if let Some(median) = pending_change.forced { + Self::deposit_log(RawLog::ForcedAuthoritiesChangeSignal( + median, + pending_change.delay, + pending_change.next_authorities.clone(), + )); + } else { + Self::deposit_log(RawLog::AuthoritiesChangeSignal( + pending_change.delay, + pending_change.next_authorities.clone(), + )); + } + } + + if block_number == pending_change.scheduled_at + pending_change.delay { + Self::deposit_event( + RawEvent::NewAuthorities(pending_change.next_authorities.clone()) + ); + >::set_items(pending_change.next_authorities); + >::kill(); + } + } + } + } } impl Module { - /// Get the current set of authorities, along with their respective weights. - pub fn grandpa_authorities() -> Vec<(T::SessionKey, u64)> { - >::items() - } - - /// Schedule a change in the authorities. - /// - /// The change will be applied at the end of execution of the block - /// `in_blocks` after the current block. This value may be 0, in which - /// case the change is applied at the end of the current block. - /// - /// If the `forced` parameter is defined, this indicates that the current - /// set has been synchronously determined to be offline and that after - /// `in_blocks` the given change should be applied. The given block number - /// indicates the median last finalized block number and it should be used - /// as the canon block when starting the new grandpa voter. - /// - /// No change should be signaled while any change is pending. Returns - /// an error if a change is already pending. - pub fn schedule_change( - next_authorities: Vec<(T::SessionKey, u64)>, - in_blocks: T::BlockNumber, - forced: Option, - ) -> Result { - use primitives::traits::As; - - if Self::pending_change().is_none() { - let scheduled_at = system::ChainContext::::default().current_height(); - - if let Some(_) = forced { - if Self::next_forced().map_or(false, |next| next > scheduled_at) { - return Err("Cannot signal forced change so soon after last."); - } - - // only allow the next forced change when twice the window has passed since - // this one. 
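-			// e.g. a forced change signaled at block 1 with `in_blocks = 5`
-			// leaves `NextForced` at 1 + 5 * 2 = 11, which is exactly what the
-			// `dispatch_forced_change` test below asserts.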
- >::put(scheduled_at + in_blocks * T::BlockNumber::sa(2)); - } - - >::put(StoredPendingChange { - delay: in_blocks, - scheduled_at, - next_authorities, - forced, - }); - - Ok(()) - } else { - Err("Attempt to signal GRANDPA change with one already pending.") - } - } - - /// Deposit one of this module's logs. - fn deposit_log(log: Log) { - >::deposit_log(::Log::from(log).into()); - } + /// Get the current set of authorities, along with their respective weights. + pub fn grandpa_authorities() -> Vec<(T::SessionKey, u64)> { + >::items() + } + + /// Schedule a change in the authorities. + /// + /// The change will be applied at the end of execution of the block + /// `in_blocks` after the current block. This value may be 0, in which + /// case the change is applied at the end of the current block. + /// + /// If the `forced` parameter is defined, this indicates that the current + /// set has been synchronously determined to be offline and that after + /// `in_blocks` the given change should be applied. The given block number + /// indicates the median last finalized block number and it should be used + /// as the canon block when starting the new grandpa voter. + /// + /// No change should be signaled while any change is pending. Returns + /// an error if a change is already pending. + pub fn schedule_change( + next_authorities: Vec<(T::SessionKey, u64)>, + in_blocks: T::BlockNumber, + forced: Option, + ) -> Result { + use primitives::traits::As; + + if Self::pending_change().is_none() { + let scheduled_at = system::ChainContext::::default().current_height(); + + if let Some(_) = forced { + if Self::next_forced().map_or(false, |next| next > scheduled_at) { + return Err("Cannot signal forced change so soon after last."); + } + + // only allow the next forced change when twice the window has passed since + // this one. + >::put(scheduled_at + in_blocks * T::BlockNumber::sa(2)); + } + + >::put(StoredPendingChange { + delay: in_blocks, + scheduled_at, + next_authorities, + forced, + }); + + Ok(()) + } else { + Err("Attempt to signal GRANDPA change with one already pending.") + } + } + + /// Deposit one of this module's logs. + fn deposit_log(log: Log) { + >::deposit_log(::Log::from(log).into()); + } } -impl Module where AuthorityId: core::convert::From<::SessionKey> { - /// See if the digest contains any standard scheduled change. - pub fn scrape_digest_change(log: &Log) - -> Option> - { - as GrandpaChangeSignal>::as_signal(log) - } - - /// See if the digest contains any forced scheduled change. - pub fn scrape_digest_forced_change(log: &Log) - -> Option<(T::BlockNumber, ScheduledChange)> - { - as GrandpaChangeSignal>::as_forced_signal(log) - } +impl Module +where + AuthorityId: core::convert::From<::SessionKey>, +{ + /// See if the digest contains any standard scheduled change. + pub fn scrape_digest_change(log: &Log) -> Option> { + as GrandpaChangeSignal>::as_signal(log) + } + + /// See if the digest contains any forced scheduled change. + pub fn scrape_digest_forced_change( + log: &Log, + ) -> Option<(T::BlockNumber, ScheduledChange)> { + as GrandpaChangeSignal>::as_forced_signal(log) + } } /// Helper for authorities being synchronized with the general session authorities. 
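/// `SyncedAuthorities` keeps GRANDPA in step with the session module: on a
/// session change it schedules an instant (zero-delay) authority change, and
/// on a finality stall it schedules a forced change `further_wait` blocks out.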
@@ -334,49 +343,51 @@ pub struct SyncedAuthorities(::rstd::marker::PhantomData); // FIXME: remove when https://github.com/rust-lang/rust/issues/26925 is fixed impl Default for SyncedAuthorities { - fn default() -> Self { - SyncedAuthorities(::rstd::marker::PhantomData) - } + fn default() -> Self { + SyncedAuthorities(::rstd::marker::PhantomData) + } } -impl session::OnSessionChange for SyncedAuthorities where - T: Trait + consensus::Trait::SessionKey>, - ::Log: From::SessionKey>> +impl session::OnSessionChange for SyncedAuthorities +where + T: Trait + consensus::Trait::SessionKey>, + ::Log: From::SessionKey>>, { - fn on_session_change(_: X, _: bool) { - use primitives::traits::Zero; - - let next_authorities = >::authorities() - .into_iter() - .map(|key| (key, 1)) // evenly-weighted. - .collect::::SessionKey, u64)>>(); - - // instant changes - let last_authorities = >::grandpa_authorities(); - if next_authorities != last_authorities { - let _ = >::schedule_change(next_authorities, Zero::zero(), None); - } - } + fn on_session_change(_: X, _: bool) { + use primitives::traits::Zero; + + let next_authorities = >::authorities() + .into_iter() + .map(|key| (key, 1)) // evenly-weighted. + .collect::::SessionKey, u64)>>(); + + // instant changes + let last_authorities = >::grandpa_authorities(); + if next_authorities != last_authorities { + let _ = >::schedule_change(next_authorities, Zero::zero(), None); + } + } } -impl finality_tracker::OnFinalizationStalled for SyncedAuthorities where - T: Trait + consensus::Trait::SessionKey>, - ::Log: From::SessionKey>>, - T: finality_tracker::Trait, +impl finality_tracker::OnFinalizationStalled for SyncedAuthorities +where + T: Trait + consensus::Trait::SessionKey>, + ::Log: From::SessionKey>>, + T: finality_tracker::Trait, { - fn on_stalled(further_wait: T::BlockNumber) { - // when we record old authority sets, we can use `finality_tracker::median` - // to figure out _who_ failed. until then, we can't meaningfully guard - // against `next == last` the way that normal session changes do. + fn on_stalled(further_wait: T::BlockNumber) { + // when we record old authority sets, we can use `finality_tracker::median` + // to figure out _who_ failed. until then, we can't meaningfully guard + // against `next == last` the way that normal session changes do. - let next_authorities = >::authorities() - .into_iter() - .map(|key| (key, 1)) // evenly-weighted. - .collect::::SessionKey, u64)>>(); + let next_authorities = >::authorities() + .into_iter() + .map(|key| (key, 1)) // evenly-weighted. + .collect::::SessionKey, u64)>>(); - let median = >::median(); + let median = >::median(); - // schedule a change for `further_wait` blocks. - let _ = >::schedule_change(next_authorities, further_wait, Some(median)); - } + // schedule a change for `further_wait` blocks. 
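+        // `median` is the median last finalized block reported by the finality
+        // tracker; per `schedule_change`'s contract it becomes the canon block
+        // for the new voter set once the forced change is applied.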
+ let _ = >::schedule_change(next_authorities, further_wait, Some(median)); + } } diff --git a/srml/grandpa/src/mock.rs b/srml/grandpa/src/mock.rs index 4405604ab1..4ede1f0ba9 100644 --- a/srml/grandpa/src/mock.rs +++ b/srml/grandpa/src/mock.rs @@ -18,62 +18,72 @@ #![cfg(test)] -use primitives::{BuildStorage, traits::IdentityLookup, testing::{Digest, DigestItem, Header}}; +use crate::{GenesisConfig, Module, RawLog, Trait}; +use parity_codec::{Decode, Encode}; use primitives::generic::DigestItem as GenDigestItem; +use primitives::{ + testing::{Digest, DigestItem, Header}, + traits::IdentityLookup, + BuildStorage, +}; use runtime_io; -use srml_support::{impl_outer_origin, impl_outer_event}; -use substrate_primitives::{H256, Blake2Hasher}; -use parity_codec::{Encode, Decode}; -use crate::{GenesisConfig, Trait, Module, RawLog}; +use srml_support::{impl_outer_event, impl_outer_origin}; +use substrate_primitives::{Blake2Hasher, H256}; -impl_outer_origin!{ - pub enum Origin for Test {} +impl_outer_origin! { + pub enum Origin for Test {} } impl From> for DigestItem { - fn from(log: RawLog) -> DigestItem { - GenDigestItem::Other(log.encode()) - } + fn from(log: RawLog) -> DigestItem { + GenDigestItem::Other(log.encode()) + } } // Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted. #[derive(Clone, PartialEq, Eq, Debug, Decode, Encode)] pub struct Test; impl Trait for Test { - type Log = DigestItem; - type SessionKey = u64; - type Event = TestEvent; + type Log = DigestItem; + type SessionKey = u64; + type Event = TestEvent; } impl system::Trait for Test { - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Hash = H256; - type Hashing = ::primitives::traits::BlakeTwo256; - type Digest = Digest; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = TestEvent; - type Log = DigestItem; + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Hashing = ::primitives::traits::BlakeTwo256; + type Digest = Digest; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = TestEvent; + type Log = DigestItem; } mod grandpa { - pub use crate::Event; + pub use crate::Event; } -impl_outer_event!{ - pub enum TestEvent for Test { - grandpa, - } +impl_outer_event! 
{ + pub enum TestEvent for Test { + grandpa, + } } pub fn new_test_ext(authorities: Vec<(u64, u64)>) -> runtime_io::TestExternalities { - let mut t = system::GenesisConfig::::default().build_storage().unwrap().0; - t.extend(GenesisConfig:: { - authorities, - }.build_storage().unwrap().0); - t.into() + let mut t = system::GenesisConfig::::default() + .build_storage() + .unwrap() + .0; + t.extend( + GenesisConfig:: { authorities } + .build_storage() + .unwrap() + .0, + ); + t.into() } pub type System = system::Module; diff --git a/srml/grandpa/src/tests.rs b/srml/grandpa/src/tests.rs index 3050b6a572..9e23981131 100644 --- a/srml/grandpa/src/tests.rs +++ b/srml/grandpa/src/tests.rs @@ -18,181 +18,181 @@ #![cfg(test)] -use primitives::{testing, traits::OnFinalize}; +use super::*; +use crate::mock::{new_test_ext, Grandpa, System}; +use crate::{RawEvent, RawLog}; +use codec::{Decode, Encode}; use primitives::traits::Header; +use primitives::{testing, traits::OnFinalize}; use runtime_io::with_externalities; -use crate::mock::{Grandpa, System, new_test_ext}; use system::{EventRecord, Phase}; -use crate::{RawLog, RawEvent}; -use codec::{Decode, Encode}; -use super::*; #[test] fn authorities_change_logged() { - with_externalities(&mut new_test_ext(vec![(1, 1), (2, 1), (3, 1)]), || { - System::initialize(&1, &Default::default(), &Default::default()); - Grandpa::schedule_change(vec![(4, 1), (5, 1), (6, 1)], 0, None).unwrap(); - - System::note_finished_extrinsics(); - Grandpa::on_finalize(1); - - let header = System::finalize(); - assert_eq!(header.digest, testing::Digest { - logs: vec![ - RawLog::AuthoritiesChangeSignal(0, vec![(4, 1), (5, 1), (6, 1)]).into(), - ], - }); - - assert_eq!(System::events(), vec![ - EventRecord { - phase: Phase::Finalization, - event: RawEvent::NewAuthorities(vec![(4, 1), (5, 1), (6, 1)]).into(), - }, - ]); - }); + with_externalities(&mut new_test_ext(vec![(1, 1), (2, 1), (3, 1)]), || { + System::initialize(&1, &Default::default(), &Default::default()); + Grandpa::schedule_change(vec![(4, 1), (5, 1), (6, 1)], 0, None).unwrap(); + + System::note_finished_extrinsics(); + Grandpa::on_finalize(1); + + let header = System::finalize(); + assert_eq!( + header.digest, + testing::Digest { + logs: vec![RawLog::AuthoritiesChangeSignal(0, vec![(4, 1), (5, 1), (6, 1)]).into(),], + } + ); + + assert_eq!( + System::events(), + vec![EventRecord { + phase: Phase::Finalization, + event: RawEvent::NewAuthorities(vec![(4, 1), (5, 1), (6, 1)]).into(), + },] + ); + }); } #[test] fn authorities_change_logged_after_delay() { - with_externalities(&mut new_test_ext(vec![(1, 1), (2, 1), (3, 1)]), || { - System::initialize(&1, &Default::default(), &Default::default()); - Grandpa::schedule_change(vec![(4, 1), (5, 1), (6, 1)], 1, None).unwrap(); - Grandpa::on_finalize(1); - let header = System::finalize(); - assert_eq!(header.digest, testing::Digest { - logs: vec![ - RawLog::AuthoritiesChangeSignal(1, vec![(4, 1), (5, 1), (6, 1)]).into(), - ], - }); - - // no change at this height. 
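-		// the signal sits in block 1's digest, but with a delay of 1 the new
-		// authorities (and the event) only land when block 2 is finalized.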
- assert_eq!(System::events(), vec![]); - - System::initialize(&2, &header.hash(), &Default::default()); - System::note_finished_extrinsics(); - Grandpa::on_finalize(2); - - let _header = System::finalize(); - assert_eq!(System::events(), vec![ - EventRecord { - phase: Phase::Finalization, - event: RawEvent::NewAuthorities(vec![(4, 1), (5, 1), (6, 1)]).into(), - }, - ]); - }); + with_externalities(&mut new_test_ext(vec![(1, 1), (2, 1), (3, 1)]), || { + System::initialize(&1, &Default::default(), &Default::default()); + Grandpa::schedule_change(vec![(4, 1), (5, 1), (6, 1)], 1, None).unwrap(); + Grandpa::on_finalize(1); + let header = System::finalize(); + assert_eq!( + header.digest, + testing::Digest { + logs: vec![RawLog::AuthoritiesChangeSignal(1, vec![(4, 1), (5, 1), (6, 1)]).into(),], + } + ); + + // no change at this height. + assert_eq!(System::events(), vec![]); + + System::initialize(&2, &header.hash(), &Default::default()); + System::note_finished_extrinsics(); + Grandpa::on_finalize(2); + + let _header = System::finalize(); + assert_eq!( + System::events(), + vec![EventRecord { + phase: Phase::Finalization, + event: RawEvent::NewAuthorities(vec![(4, 1), (5, 1), (6, 1)]).into(), + },] + ); + }); } #[test] fn cannot_schedule_change_when_one_pending() { - with_externalities(&mut new_test_ext(vec![(1, 1), (2, 1), (3, 1)]), || { - System::initialize(&1, &Default::default(), &Default::default()); - Grandpa::schedule_change(vec![(4, 1), (5, 1), (6, 1)], 1, None).unwrap(); - assert!(Grandpa::pending_change().is_some()); - assert!(Grandpa::schedule_change(vec![(5, 1)], 1, None).is_err()); + with_externalities(&mut new_test_ext(vec![(1, 1), (2, 1), (3, 1)]), || { + System::initialize(&1, &Default::default(), &Default::default()); + Grandpa::schedule_change(vec![(4, 1), (5, 1), (6, 1)], 1, None).unwrap(); + assert!(Grandpa::pending_change().is_some()); + assert!(Grandpa::schedule_change(vec![(5, 1)], 1, None).is_err()); - Grandpa::on_finalize(1); - let header = System::finalize(); + Grandpa::on_finalize(1); + let header = System::finalize(); - System::initialize(&2, &header.hash(), &Default::default()); - assert!(Grandpa::pending_change().is_some()); - assert!(Grandpa::schedule_change(vec![(5, 1)], 1, None).is_err()); + System::initialize(&2, &header.hash(), &Default::default()); + assert!(Grandpa::pending_change().is_some()); + assert!(Grandpa::schedule_change(vec![(5, 1)], 1, None).is_err()); - Grandpa::on_finalize(2); - let header = System::finalize(); + Grandpa::on_finalize(2); + let header = System::finalize(); - System::initialize(&3, &header.hash(), &Default::default()); - assert!(Grandpa::pending_change().is_none()); - assert!(Grandpa::schedule_change(vec![(5, 1)], 1, None).is_ok()); + System::initialize(&3, &header.hash(), &Default::default()); + assert!(Grandpa::pending_change().is_none()); + assert!(Grandpa::schedule_change(vec![(5, 1)], 1, None).is_ok()); - Grandpa::on_finalize(3); - let _header = System::finalize(); - }); + Grandpa::on_finalize(3); + let _header = System::finalize(); + }); } #[test] fn new_decodes_from_old() { - let old = OldStoredPendingChange { - scheduled_at: 5u32, - delay: 100u32, - next_authorities: vec![(1u64, 5), (2u64, 10), (3u64, 2)], - }; - - let encoded = old.encode(); - let new = StoredPendingChange::::decode(&mut &encoded[..]).unwrap(); - assert!(new.forced.is_none()); - assert_eq!(new.scheduled_at, old.scheduled_at); - assert_eq!(new.delay, old.delay); - assert_eq!(new.next_authorities, old.next_authorities); + let old = OldStoredPendingChange { + 
scheduled_at: 5u32, + delay: 100u32, + next_authorities: vec![(1u64, 5), (2u64, 10), (3u64, 2)], + }; + + let encoded = old.encode(); + let new = StoredPendingChange::::decode(&mut &encoded[..]).unwrap(); + assert!(new.forced.is_none()); + assert_eq!(new.scheduled_at, old.scheduled_at); + assert_eq!(new.delay, old.delay); + assert_eq!(new.next_authorities, old.next_authorities); } #[test] fn dispatch_forced_change() { - with_externalities(&mut new_test_ext(vec![(1, 1), (2, 1), (3, 1)]), || { - System::initialize(&1, &Default::default(), &Default::default()); - Grandpa::schedule_change( - vec![(4, 1), (5, 1), (6, 1)], - 5, - Some(0), - ).unwrap(); - - assert!(Grandpa::pending_change().is_some()); - assert!(Grandpa::schedule_change(vec![(5, 1)], 1, Some(0)).is_err()); - - Grandpa::on_finalize(1); - let mut header = System::finalize(); - - for i in 2..7 { - System::initialize(&i, &header.hash(), &Default::default()); - assert!(Grandpa::pending_change().unwrap().forced.is_some()); - assert_eq!(Grandpa::next_forced(), Some(11)); - assert!(Grandpa::schedule_change(vec![(5, 1)], 1, None).is_err()); - assert!(Grandpa::schedule_change(vec![(5, 1)], 1, Some(0)).is_err()); - - Grandpa::on_finalize(i); - header = System::finalize(); - } - - // change has been applied at the end of block 6. - // add a normal change. - { - System::initialize(&7, &header.hash(), &Default::default()); - assert!(Grandpa::pending_change().is_none()); - assert_eq!(Grandpa::grandpa_authorities(), vec![(4, 1), (5, 1), (6, 1)]); - assert!(Grandpa::schedule_change(vec![(5, 1)], 1, None).is_ok()); - Grandpa::on_finalize(7); - header = System::finalize(); - } - - // run the normal change. - { - System::initialize(&8, &header.hash(), &Default::default()); - assert!(Grandpa::pending_change().is_some()); - assert_eq!(Grandpa::grandpa_authorities(), vec![(4, 1), (5, 1), (6, 1)]); - assert!(Grandpa::schedule_change(vec![(5, 1)], 1, None).is_err()); - Grandpa::on_finalize(8); - header = System::finalize(); - } - - // normal change applied. but we can't apply a new forced change for some - // time. 
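+            // `NextForced` is still 11 here (1 + 5 * 2, from the forced change
+            // signaled at block 1), so further forced changes must keep failing.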
- for i in 9..11 { - System::initialize(&i, &header.hash(), &Default::default()); - assert!(Grandpa::pending_change().is_none()); - assert_eq!(Grandpa::grandpa_authorities(), vec![(5, 1)]); - assert_eq!(Grandpa::next_forced(), Some(11)); - assert!(Grandpa::schedule_change(vec![(5, 1), (6, 1)], 5, Some(0)).is_err()); - Grandpa::on_finalize(i); - header = System::finalize(); - } - - { - System::initialize(&11, &header.hash(), &Default::default()); - assert!(Grandpa::pending_change().is_none()); - assert!(Grandpa::schedule_change(vec![(5, 1), (6, 1), (7, 1)], 5, Some(0)).is_ok()); - assert_eq!(Grandpa::next_forced(), Some(21)); - Grandpa::on_finalize(11); - header = System::finalize(); - } - let _ = header; - }); + with_externalities(&mut new_test_ext(vec![(1, 1), (2, 1), (3, 1)]), || { + System::initialize(&1, &Default::default(), &Default::default()); + Grandpa::schedule_change(vec![(4, 1), (5, 1), (6, 1)], 5, Some(0)).unwrap(); + + assert!(Grandpa::pending_change().is_some()); + assert!(Grandpa::schedule_change(vec![(5, 1)], 1, Some(0)).is_err()); + + Grandpa::on_finalize(1); + let mut header = System::finalize(); + + for i in 2..7 { + System::initialize(&i, &header.hash(), &Default::default()); + assert!(Grandpa::pending_change().unwrap().forced.is_some()); + assert_eq!(Grandpa::next_forced(), Some(11)); + assert!(Grandpa::schedule_change(vec![(5, 1)], 1, None).is_err()); + assert!(Grandpa::schedule_change(vec![(5, 1)], 1, Some(0)).is_err()); + + Grandpa::on_finalize(i); + header = System::finalize(); + } + + // change has been applied at the end of block 6. + // add a normal change. + { + System::initialize(&7, &header.hash(), &Default::default()); + assert!(Grandpa::pending_change().is_none()); + assert_eq!(Grandpa::grandpa_authorities(), vec![(4, 1), (5, 1), (6, 1)]); + assert!(Grandpa::schedule_change(vec![(5, 1)], 1, None).is_ok()); + Grandpa::on_finalize(7); + header = System::finalize(); + } + + // run the normal change. + { + System::initialize(&8, &header.hash(), &Default::default()); + assert!(Grandpa::pending_change().is_some()); + assert_eq!(Grandpa::grandpa_authorities(), vec![(4, 1), (5, 1), (6, 1)]); + assert!(Grandpa::schedule_change(vec![(5, 1)], 1, None).is_err()); + Grandpa::on_finalize(8); + header = System::finalize(); + } + + // normal change applied. but we can't apply a new forced change for some + // time. + for i in 9..11 { + System::initialize(&i, &header.hash(), &Default::default()); + assert!(Grandpa::pending_change().is_none()); + assert_eq!(Grandpa::grandpa_authorities(), vec![(5, 1)]); + assert_eq!(Grandpa::next_forced(), Some(11)); + assert!(Grandpa::schedule_change(vec![(5, 1), (6, 1)], 5, Some(0)).is_err()); + Grandpa::on_finalize(i); + header = System::finalize(); + } + + { + System::initialize(&11, &header.hash(), &Default::default()); + assert!(Grandpa::pending_change().is_none()); + assert!(Grandpa::schedule_change(vec![(5, 1), (6, 1), (7, 1)], 5, Some(0)).is_ok()); + assert_eq!(Grandpa::next_forced(), Some(21)); + Grandpa::on_finalize(11); + header = System::finalize(); + } + let _ = header; + }); } diff --git a/srml/indices/src/address.rs b/srml/indices/src/address.rs index c7709e3bec..13e02f898a 100644 --- a/srml/indices/src/address.rs +++ b/srml/indices/src/address.rs @@ -16,123 +16,145 @@ //! Address type that is union of index and id for an account. 
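//!
//! On the wire the variant is discriminated by the first byte: `0x00..=0xef`
//! is the index itself, `0xfc`/`0xfd`/`0xfe` prefix a `u16`/`u32`/full-width
//! index, and `0xff` prefixes a full `AccountId`. For example, index 304 is
//! encoded as `[0xfc, 48, 1]` (304 = 48 + 1 * 256), as exercised by
//! `it_should_work` below.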
+use crate::{As, Decode, Encode, Input, Member, Output};
 #[cfg(feature = "std")]
 use std::fmt;
-use crate::{Member, Decode, Encode, As, Input, Output};

 /// An indices-aware address, which can be either a direct `AccountId` or
 /// an index.
 #[derive(PartialEq, Eq, Clone)]
 #[cfg_attr(feature = "std", derive(Debug, Hash))]
-pub enum Address<AccountId, AccountIndex> where
-	AccountId: Member,
-	AccountIndex: Member,
+pub enum Address<AccountId, AccountIndex>
+where
+    AccountId: Member,
+    AccountIndex: Member,
 {
-	/// It's an account ID (pubkey).
-	Id(AccountId),
-	/// It's an account index.
-	Index(AccountIndex),
+    /// It's an account ID (pubkey).
+    Id(AccountId),
+    /// It's an account index.
+    Index(AccountIndex),
 }

 #[cfg(feature = "std")]
-impl<AccountId, AccountIndex> fmt::Display for Address<AccountId, AccountIndex> where
-	AccountId: Member,
-	AccountIndex: Member,
+impl<AccountId, AccountIndex> fmt::Display for Address<AccountId, AccountIndex>
+where
+    AccountId: Member,
+    AccountIndex: Member,
 {
-	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-		write!(f, "{:?}", self)
-	}
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{:?}", self)
+    }
 }

-impl<AccountId, AccountIndex> From<AccountId> for Address<AccountId, AccountIndex> where
-	AccountId: Member,
-	AccountIndex: Member,
+impl<AccountId, AccountIndex> From<AccountId> for Address<AccountId, AccountIndex>
+where
+    AccountId: Member,
+    AccountIndex: Member,
 {
-	fn from(a: AccountId) -> Self {
-		Address::Id(a)
-	}
+    fn from(a: AccountId) -> Self {
+        Address::Id(a)
+    }
 }

 fn need_more_than<T: PartialOrd>(a: T, b: T) -> Option<T> {
-	if a < b { Some(b) } else { None }
+    if a < b {
+        Some(b)
+    } else {
+        None
+    }
 }

-impl<AccountId, AccountIndex> Decode for Address<AccountId, AccountIndex> where
-	AccountId: Member + Decode,
-	AccountIndex: Member + Decode + PartialOrd + Ord + As<u8> + As<u16> + As<u32> + Copy,
+impl<AccountId, AccountIndex> Decode for Address<AccountId, AccountIndex>
+where
+    AccountId: Member + Decode,
+    AccountIndex:
+        Member + Decode + PartialOrd + Ord + As<u8> + As<u16> + As<u32> + Copy,
 {
-	fn decode<I: Input>(input: &mut I) -> Option<Self> {
-		Some(match input.read_byte()? {
-			x @ 0x00...0xef => Address::Index(As::sa(x)),
-			0xfc => Address::Index(As::sa(need_more_than(0xef, u16::decode(input)?)?)),
-			0xfd => Address::Index(As::sa(need_more_than(0xffff, u32::decode(input)?)?)),
-			0xfe => Address::Index(need_more_than(As::sa(0xffffffffu32), Decode::decode(input)?)?),
-			0xff => Address::Id(Decode::decode(input)?),
-			_ => return None,
-		})
-	}
+    fn decode<I: Input>(input: &mut I) -> Option<Self> {
+        Some(match input.read_byte()? {
+            x @ 0x00...0xef => Address::Index(As::sa(x)),
+            0xfc => Address::Index(As::sa(need_more_than(0xef, u16::decode(input)?)?)),
+            0xfd => Address::Index(As::sa(need_more_than(0xffff, u32::decode(input)?)?)),
+            0xfe => Address::Index(need_more_than(
+                As::sa(0xffffffffu32),
+                Decode::decode(input)?,
+            )?),
+            0xff => Address::Id(Decode::decode(input)?),
+            _ => return None,
+        })
+    }
 }

-impl<AccountId, AccountIndex> Encode for Address<AccountId, AccountIndex> where
-	AccountId: Member + Encode,
-	AccountIndex: Member + Encode + PartialOrd + Ord + As<u8> + As<u16> + As<u32> + Copy,
+impl<AccountId, AccountIndex> Encode for Address<AccountId, AccountIndex>
+where
+    AccountId: Member + Encode,
+    AccountIndex:
+        Member + Encode + PartialOrd + Ord + As<u8> + As<u16> + As<u32> + Copy,
 {
-	fn encode_to<T: Output>(&self, dest: &mut T) {
-		match *self {
-			Address::Id(ref i) => {
-				dest.push_byte(255);
-				dest.push(i);
-			}
-			Address::Index(i) if i > As::sa(0xffffffffu32) => {
-				dest.push_byte(254);
-				dest.push(&i);
-			}
-			Address::Index(i) if i > As::sa(0xffffu32) => {
-				dest.push_byte(253);
-				dest.push(&As::<u32>::as_(i));
-			}
-			Address::Index(i) if i >= As::sa(0xf0u32) => {
-				dest.push_byte(252);
-				dest.push(&As::<u16>::as_(i));
-			}
-			Address::Index(i) => dest.push_byte(As::<u8>::as_(i)),
-		}
-	}
+    fn encode_to<T: Output>(&self, dest: &mut T) {
+        match *self {
+            Address::Id(ref i) => {
+                dest.push_byte(255);
+                dest.push(i);
+            }
+            Address::Index(i) if i > As::sa(0xffffffffu32) => {
+                dest.push_byte(254);
+                dest.push(&i);
+            }
+            Address::Index(i) if i > As::sa(0xffffu32) => {
+                dest.push_byte(253);
+                dest.push(&As::<u32>::as_(i));
+            }
+            Address::Index(i) if i >= As::sa(0xf0u32) => {
+                dest.push_byte(252);
+                dest.push(&As::<u16>::as_(i));
+            }
+            Address::Index(i) => dest.push_byte(As::<u8>::as_(i)),
+        }
+    }
 }

-impl<AccountId, AccountIndex> Default for Address<AccountId, AccountIndex> where
-	AccountId: Member + Default,
-	AccountIndex: Member,
+impl<AccountId, AccountIndex> Default for Address<AccountId, AccountIndex>
+where
+    AccountId: Member + Default,
+    AccountIndex: Member,
 {
-	fn default() -> Self {
-		Address::Id(Default::default())
-	}
+    fn default() -> Self {
+        Address::Id(Default::default())
+    }
 }

 #[cfg(test)]
 mod tests {
-	use crate::{Encode, Decode};
-
-	type Address = super::Address<[u8; 8], u32>;
-	fn index(i: u32) -> Address { super::Address::Index(i) }
-	fn id(i: [u8; 8]) -> Address { super::Address::Id(i) }
-
-	fn compare(a: Option<Address>, d: &[u8]) {
-		if let Some(ref a) = a {
-			assert_eq!(d, &a.encode()[..]);
-		}
-		assert_eq!(Address::decode(&mut &d[..]), a);
-	}
-
-	#[test]
-	fn it_should_work() {
-		compare(Some(index(2)), &[2][..]);
-		compare(None, &[240][..]);
-		compare(None, &[252, 239, 0][..]);
-		compare(Some(index(240)), &[252, 240, 0][..]);
-		compare(Some(index(304)), &[252, 48, 1][..]);
-		compare(None, &[253, 255, 255, 0, 0][..]);
-		compare(Some(index(0x10000)), &[253, 0, 0, 1, 0][..]);
-		compare(Some(id([42, 69, 42, 69, 42, 69, 42, 69])), &[255, 42, 69, 42, 69, 42, 69, 42, 69][..]);
-	}
+    use crate::{Decode, Encode};
+
+    type Address = super::Address<[u8; 8], u32>;
+    fn index(i: u32) -> Address {
+        super::Address::Index(i)
+    }
+    fn id(i: [u8; 8]) -> Address {
+        super::Address::Id(i)
+    }
+
+    fn compare(a: Option<Address>, d: &[u8]) {
+        if let Some(ref a) = a {
+            assert_eq!(d, &a.encode()[..]);
+        }
+        assert_eq!(Address::decode(&mut &d[..]), a);
+    }
+
+    #[test]
+    fn it_should_work() {
+        compare(Some(index(2)), &[2][..]);
+        compare(None, &[240][..]);
+        compare(None, &[252, 239, 0][..]);
+        compare(Some(index(240)), &[252, 240, 0][..]);
+        compare(Some(index(304)), &[252, 48, 1][..]);
+        compare(None, &[253, 255, 255, 0, 0][..]);
+        compare(Some(index(0x10000)), &[253, 0, 0, 1, 0][..]);
+        compare(
+            Some(id([42, 69, 42, 69, 42, 69, 42, 69])),
+            &[255, 42, 69, 42, 69, 42, 69, 42, 69][..],
+        );
+    }
 }
diff --git a/srml/indices/src/lib.rs b/srml/indices/src/lib.rs
index 76261796c8..8acef61f41 100644
--- a/srml/indices/src/lib.rs
+++ b/srml/indices/src/lib.rs
@@ -19,10 +19,10 @@
 #![cfg_attr(not(feature = "std"), no_std)]

-use rstd::{prelude::*, result, marker::PhantomData};
-use parity_codec::{Encode, Decode, Codec, Input, Output};
-use srml_support::{StorageValue, StorageMap, Parameter, decl_module, decl_event, decl_storage};
-use primitives::traits::{One, SimpleArithmetic, As, StaticLookup, Member};
+use parity_codec::{Codec, Decode, Encode, Input, Output};
+use primitives::traits::{As, Member, One, SimpleArithmetic, StaticLookup};
+use rstd::{marker::PhantomData, prelude::*, result};
+use srml_support::{decl_event, decl_module, decl_storage, Parameter, StorageMap, StorageValue};
 use system::{IsDeadAccount, OnNewAccount};

 use self::address::Address as RawAddress;

@@ -40,39 +40,53 @@ pub type Address<T> = RawAddress<<T as system::Trait>::AccountId, <T as Trait>::AccountIndex>;

 /// Turn an Id into an Index, or None for the purpose of getting
 /// a hint at a possibly desired index.
 pub trait ResolveHint<AccountId, AccountIndex: As<usize>> {
-	/// Turn an Id into an Index, or None for the purpose of getting
-	/// a hint at a possibly desired index.
-	fn resolve_hint(who: &AccountId) -> Option<AccountIndex>;
+    /// Turn an Id into an Index, or None for the purpose of getting
+    /// a hint at a possibly desired index.
+    fn resolve_hint(who: &AccountId) -> Option<AccountIndex>;
 }

 /// Simple encode-based resolve hint implementation.
 pub struct SimpleResolveHint<AccountId, AccountIndex>(PhantomData<(AccountId, AccountIndex)>);
-impl<AccountId: Encode, AccountIndex: As<usize>> ResolveHint<AccountId, AccountIndex> for SimpleResolveHint<AccountId, AccountIndex> {
-	fn resolve_hint(who: &AccountId) -> Option<AccountIndex> {
-		Some(AccountIndex::sa(who.using_encoded(|e| e[0] as usize + e[1] as usize * 256)))
-	}
+impl<AccountId: Encode, AccountIndex: As<usize>> ResolveHint<AccountId, AccountIndex>
+    for SimpleResolveHint<AccountId, AccountIndex>
+{
+    fn resolve_hint(who: &AccountId) -> Option<AccountIndex> {
+        Some(AccountIndex::sa(
+            who.using_encoded(|e| e[0] as usize + e[1] as usize * 256),
+        ))
+    }
 }

 /// The module's config trait.
 pub trait Trait: system::Trait {
-	/// Type used for storing an account's index; implies the maximum number of accounts the system
-	/// can hold.
-	type AccountIndex: Parameter + Member + Codec + Default + SimpleArithmetic + As<u8> + As<u16> + As<u32> + As<u64> + As<usize> + Copy;
-
-	/// Whether an account is dead or not.
-	type IsDeadAccount: IsDeadAccount<Self::AccountId>;
-
-	/// How to turn an id into an index.
-	type ResolveHint: ResolveHint<Self::AccountId, Self::AccountIndex>;
-
-	/// The overarching event type.
-	type Event: From<Event<Self>> + Into<<Self as system::Trait>::Event>;
+    /// Type used for storing an account's index; implies the maximum number of accounts the system
+    /// can hold.
+    type AccountIndex: Parameter
+        + Member
+        + Codec
+        + Default
+        + SimpleArithmetic
+        + As<u8>
+        + As<u16>
+        + As<u32>
+        + As<u64>
+        + As<usize>
+        + Copy;
+
+    /// Whether an account is dead or not.
+    type IsDeadAccount: IsDeadAccount<Self::AccountId>;
+
+    /// How to turn an id into an index.
+    type ResolveHint: ResolveHint<Self::AccountId, Self::AccountIndex>;
+
+    /// The overarching event type.
+    type Event: From<Event<Self>> + Into<<Self as system::Trait>::Event>;
 }

 decl_module!
{ - pub struct Module for enum Call where origin: T::Origin { - fn deposit_event() = default; - } + pub struct Module for enum Call where origin: T::Origin { + fn deposit_event() = default; + } } decl_event!( @@ -89,116 +103,118 @@ decl_event!( ); decl_storage! { - trait Store for Module as Indices { - /// The next free enumeration set. - pub NextEnumSet get(next_enum_set) build(|config: &GenesisConfig| { - T::AccountIndex::sa(config.ids.len() / ENUM_SET_SIZE) - }): T::AccountIndex; - - /// The enumeration sets. - pub EnumSet get(enum_set): map T::AccountIndex => Vec; - } - add_extra_genesis { - config(ids): Vec; - build(|storage: &mut primitives::StorageOverlay, _: &mut primitives::ChildrenStorageOverlay, config: &GenesisConfig| { - for i in 0..(config.ids.len() + ENUM_SET_SIZE - 1) / ENUM_SET_SIZE { - storage.insert(GenesisConfig::::hash(&>::key_for(T::AccountIndex::sa(i))).to_vec(), - config.ids[i * ENUM_SET_SIZE..config.ids.len().min((i + 1) * ENUM_SET_SIZE)].to_owned().encode()); - } - }); - } + trait Store for Module as Indices { + /// The next free enumeration set. + pub NextEnumSet get(next_enum_set) build(|config: &GenesisConfig| { + T::AccountIndex::sa(config.ids.len() / ENUM_SET_SIZE) + }): T::AccountIndex; + + /// The enumeration sets. + pub EnumSet get(enum_set): map T::AccountIndex => Vec; + } + add_extra_genesis { + config(ids): Vec; + build(|storage: &mut primitives::StorageOverlay, _: &mut primitives::ChildrenStorageOverlay, config: &GenesisConfig| { + for i in 0..(config.ids.len() + ENUM_SET_SIZE - 1) / ENUM_SET_SIZE { + storage.insert(GenesisConfig::::hash(&>::key_for(T::AccountIndex::sa(i))).to_vec(), + config.ids[i * ENUM_SET_SIZE..config.ids.len().min((i + 1) * ENUM_SET_SIZE)].to_owned().encode()); + } + }); + } } impl Module { - // PUBLIC IMMUTABLES - - /// Lookup an T::AccountIndex to get an Id, if there's one there. - pub fn lookup_index(index: T::AccountIndex) -> Option { - let enum_set_size = Self::enum_set_size(); - let set = Self::enum_set(index / enum_set_size); - let i: usize = (index % enum_set_size).as_(); - set.get(i).cloned() - } - - /// `true` if the account `index` is ready for reclaim. - pub fn can_reclaim(try_index: T::AccountIndex) -> bool { - let enum_set_size = Self::enum_set_size(); - let try_set = Self::enum_set(try_index / enum_set_size); - let i = (try_index % enum_set_size).as_(); - i < try_set.len() && T::IsDeadAccount::is_dead_account(&try_set[i]) - } - - /// Lookup an address to get an Id, if there's one there. - pub fn lookup_address(a: address::Address) -> Option { - match a { - address::Address::Id(i) => Some(i), - address::Address::Index(i) => Self::lookup_index(i), - } - } - - // PUBLIC MUTABLES (DANGEROUS) - - fn enum_set_size() -> T::AccountIndex { - T::AccountIndex::sa(ENUM_SET_SIZE) - } + // PUBLIC IMMUTABLES + + /// Lookup an T::AccountIndex to get an Id, if there's one there. + pub fn lookup_index(index: T::AccountIndex) -> Option { + let enum_set_size = Self::enum_set_size(); + let set = Self::enum_set(index / enum_set_size); + let i: usize = (index % enum_set_size).as_(); + set.get(i).cloned() + } + + /// `true` if the account `index` is ready for reclaim. + pub fn can_reclaim(try_index: T::AccountIndex) -> bool { + let enum_set_size = Self::enum_set_size(); + let try_set = Self::enum_set(try_index / enum_set_size); + let i = (try_index % enum_set_size).as_(); + i < try_set.len() && T::IsDeadAccount::is_dead_account(&try_set[i]) + } + + /// Lookup an address to get an Id, if there's one there. 
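+    /// An `Index` address is resolved through the enum sets via
+    /// `lookup_index`; an `Id` address already carries the account id.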
+ pub fn lookup_address( + a: address::Address, + ) -> Option { + match a { + address::Address::Id(i) => Some(i), + address::Address::Index(i) => Self::lookup_index(i), + } + } + + // PUBLIC MUTABLES (DANGEROUS) + + fn enum_set_size() -> T::AccountIndex { + T::AccountIndex::sa(ENUM_SET_SIZE) + } } impl OnNewAccount for Module { - fn on_new_account(who: &T::AccountId) { - let enum_set_size = Self::enum_set_size(); - let next_set_index = Self::next_enum_set(); - - if let Some(try_index) = T::ResolveHint::resolve_hint(who) { - // then check to see if this account id identifies a dead account index. - let set_index = try_index / enum_set_size; - let mut try_set = Self::enum_set(set_index); - let item_index = (try_index % enum_set_size).as_(); - if item_index < try_set.len() { - if T::IsDeadAccount::is_dead_account(&try_set[item_index]) { - // yup - this index refers to a dead account. can be reused. - try_set[item_index] = who.clone(); - >::insert(set_index, try_set); - - return - } - } - } - - // insert normally as a back up - let mut set_index = next_set_index; - // defensive only: this loop should never iterate since we keep NextEnumSet up to date later. - let mut set = loop { - let set = Self::enum_set(set_index); - if set.len() < ENUM_SET_SIZE { - break set; - } - set_index += One::one(); - }; - - let index = T::AccountIndex::sa(set_index.as_() * ENUM_SET_SIZE + set.len()); - - // update set. - set.push(who.clone()); - - // keep NextEnumSet up to date - if set.len() == ENUM_SET_SIZE { - >::put(set_index + One::one()); - } - - // write set. - >::insert(set_index, set); - - Self::deposit_event(RawEvent::NewAccountIndex(who.clone(), index)); - } + fn on_new_account(who: &T::AccountId) { + let enum_set_size = Self::enum_set_size(); + let next_set_index = Self::next_enum_set(); + + if let Some(try_index) = T::ResolveHint::resolve_hint(who) { + // then check to see if this account id identifies a dead account index. + let set_index = try_index / enum_set_size; + let mut try_set = Self::enum_set(set_index); + let item_index = (try_index % enum_set_size).as_(); + if item_index < try_set.len() { + if T::IsDeadAccount::is_dead_account(&try_set[item_index]) { + // yup - this index refers to a dead account. can be reused. + try_set[item_index] = who.clone(); + >::insert(set_index, try_set); + + return; + } + } + } + + // insert normally as a back up + let mut set_index = next_set_index; + // defensive only: this loop should never iterate since we keep NextEnumSet up to date later. + let mut set = loop { + let set = Self::enum_set(set_index); + if set.len() < ENUM_SET_SIZE { + break set; + } + set_index += One::one(); + }; + + let index = T::AccountIndex::sa(set_index.as_() * ENUM_SET_SIZE + set.len()); + + // update set. + set.push(who.clone()); + + // keep NextEnumSet up to date + if set.len() == ENUM_SET_SIZE { + >::put(set_index + One::one()); + } + + // write set. 
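+        // each enum set holds up to `ENUM_SET_SIZE` ids, so the new account's
+        // index is `set_index * ENUM_SET_SIZE` plus its offset within the set,
+        // as computed above.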
+ >::insert(set_index, set); + + Self::deposit_event(RawEvent::NewAccountIndex(who.clone(), index)); + } } impl StaticLookup for Module { - type Source = address::Address; - type Target = T::AccountId; - fn lookup(a: Self::Source) -> result::Result { - Self::lookup_address(a).ok_or("invalid account index") - } - fn unlookup(a: Self::Target) -> Self::Source { - address::Address::Id(a) - } + type Source = address::Address; + type Target = T::AccountId; + fn lookup(a: Self::Source) -> result::Result { + Self::lookup_address(a).ok_or("invalid account index") + } + fn unlookup(a: Self::Target) -> Self::Source { + address::Address::Id(a) + } } diff --git a/srml/indices/src/mock.rs b/srml/indices/src/mock.rs index 80d3fa2c4f..ddbe5b7c7f 100644 --- a/srml/indices/src/mock.rs +++ b/srml/indices/src/mock.rs @@ -18,85 +18,95 @@ #![cfg(test)] -use std::collections::HashSet; -use ref_thread_local::{ref_thread_local, RefThreadLocal}; -use primitives::BuildStorage; +use crate::{GenesisConfig, IsDeadAccount, Module, OnNewAccount, ResolveHint, Trait}; use primitives::testing::{Digest, DigestItem, Header}; -use substrate_primitives::{H256, Blake2Hasher}; +use primitives::BuildStorage; +use ref_thread_local::{ref_thread_local, RefThreadLocal}; use srml_support::impl_outer_origin; +use std::collections::HashSet; +use substrate_primitives::{Blake2Hasher, H256}; use {runtime_io, system}; -use crate::{GenesisConfig, Module, Trait, IsDeadAccount, OnNewAccount, ResolveHint}; -impl_outer_origin!{ - pub enum Origin for Runtime {} +impl_outer_origin! { + pub enum Origin for Runtime {} } ref_thread_local! { - static managed ALIVE: HashSet = HashSet::new(); + static managed ALIVE: HashSet = HashSet::new(); } pub fn make_account(who: u64) { - ALIVE.borrow_mut().insert(who); - Indices::on_new_account(&who); + ALIVE.borrow_mut().insert(who); + Indices::on_new_account(&who); } pub fn kill_account(who: u64) { - ALIVE.borrow_mut().remove(&who); + ALIVE.borrow_mut().remove(&who); } pub struct TestIsDeadAccount {} impl IsDeadAccount for TestIsDeadAccount { - fn is_dead_account(who: &u64) -> bool { - !ALIVE.borrow_mut().contains(who) - } + fn is_dead_account(who: &u64) -> bool { + !ALIVE.borrow_mut().contains(who) + } } pub struct TestResolveHint; impl ResolveHint for TestResolveHint { - fn resolve_hint(who: &u64) -> Option { - if *who < 256 { - None - } else { - Some(*who - 256) - } - } + fn resolve_hint(who: &u64) -> Option { + if *who < 256 { + None + } else { + Some(*who - 256) + } + } } // Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted. 
#[derive(Clone, PartialEq, Eq, Debug)] pub struct Runtime; impl system::Trait for Runtime { - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Hash = H256; - type Hashing = ::primitives::traits::BlakeTwo256; - type Digest = Digest; - type AccountId = u64; - type Lookup = Indices; - type Header = Header; - type Event = (); - type Log = DigestItem; + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Hashing = ::primitives::traits::BlakeTwo256; + type Digest = Digest; + type AccountId = u64; + type Lookup = Indices; + type Header = Header; + type Event = (); + type Log = DigestItem; } impl Trait for Runtime { - type AccountIndex = u64; - type IsDeadAccount = TestIsDeadAccount; - type ResolveHint = TestResolveHint; - type Event = (); + type AccountIndex = u64; + type IsDeadAccount = TestIsDeadAccount; + type ResolveHint = TestResolveHint; + type Event = (); } pub fn new_test_ext() -> runtime_io::TestExternalities { - { - let mut h = ALIVE.borrow_mut(); - h.clear(); - for i in 1..5 { h.insert(i); } - } - - let mut t = system::GenesisConfig::::default().build_storage().unwrap().0; - t.extend(GenesisConfig:: { - ids: vec![1, 2, 3, 4] - }.build_storage().unwrap().0); - t.into() + { + let mut h = ALIVE.borrow_mut(); + h.clear(); + for i in 1..5 { + h.insert(i); + } + } + + let mut t = system::GenesisConfig::::default() + .build_storage() + .unwrap() + .0; + t.extend( + GenesisConfig:: { + ids: vec![1, 2, 3, 4], + } + .build_storage() + .unwrap() + .0, + ); + t.into() } pub type Indices = Module; diff --git a/srml/indices/src/tests.rs b/srml/indices/src/tests.rs index 7b60e30527..e8b7540414 100644 --- a/srml/indices/src/tests.rs +++ b/srml/indices/src/tests.rs @@ -19,62 +19,50 @@ #![cfg(test)] use super::*; -use crate::mock::{Indices, new_test_ext, make_account, kill_account, TestIsDeadAccount}; +use crate::mock::{kill_account, make_account, new_test_ext, Indices, TestIsDeadAccount}; use runtime_io::with_externalities; #[test] fn indexing_lookup_should_work() { - with_externalities( - &mut new_test_ext(), - || { - assert_eq!(Indices::lookup_index(0), Some(1)); - assert_eq!(Indices::lookup_index(1), Some(2)); - assert_eq!(Indices::lookup_index(2), Some(3)); - assert_eq!(Indices::lookup_index(3), Some(4)); - assert_eq!(Indices::lookup_index(4), None); - }, - ); + with_externalities(&mut new_test_ext(), || { + assert_eq!(Indices::lookup_index(0), Some(1)); + assert_eq!(Indices::lookup_index(1), Some(2)); + assert_eq!(Indices::lookup_index(2), Some(3)); + assert_eq!(Indices::lookup_index(3), Some(4)); + assert_eq!(Indices::lookup_index(4), None); + }); } #[test] fn default_indexing_on_new_accounts_should_work() { - with_externalities( - &mut new_test_ext(), - || { - assert_eq!(Indices::lookup_index(4), None); - make_account(5); - assert_eq!(Indices::lookup_index(4), Some(5)); - }, - ); + with_externalities(&mut new_test_ext(), || { + assert_eq!(Indices::lookup_index(4), None); + make_account(5); + assert_eq!(Indices::lookup_index(4), Some(5)); + }); } #[test] fn reclaim_indexing_on_new_accounts_should_work() { - with_externalities( - &mut new_test_ext(), - || { - assert_eq!(Indices::lookup_index(1), Some(2)); - assert_eq!(Indices::lookup_index(4), None); + with_externalities(&mut new_test_ext(), || { + assert_eq!(Indices::lookup_index(1), Some(2)); + assert_eq!(Indices::lookup_index(4), None); - kill_account(2); // index 1 no longer locked to id 2 + kill_account(2); // index 1 no longer locked to id 2 - make_account(1 + 256); // id 
257 takes index 1. - assert_eq!(Indices::lookup_index(1), Some(257)); - }, - ); + make_account(1 + 256); // id 257 takes index 1. + assert_eq!(Indices::lookup_index(1), Some(257)); + }); } #[test] fn alive_account_should_prevent_reclaim() { - with_externalities( - &mut new_test_ext(), - || { - assert!(!TestIsDeadAccount::is_dead_account(&2)); - assert_eq!(Indices::lookup_index(1), Some(2)); - assert_eq!(Indices::lookup_index(4), None); + with_externalities(&mut new_test_ext(), || { + assert!(!TestIsDeadAccount::is_dead_account(&2)); + assert_eq!(Indices::lookup_index(1), Some(2)); + assert_eq!(Indices::lookup_index(4), None); - make_account(1 + 256); // id 257 takes index 1. - assert_eq!(Indices::lookup_index(4), Some(257)); - }, - ); + make_account(1 + 256); // id 257 takes index 1. + assert_eq!(Indices::lookup_index(4), Some(257)); + }); } diff --git a/srml/metadata/src/lib.rs b/srml/metadata/src/lib.rs index 9b03daafa6..d60fd339eb 100644 --- a/srml/metadata/src/lib.rs +++ b/srml/metadata/src/lib.rs @@ -22,12 +22,12 @@ #![cfg_attr(not(feature = "std"), no_std)] -#[cfg(feature = "std")] -use serde_derive::Serialize; #[cfg(feature = "std")] use parity_codec::{Decode, Input}; use parity_codec::{Encode, Output}; use rstd::vec::Vec; +#[cfg(feature = "std")] +use serde_derive::Serialize; #[cfg(feature = "std")] type StringBuf = String; @@ -45,75 +45,88 @@ type StringBuf = &'static str; /// /// For example a `&'static [ &'static str ]` can be decoded to a `Vec`. #[derive(Clone)] -pub enum DecodeDifferent where B: 'static, O: 'static { - Encode(B), - Decoded(O), +pub enum DecodeDifferent +where + B: 'static, + O: 'static, +{ + Encode(B), + Decoded(O), } -impl Encode for DecodeDifferent where B: Encode + 'static, O: Encode + 'static { - fn encode_to(&self, dest: &mut W) { - match self { - DecodeDifferent::Encode(b) => b.encode_to(dest), - DecodeDifferent::Decoded(o) => o.encode_to(dest), - } - } +impl Encode for DecodeDifferent +where + B: Encode + 'static, + O: Encode + 'static, +{ + fn encode_to(&self, dest: &mut W) { + match self { + DecodeDifferent::Encode(b) => b.encode_to(dest), + DecodeDifferent::Decoded(o) => o.encode_to(dest), + } + } } #[cfg(feature = "std")] -impl Decode for DecodeDifferent where B: 'static, O: Decode + 'static { - fn decode(input: &mut I) -> Option { - ::decode(input).and_then(|val| { - Some(DecodeDifferent::Decoded(val)) - }) - } +impl Decode for DecodeDifferent +where + B: 'static, + O: Decode + 'static, +{ + fn decode(input: &mut I) -> Option { + ::decode(input).and_then(|val| Some(DecodeDifferent::Decoded(val))) + } } impl PartialEq for DecodeDifferent where - B: Encode + Eq + PartialEq + 'static, - O: Encode + Eq + PartialEq + 'static, + B: Encode + Eq + PartialEq + 'static, + O: Encode + Eq + PartialEq + 'static, { - fn eq(&self, other: &Self) -> bool { - self.encode() == other.encode() - } + fn eq(&self, other: &Self) -> bool { + self.encode() == other.encode() + } } impl Eq for DecodeDifferent - where B: Encode + Eq + PartialEq + 'static, O: Encode + Eq + PartialEq + 'static -{} +where + B: Encode + Eq + PartialEq + 'static, + O: Encode + Eq + PartialEq + 'static, +{ +} #[cfg(feature = "std")] impl std::fmt::Debug for DecodeDifferent - where - B: std::fmt::Debug + Eq + 'static, - O: std::fmt::Debug + Eq + 'static, +where + B: std::fmt::Debug + Eq + 'static, + O: std::fmt::Debug + Eq + 'static, { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - match self { - DecodeDifferent::Encode(b) => b.fmt(f), - DecodeDifferent::Decoded(o) => o.fmt(f), 
- } - } + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + match self { + DecodeDifferent::Encode(b) => b.fmt(f), + DecodeDifferent::Decoded(o) => o.fmt(f), + } + } } #[cfg(feature = "std")] impl serde::Serialize for DecodeDifferent - where - B: serde::Serialize + 'static, - O: serde::Serialize + 'static, +where + B: serde::Serialize + 'static, + O: serde::Serialize + 'static, { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - match self { - DecodeDifferent::Encode(b) => b.serialize(serializer), - DecodeDifferent::Decoded(o) => o.serialize(serializer), - } - } + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + match self { + DecodeDifferent::Encode(b) => b.serialize(serializer), + DecodeDifferent::Decoded(o) => o.serialize(serializer), + } + } } -pub type DecodeDifferentArray = DecodeDifferent<&'static [B], Vec>; +pub type DecodeDifferentArray = DecodeDifferent<&'static [B], Vec>; #[cfg(feature = "std")] type DecodeDifferentStr = DecodeDifferent<&'static str, StringBuf>; @@ -124,93 +137,95 @@ type DecodeDifferentStr = DecodeDifferent<&'static str, StringBuf>; #[derive(Clone, PartialEq, Eq, Encode)] #[cfg_attr(feature = "std", derive(Decode, Debug, Serialize))] pub struct FunctionMetadata { - pub name: DecodeDifferentStr, - pub arguments: DecodeDifferentArray, - pub documentation: DecodeDifferentArray<&'static str, StringBuf>, + pub name: DecodeDifferentStr, + pub arguments: DecodeDifferentArray, + pub documentation: DecodeDifferentArray<&'static str, StringBuf>, } /// All the metadata about a function argument. #[derive(Clone, PartialEq, Eq, Encode)] #[cfg_attr(feature = "std", derive(Decode, Debug, Serialize))] pub struct FunctionArgumentMetadata { - pub name: DecodeDifferentStr, - pub ty: DecodeDifferentStr, + pub name: DecodeDifferentStr, + pub ty: DecodeDifferentStr, } /// Newtype wrapper for support encoding functions (actual the result of the function). #[derive(Clone, Eq)] -pub struct FnEncode(pub fn() -> E) where E: Encode + 'static; +pub struct FnEncode(pub fn() -> E) +where + E: Encode + 'static; impl Encode for FnEncode { - fn encode_to(&self, dest: &mut W) { - self.0().encode_to(dest); - } + fn encode_to(&self, dest: &mut W) { + self.0().encode_to(dest); + } } impl PartialEq for FnEncode { - fn eq(&self, other: &Self) -> bool { - self.0().eq(&other.0()) - } + fn eq(&self, other: &Self) -> bool { + self.0().eq(&other.0()) + } } #[cfg(feature = "std")] impl std::fmt::Debug for FnEncode { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - self.0().fmt(f) - } + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + self.0().fmt(f) + } } #[cfg(feature = "std")] impl serde::Serialize for FnEncode { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - self.0().serialize(serializer) - } + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + self.0().serialize(serializer) + } } /// All the metadata about an outer event. #[derive(Clone, PartialEq, Eq, Encode)] #[cfg_attr(feature = "std", derive(Decode, Debug, Serialize))] pub struct OuterEventMetadata { - pub name: DecodeDifferentStr, - pub events: DecodeDifferentArray< - (&'static str, FnEncode<&'static [EventMetadata]>), - (StringBuf, Vec) - >, + pub name: DecodeDifferentStr, + pub events: DecodeDifferentArray< + (&'static str, FnEncode<&'static [EventMetadata]>), + (StringBuf, Vec), + >, } /// All the metadata about a event. 
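
An editor's aside on the pattern above (illustrative, not part of the patch): `DecodeDifferent` carries a static, encode-side representation inside the runtime and an owned representation after decoding on the client; `decode` always yields the `Decoded` arm, while `Encode` and `PartialEq` treat both arms uniformly through their encoding. A reduced sketch of the same idea in plain Rust, with hypothetical names:

enum EncodeOrDecoded<B, O> {
    Encode(B),
    Decoded(O),
}

impl<B: AsRef<str>, O: AsRef<str>> EncodeOrDecoded<B, O> {
    // Both arms expose the same view, mirroring how both arms of
    // `DecodeDifferent` must produce the same encoding.
    fn as_str(&self) -> &str {
        match self {
            EncodeOrDecoded::Encode(b) => b.as_ref(),
            EncodeOrDecoded::Decoded(o) => o.as_ref(),
        }
    }
}

fn main() {
    let runtime_side: EncodeOrDecoded<&'static str, String> = EncodeOrDecoded::Encode("balances");
    let client_side: EncodeOrDecoded<&'static str, String> = EncodeOrDecoded::Decoded("balances".into());
    assert_eq!(runtime_side.as_str(), client_side.as_str());
}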
#[derive(Clone, PartialEq, Eq, Encode)] #[cfg_attr(feature = "std", derive(Decode, Debug, Serialize))] pub struct EventMetadata { - pub name: DecodeDifferentStr, - pub arguments: DecodeDifferentArray<&'static str, StringBuf>, - pub documentation: DecodeDifferentArray<&'static str, StringBuf>, + pub name: DecodeDifferentStr, + pub arguments: DecodeDifferentArray<&'static str, StringBuf>, + pub documentation: DecodeDifferentArray<&'static str, StringBuf>, } /// All the metadata about a storage. #[derive(Clone, PartialEq, Eq, Encode)] #[cfg_attr(feature = "std", derive(Decode, Debug, Serialize))] pub struct StorageMetadata { - pub functions: DecodeDifferentArray, + pub functions: DecodeDifferentArray, } /// All the metadata about a storage function. #[derive(Clone, PartialEq, Eq, Encode)] #[cfg_attr(feature = "std", derive(Decode, Debug, Serialize))] pub struct StorageFunctionMetadata { - pub name: DecodeDifferentStr, - pub modifier: StorageFunctionModifier, - pub ty: StorageFunctionType, - pub default: ByteGetter, - pub documentation: DecodeDifferentArray<&'static str, StringBuf>, + pub name: DecodeDifferentStr, + pub modifier: StorageFunctionModifier, + pub ty: StorageFunctionType, + pub default: ByteGetter, + pub documentation: DecodeDifferentArray<&'static str, StringBuf>, } /// A technical trait to store lazy initiated vec value as static dyn pointer. pub trait DefaultByte { - fn default_byte(&self) -> Vec; + fn default_byte(&self) -> Vec; } /// Wrapper over dyn pointer for accessing a cached once byte value. @@ -221,78 +236,78 @@ pub struct DefaultByteGetter(pub &'static dyn DefaultByte); pub type ByteGetter = DecodeDifferent>; impl Encode for DefaultByteGetter { - fn encode_to(&self, dest: &mut W) { - self.0.default_byte().encode_to(dest) - } + fn encode_to(&self, dest: &mut W) { + self.0.default_byte().encode_to(dest) + } } impl PartialEq for DefaultByteGetter { - fn eq(&self, other: &DefaultByteGetter) -> bool { - let left = self.0.default_byte(); - let right = other.0.default_byte(); - left.eq(&right) - } + fn eq(&self, other: &DefaultByteGetter) -> bool { + let left = self.0.default_byte(); + let right = other.0.default_byte(); + left.eq(&right) + } } -impl Eq for DefaultByteGetter { } +impl Eq for DefaultByteGetter {} #[cfg(feature = "std")] impl serde::Serialize for DefaultByteGetter { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - self.0.default_byte().serialize(serializer) - } + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + self.0.default_byte().serialize(serializer) + } } #[cfg(feature = "std")] impl std::fmt::Debug for DefaultByteGetter { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - self.0.default_byte().fmt(f) - } + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + self.0.default_byte().fmt(f) + } } /// A storage function type. 
#[derive(Clone, PartialEq, Eq, Encode)] #[cfg_attr(feature = "std", derive(Decode, Debug, Serialize))] pub enum StorageFunctionType { - Plain(DecodeDifferentStr), - Map { - key: DecodeDifferentStr, - value: DecodeDifferentStr, - is_linked: bool, - }, - DoubleMap { - key1: DecodeDifferentStr, - key2: DecodeDifferentStr, - value: DecodeDifferentStr, - key2_hasher: DecodeDifferentStr, - }, + Plain(DecodeDifferentStr), + Map { + key: DecodeDifferentStr, + value: DecodeDifferentStr, + is_linked: bool, + }, + DoubleMap { + key1: DecodeDifferentStr, + key2: DecodeDifferentStr, + value: DecodeDifferentStr, + key2_hasher: DecodeDifferentStr, + }, } /// A storage function modifier. #[derive(Clone, PartialEq, Eq, Encode)] #[cfg_attr(feature = "std", derive(Decode, Debug, Serialize))] pub enum StorageFunctionModifier { - Optional, - Default, + Optional, + Default, } /// All metadata about the outer dispatch. #[derive(Clone, PartialEq, Eq, Encode)] #[cfg_attr(feature = "std", derive(Decode, Debug, Serialize))] pub struct OuterDispatchMetadata { - pub name: DecodeDifferentStr, - pub calls: DecodeDifferentArray, + pub name: DecodeDifferentStr, + pub calls: DecodeDifferentArray, } /// A Call from the outer dispatch. #[derive(Clone, PartialEq, Eq, Encode)] #[cfg_attr(feature = "std", derive(Decode, Debug, Serialize))] pub struct OuterDispatchCall { - pub name: DecodeDifferentStr, - pub index: u16, + pub name: DecodeDifferentStr, + pub index: u16, } #[derive(Eq, Encode, PartialEq)] @@ -306,61 +321,60 @@ pub struct RuntimeMetadataPrefixed(pub u32, pub RuntimeMetadata); #[derive(Eq, Encode, PartialEq)] #[cfg_attr(feature = "std", derive(Decode, Debug, Serialize))] pub enum RuntimeMetadata { - /// Unused; enum filler. - V0(RuntimeMetadataDeprecated), - /// Version 1 for runtime metadata. No longer used. - V1(RuntimeMetadataDeprecated), - /// Version 2 for runtime metadata. No longer used. - V2(RuntimeMetadataDeprecated), - /// Version 3 for runtime metadata. - V3(RuntimeMetadataV3), + /// Unused; enum filler. + V0(RuntimeMetadataDeprecated), + /// Version 1 for runtime metadata. No longer used. + V1(RuntimeMetadataDeprecated), + /// Version 2 for runtime metadata. No longer used. + V2(RuntimeMetadataDeprecated), + /// Version 3 for runtime metadata. + V3(RuntimeMetadataV3), } /// Enum that should fail. #[derive(Eq, PartialEq)] #[cfg_attr(feature = "std", derive(Debug, Serialize))] -pub enum RuntimeMetadataDeprecated { } +pub enum RuntimeMetadataDeprecated {} impl Encode for RuntimeMetadataDeprecated { - fn encode_to(&self, _dest: &mut W) { - } + fn encode_to(&self, _dest: &mut W) {} } #[cfg(feature = "std")] impl Decode for RuntimeMetadataDeprecated { - fn decode(_input: &mut I) -> Option { - unimplemented!() - } + fn decode(_input: &mut I) -> Option { + unimplemented!() + } } /// The metadata of a runtime. #[derive(Eq, Encode, PartialEq)] #[cfg_attr(feature = "std", derive(Decode, Debug, Serialize))] pub struct RuntimeMetadataV3 { - pub modules: DecodeDifferentArray, + pub modules: DecodeDifferentArray, } /// All metadata about an runtime module. 
#[derive(Clone, PartialEq, Eq, Encode)] #[cfg_attr(feature = "std", derive(Decode, Debug, Serialize))] pub struct ModuleMetadata { - pub name: DecodeDifferentStr, - pub prefix: DecodeDifferent, StringBuf>, - pub storage: ODFnA, - pub calls: ODFnA, - pub event: ODFnA, + pub name: DecodeDifferentStr, + pub prefix: DecodeDifferent, StringBuf>, + pub storage: ODFnA, + pub calls: ODFnA, + pub event: ODFnA, } type ODFnA = Option, Vec>>; impl Into for RuntimeMetadataPrefixed { - fn into(self) -> primitives::OpaqueMetadata { - primitives::OpaqueMetadata::new(self.encode()) - } + fn into(self) -> primitives::OpaqueMetadata { + primitives::OpaqueMetadata::new(self.encode()) + } } impl Into for RuntimeMetadata { - fn into(self) -> RuntimeMetadataPrefixed { - RuntimeMetadataPrefixed(META_RESERVED, self) - } + fn into(self) -> RuntimeMetadataPrefixed { + RuntimeMetadataPrefixed(META_RESERVED, self) + } } diff --git a/srml/session/src/lib.rs b/srml/session/src/lib.rs index 204eaccc1f..931d7d4a47 100644 --- a/srml/session/src/lib.rs +++ b/srml/session/src/lib.rs @@ -19,17 +19,19 @@ #![cfg_attr(not(feature = "std"), no_std)] +use primitives::traits::{As, Convert, One, Zero}; +use rstd::ops::Mul; use rstd::prelude::*; -use primitives::traits::{As, Zero, One, Convert}; -use srml_support::{StorageValue, StorageMap, for_each_tuple, decl_module, decl_event, decl_storage}; +use srml_support::{ + decl_event, decl_module, decl_storage, for_each_tuple, StorageMap, StorageValue, +}; use srml_support::{dispatch::Result, traits::OnFreeBalanceZero}; use system::ensure_signed; -use rstd::ops::Mul; /// A session has changed. pub trait OnSessionChange { - /// Session has changed. - fn on_session_change(time_elapsed: T, should_reward: bool); + /// Session has changed. + fn on_session_change(time_elapsed: T, should_reward: bool); } macro_rules! impl_session_change { @@ -51,37 +53,37 @@ macro_rules! impl_session_change { for_each_tuple!(impl_session_change); pub trait Trait: timestamp::Trait + consensus::Trait { - type ConvertAccountIdToSessionKey: Convert>; - type OnSessionChange: OnSessionChange; - type Event: From> + Into<::Event>; + type ConvertAccountIdToSessionKey: Convert>; + type OnSessionChange: OnSessionChange; + type Event: From> + Into<::Event>; } decl_module! { - pub struct Module for enum Call where origin: T::Origin { - fn deposit_event() = default; - - /// Sets the session key of `_validator` to `_key`. This doesn't take effect until the next - /// session. - fn set_key(origin, key: T::SessionKey) { - let who = ensure_signed(origin)?; - // set new value for next session - >::insert(who, key); - } - - /// Set a new session length. Won't kick in until the next session change (at current length). - fn set_length(#[compact] new: T::BlockNumber) { - >::put(new); - } - - /// Forces a new session. - fn force_new_session(apply_rewards: bool) -> Result { - Self::apply_force_new_session(apply_rewards) - } - - fn on_finalize(n: T::BlockNumber) { - Self::check_rotate_session(n); - } - } + pub struct Module for enum Call where origin: T::Origin { + fn deposit_event() = default; + + /// Sets the session key of `_validator` to `_key`. This doesn't take effect until the next + /// session. + fn set_key(origin, key: T::SessionKey) { + let who = ensure_signed(origin)?; + // set new value for next session + >::insert(who, key); + } + + /// Set a new session length. Won't kick in until the next session change (at current length). + fn set_length(#[compact] new: T::BlockNumber) { + >::put(new); + } + + /// Forces a new session. 
+ fn force_new_session(apply_rewards: bool) -> Result { + Self::apply_force_new_session(apply_rewards) + } + + fn on_finalize(n: T::BlockNumber) { + Self::check_rotate_session(n); + } + } } decl_event!( @@ -93,353 +95,402 @@ decl_event!( ); decl_storage! { - trait Store for Module as Session { - /// The current set of validators. - pub Validators get(validators) config(): Vec; - /// Current length of the session. - pub SessionLength get(length) config(session_length): T::BlockNumber = T::BlockNumber::sa(1000); - /// Current index of the session. - pub CurrentIndex get(current_index) build(|_| T::BlockNumber::sa(0)): T::BlockNumber; - /// Timestamp when current session started. - pub CurrentStart get(current_start) build(|_| T::Moment::zero()): T::Moment; - - /// New session is being forced if this entry exists; in which case, the boolean value is whether - /// the new session should be considered a normal rotation (rewardable) or exceptional (slashable). - pub ForcingNewSession get(forcing_new_session): Option; - /// Block at which the session length last changed. - LastLengthChange: Option; - /// The next key for a given validator. - NextKeyFor build(|config: &GenesisConfig| { - config.keys.clone() - }): map T::AccountId => Option; - /// The next session length. - NextSessionLength: Option; - } - add_extra_genesis { - config(keys): Vec<(T::AccountId, T::SessionKey)>; - } + trait Store for Module as Session { + /// The current set of validators. + pub Validators get(validators) config(): Vec; + /// Current length of the session. + pub SessionLength get(length) config(session_length): T::BlockNumber = T::BlockNumber::sa(1000); + /// Current index of the session. + pub CurrentIndex get(current_index) build(|_| T::BlockNumber::sa(0)): T::BlockNumber; + /// Timestamp when current session started. + pub CurrentStart get(current_start) build(|_| T::Moment::zero()): T::Moment; + + /// New session is being forced if this entry exists; in which case, the boolean value is whether + /// the new session should be considered a normal rotation (rewardable) or exceptional (slashable). + pub ForcingNewSession get(forcing_new_session): Option; + /// Block at which the session length last changed. + LastLengthChange: Option; + /// The next key for a given validator. + NextKeyFor build(|config: &GenesisConfig| { + config.keys.clone() + }): map T::AccountId => Option; + /// The next session length. + NextSessionLength: Option; + } + add_extra_genesis { + config(keys): Vec<(T::AccountId, T::SessionKey)>; + } } impl Module { - /// The current number of validators. - pub fn validator_count() -> u32 { - >::get().len() as u32 - } - - /// The last length change if there was one, zero if not. - pub fn last_length_change() -> T::BlockNumber { - >::get().unwrap_or_else(T::BlockNumber::zero) - } - - // INTERNAL API (available to other runtime modules) - /// Forces a new session, no origin. - pub fn apply_force_new_session(apply_rewards: bool) -> Result { - >::put(apply_rewards); - Ok(()) - } - - /// Set the current set of validators. - /// - /// Called by `staking::new_era` only. `rotate_session` must be called after this in order to - /// update the session keys to the next validator set. - pub fn set_validators(new: &[T::AccountId]) { - >::put(&new.to_vec()); - } - - /// Hook to be called after transaction processing. - pub fn check_rotate_session(block_number: T::BlockNumber) { - // Do this last, after the staking system has had the chance to switch out the authorities for the - // new set. 
- // Check block number and call `rotate_session` if necessary. - let is_final_block = ((block_number - Self::last_length_change()) % Self::length()).is_zero(); - let (should_end_session, apply_rewards) = >::take() - .map_or((is_final_block, is_final_block), |apply_rewards| (true, apply_rewards)); - if should_end_session { - Self::rotate_session(is_final_block, apply_rewards); - } - } - - /// Move on to next session: register the new authority set. - pub fn rotate_session(is_final_block: bool, apply_rewards: bool) { - let now = >::get(); - let time_elapsed = now.clone() - Self::current_start(); - let session_index = >::get() + One::one(); - - Self::deposit_event(RawEvent::NewSession(session_index)); - - // Increment current session index. - >::put(session_index); - >::put(now); - - // Enact session length change. - let len_changed = if let Some(next_len) = >::take() { - >::put(next_len); - true - } else { - false - }; - if len_changed || !is_final_block { - let block_number = >::block_number(); - >::put(block_number); - } - - T::OnSessionChange::on_session_change(time_elapsed, apply_rewards); - - // Update any changes in session keys. - let v = Self::validators(); - >::set_authority_count(v.len() as u32); - for (i, v) in v.into_iter().enumerate() { - >::set_authority( - i as u32, - &>::get(&v) - .or_else(|| T::ConvertAccountIdToSessionKey::convert(v)) - .unwrap_or_default() - ); - }; - } - - /// Get the time that should have elapsed over a session if everything was working perfectly. - pub fn ideal_session_duration() -> T::Moment { - let block_period: T::Moment = >::minimum_period(); - let session_length: T::BlockNumber = Self::length(); - Mul::::mul(block_period, session_length) - } - - /// Number of blocks remaining in this session, not counting this one. If the session is - /// due to rotate at the end of this block, then it will return 0. If the session just began, then - /// it will return `Self::length() - 1`. - pub fn blocks_remaining() -> T::BlockNumber { - let length = Self::length(); - let length_minus_1 = length - One::one(); - let block_number = >::block_number(); - length_minus_1 - (block_number - Self::last_length_change() + length_minus_1) % length - } + /// The current number of validators. + pub fn validator_count() -> u32 { + >::get().len() as u32 + } + + /// The last length change if there was one, zero if not. + pub fn last_length_change() -> T::BlockNumber { + >::get().unwrap_or_else(T::BlockNumber::zero) + } + + // INTERNAL API (available to other runtime modules) + /// Forces a new session, no origin. + pub fn apply_force_new_session(apply_rewards: bool) -> Result { + >::put(apply_rewards); + Ok(()) + } + + /// Set the current set of validators. + /// + /// Called by `staking::new_era` only. `rotate_session` must be called after this in order to + /// update the session keys to the next validator set. + pub fn set_validators(new: &[T::AccountId]) { + >::put(&new.to_vec()); + } + + /// Hook to be called after transaction processing. + pub fn check_rotate_session(block_number: T::BlockNumber) { + // Do this last, after the staking system has had the chance to switch out the authorities for the + // new set. + // Check block number and call `rotate_session` if necessary. 
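+        // (Editor's worked example, not part of the original patch: with
+        // `length = 2` and `last_length_change = 0`, block 4 gives
+        // `(4 - 0) % 2 == 0`, i.e. a final block; a pending `ForcingNewSession`
+        // entry ends the session even when this check is false.)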
+ let is_final_block = + ((block_number - Self::last_length_change()) % Self::length()).is_zero(); + let (should_end_session, apply_rewards) = >::take() + .map_or((is_final_block, is_final_block), |apply_rewards| { + (true, apply_rewards) + }); + if should_end_session { + Self::rotate_session(is_final_block, apply_rewards); + } + } + + /// Move on to next session: register the new authority set. + pub fn rotate_session(is_final_block: bool, apply_rewards: bool) { + let now = >::get(); + let time_elapsed = now.clone() - Self::current_start(); + let session_index = >::get() + One::one(); + + Self::deposit_event(RawEvent::NewSession(session_index)); + + // Increment current session index. + >::put(session_index); + >::put(now); + + // Enact session length change. + let len_changed = if let Some(next_len) = >::take() { + >::put(next_len); + true + } else { + false + }; + if len_changed || !is_final_block { + let block_number = >::block_number(); + >::put(block_number); + } + + T::OnSessionChange::on_session_change(time_elapsed, apply_rewards); + + // Update any changes in session keys. + let v = Self::validators(); + >::set_authority_count(v.len() as u32); + for (i, v) in v.into_iter().enumerate() { + >::set_authority( + i as u32, + &>::get(&v) + .or_else(|| T::ConvertAccountIdToSessionKey::convert(v)) + .unwrap_or_default(), + ); + } + } + + /// Get the time that should have elapsed over a session if everything was working perfectly. + pub fn ideal_session_duration() -> T::Moment { + let block_period: T::Moment = >::minimum_period(); + let session_length: T::BlockNumber = Self::length(); + Mul::::mul(block_period, session_length) + } + + /// Number of blocks remaining in this session, not counting this one. If the session is + /// due to rotate at the end of this block, then it will return 0. If the session just began, then + /// it will return `Self::length() - 1`. 
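+    /// (Editor's worked example, not part of the original patch: with
+    /// `length = 2` and `last_length_change = 0`, block 1 yields
+    /// `1 - (1 - 0 + 1) % 2 = 1` remaining and block 2 yields
+    /// `1 - (2 - 0 + 1) % 2 = 0`.)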
+ pub fn blocks_remaining() -> T::BlockNumber { + let length = Self::length(); + let length_minus_1 = length - One::one(); + let block_number = >::block_number(); + length_minus_1 - (block_number - Self::last_length_change() + length_minus_1) % length + } } impl OnFreeBalanceZero for Module { - fn on_free_balance_zero(who: &T::AccountId) { - >::remove(who); - } + fn on_free_balance_zero(who: &T::AccountId) { + >::remove(who); + } } #[cfg(test)] mod tests { - use super::*; - use std::cell::RefCell; - use srml_support::{impl_outer_origin, assert_ok}; - use runtime_io::with_externalities; - use substrate_primitives::{H256, Blake2Hasher}; - use primitives::BuildStorage; - use primitives::traits::{BlakeTwo256, IdentityLookup}; - use primitives::testing::{Digest, DigestItem, Header, UintAuthorityId, ConvertUintAuthorityId}; - - impl_outer_origin!{ - pub enum Origin for Test {} - } - - thread_local!{ - static NEXT_VALIDATORS: RefCell> = RefCell::new(vec![1, 2, 3]); - } - - pub struct TestOnSessionChange; - impl OnSessionChange for TestOnSessionChange { - fn on_session_change(_elapsed: u64, _should_reward: bool) { - NEXT_VALIDATORS.with(|v| Session::set_validators(&*v.borrow())); - } - } - - #[derive(Clone, Eq, PartialEq)] - pub struct Test; - impl consensus::Trait for Test { - type Log = DigestItem; - type SessionKey = UintAuthorityId; - type InherentOfflineReport = (); - } - impl system::Trait for Test { - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type Digest = Digest; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type Log = DigestItem; - } - impl timestamp::Trait for Test { - type Moment = u64; - type OnTimestampSet = (); - } - impl Trait for Test { - type ConvertAccountIdToSessionKey = ConvertUintAuthorityId; - type OnSessionChange = TestOnSessionChange; - type Event = (); - } - - type System = system::Module; - type Consensus = consensus::Module; - type Session = Module; - - fn new_test_ext() -> runtime_io::TestExternalities { - let mut t = system::GenesisConfig::::default().build_storage().unwrap().0; - t.extend(consensus::GenesisConfig::{ - code: vec![], - authorities: NEXT_VALIDATORS.with(|l| l.borrow().iter().cloned().map(UintAuthorityId).collect()), - }.build_storage().unwrap().0); - t.extend(timestamp::GenesisConfig::{ - minimum_period: 5, - }.build_storage().unwrap().0); - t.extend(GenesisConfig::{ - session_length: 2, - validators: NEXT_VALIDATORS.with(|l| l.borrow().clone()), - keys: vec![], - }.build_storage().unwrap().0); - runtime_io::TestExternalities::new(t) - } - - #[test] - fn simple_setup_should_work() { - with_externalities(&mut new_test_ext(), || { - assert_eq!(Consensus::authorities(), vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)]); - assert_eq!(Session::length(), 2); - assert_eq!(Session::validators(), vec![1, 2, 3]); - }); - } - - #[test] - fn authorities_should_track_validators() { - with_externalities(&mut new_test_ext(), || { - NEXT_VALIDATORS.with(|v| *v.borrow_mut() = vec![1, 2]); - assert_ok!(Session::force_new_session(false)); - Session::check_rotate_session(1); - assert_eq!(Session::validators(), vec![1, 2]); - assert_eq!(Consensus::authorities(), vec![UintAuthorityId(1), UintAuthorityId(2)]); - - NEXT_VALIDATORS.with(|v| *v.borrow_mut() = vec![1, 2, 4]); - assert_ok!(Session::force_new_session(false)); - Session::check_rotate_session(2); - assert_eq!(Session::validators(), vec![1, 2, 4]); - 
assert_eq!(Consensus::authorities(), vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(4)]); - - NEXT_VALIDATORS.with(|v| *v.borrow_mut() = vec![1, 2, 3]); - assert_ok!(Session::force_new_session(false)); - Session::check_rotate_session(3); - assert_eq!(Session::validators(), vec![1, 2, 3]); - assert_eq!(Consensus::authorities(), vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)]); - }); - } - - #[test] - fn should_work_with_early_exit() { - with_externalities(&mut new_test_ext(), || { - System::set_block_number(1); - assert_ok!(Session::set_length(10)); - assert_eq!(Session::blocks_remaining(), 1); - Session::check_rotate_session(1); - - System::set_block_number(2); - assert_eq!(Session::blocks_remaining(), 0); - Session::check_rotate_session(2); - assert_eq!(Session::length(), 10); - - System::set_block_number(7); - assert_eq!(Session::current_index(), 1); - assert_eq!(Session::blocks_remaining(), 5); - assert_ok!(Session::force_new_session(false)); - Session::check_rotate_session(7); - - System::set_block_number(8); - assert_eq!(Session::current_index(), 2); - assert_eq!(Session::blocks_remaining(), 9); - Session::check_rotate_session(8); - - System::set_block_number(17); - assert_eq!(Session::current_index(), 2); - assert_eq!(Session::blocks_remaining(), 0); - Session::check_rotate_session(17); - - System::set_block_number(18); - assert_eq!(Session::current_index(), 3); - }); - } - - #[test] - fn session_length_change_should_work() { - with_externalities(&mut new_test_ext(), || { - // Block 1: Change to length 3; no visible change. - System::set_block_number(1); - assert_ok!(Session::set_length(3)); - Session::check_rotate_session(1); - assert_eq!(Session::length(), 2); - assert_eq!(Session::current_index(), 0); - - // Block 2: Length now changed to 3. Index incremented. - System::set_block_number(2); - assert_ok!(Session::set_length(3)); - Session::check_rotate_session(2); - assert_eq!(Session::length(), 3); - assert_eq!(Session::current_index(), 1); - - // Block 3: Length now changed to 3. Index incremented. - System::set_block_number(3); - Session::check_rotate_session(3); - assert_eq!(Session::length(), 3); - assert_eq!(Session::current_index(), 1); - - // Block 4: Change to length 2; no visible change. - System::set_block_number(4); - assert_ok!(Session::set_length(2)); - Session::check_rotate_session(4); - assert_eq!(Session::length(), 3); - assert_eq!(Session::current_index(), 1); - - // Block 5: Length now changed to 2. Index incremented. - System::set_block_number(5); - Session::check_rotate_session(5); - assert_eq!(Session::length(), 2); - assert_eq!(Session::current_index(), 2); - - // Block 6: No change. - System::set_block_number(6); - Session::check_rotate_session(6); - assert_eq!(Session::length(), 2); - assert_eq!(Session::current_index(), 2); - - // Block 7: Next index. - System::set_block_number(7); - Session::check_rotate_session(7); - assert_eq!(Session::length(), 2); - assert_eq!(Session::current_index(), 3); - }); - } - - #[test] - fn session_change_should_work() { - with_externalities(&mut new_test_ext(), || { - // Block 1: No change - System::set_block_number(1); - Session::check_rotate_session(1); - assert_eq!(Consensus::authorities(), vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)]); - - // Block 2: Session rollover, but no change. 
- System::set_block_number(2); - Session::check_rotate_session(2); - assert_eq!(Consensus::authorities(), vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)]); - - // Block 3: Set new key for validator 2; no visible change. - System::set_block_number(3); - assert_ok!(Session::set_key(Origin::signed(2), UintAuthorityId(5))); - assert_eq!(Consensus::authorities(), vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)]); - - Session::check_rotate_session(3); - assert_eq!(Consensus::authorities(), vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)]); - - // Block 4: Session rollover, authority 2 changes. - System::set_block_number(4); - Session::check_rotate_session(4); - assert_eq!(Consensus::authorities(), vec![UintAuthorityId(1), UintAuthorityId(5), UintAuthorityId(3)]); - }); - } + use super::*; + use primitives::testing::{ + ConvertUintAuthorityId, Digest, DigestItem, Header, UintAuthorityId, + }; + use primitives::traits::{BlakeTwo256, IdentityLookup}; + use primitives::BuildStorage; + use runtime_io::with_externalities; + use srml_support::{assert_ok, impl_outer_origin}; + use std::cell::RefCell; + use substrate_primitives::{Blake2Hasher, H256}; + + impl_outer_origin! { + pub enum Origin for Test {} + } + + thread_local! { + static NEXT_VALIDATORS: RefCell> = RefCell::new(vec![1, 2, 3]); + } + + pub struct TestOnSessionChange; + impl OnSessionChange for TestOnSessionChange { + fn on_session_change(_elapsed: u64, _should_reward: bool) { + NEXT_VALIDATORS.with(|v| Session::set_validators(&*v.borrow())); + } + } + + #[derive(Clone, Eq, PartialEq)] + pub struct Test; + impl consensus::Trait for Test { + type Log = DigestItem; + type SessionKey = UintAuthorityId; + type InherentOfflineReport = (); + } + impl system::Trait for Test { + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type Digest = Digest; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = (); + type Log = DigestItem; + } + impl timestamp::Trait for Test { + type Moment = u64; + type OnTimestampSet = (); + } + impl Trait for Test { + type ConvertAccountIdToSessionKey = ConvertUintAuthorityId; + type OnSessionChange = TestOnSessionChange; + type Event = (); + } + + type System = system::Module; + type Consensus = consensus::Module; + type Session = Module; + + fn new_test_ext() -> runtime_io::TestExternalities { + let mut t = system::GenesisConfig::::default() + .build_storage() + .unwrap() + .0; + t.extend( + consensus::GenesisConfig:: { + code: vec![], + authorities: NEXT_VALIDATORS + .with(|l| l.borrow().iter().cloned().map(UintAuthorityId).collect()), + } + .build_storage() + .unwrap() + .0, + ); + t.extend( + timestamp::GenesisConfig:: { minimum_period: 5 } + .build_storage() + .unwrap() + .0, + ); + t.extend( + GenesisConfig:: { + session_length: 2, + validators: NEXT_VALIDATORS.with(|l| l.borrow().clone()), + keys: vec![], + } + .build_storage() + .unwrap() + .0, + ); + runtime_io::TestExternalities::new(t) + } + + #[test] + fn simple_setup_should_work() { + with_externalities(&mut new_test_ext(), || { + assert_eq!( + Consensus::authorities(), + vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)] + ); + assert_eq!(Session::length(), 2); + assert_eq!(Session::validators(), vec![1, 2, 3]); + }); + } + + #[test] + fn authorities_should_track_validators() { + with_externalities(&mut new_test_ext(), || { + NEXT_VALIDATORS.with(|v| *v.borrow_mut() = vec![1, 2]); + 
assert_ok!(Session::force_new_session(false)); + Session::check_rotate_session(1); + assert_eq!(Session::validators(), vec![1, 2]); + assert_eq!( + Consensus::authorities(), + vec![UintAuthorityId(1), UintAuthorityId(2)] + ); + + NEXT_VALIDATORS.with(|v| *v.borrow_mut() = vec![1, 2, 4]); + assert_ok!(Session::force_new_session(false)); + Session::check_rotate_session(2); + assert_eq!(Session::validators(), vec![1, 2, 4]); + assert_eq!( + Consensus::authorities(), + vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(4)] + ); + + NEXT_VALIDATORS.with(|v| *v.borrow_mut() = vec![1, 2, 3]); + assert_ok!(Session::force_new_session(false)); + Session::check_rotate_session(3); + assert_eq!(Session::validators(), vec![1, 2, 3]); + assert_eq!( + Consensus::authorities(), + vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)] + ); + }); + } + + #[test] + fn should_work_with_early_exit() { + with_externalities(&mut new_test_ext(), || { + System::set_block_number(1); + assert_ok!(Session::set_length(10)); + assert_eq!(Session::blocks_remaining(), 1); + Session::check_rotate_session(1); + + System::set_block_number(2); + assert_eq!(Session::blocks_remaining(), 0); + Session::check_rotate_session(2); + assert_eq!(Session::length(), 10); + + System::set_block_number(7); + assert_eq!(Session::current_index(), 1); + assert_eq!(Session::blocks_remaining(), 5); + assert_ok!(Session::force_new_session(false)); + Session::check_rotate_session(7); + + System::set_block_number(8); + assert_eq!(Session::current_index(), 2); + assert_eq!(Session::blocks_remaining(), 9); + Session::check_rotate_session(8); + + System::set_block_number(17); + assert_eq!(Session::current_index(), 2); + assert_eq!(Session::blocks_remaining(), 0); + Session::check_rotate_session(17); + + System::set_block_number(18); + assert_eq!(Session::current_index(), 3); + }); + } + + #[test] + fn session_length_change_should_work() { + with_externalities(&mut new_test_ext(), || { + // Block 1: Change to length 3; no visible change. + System::set_block_number(1); + assert_ok!(Session::set_length(3)); + Session::check_rotate_session(1); + assert_eq!(Session::length(), 2); + assert_eq!(Session::current_index(), 0); + + // Block 2: Length now changed to 3. Index incremented. + System::set_block_number(2); + assert_ok!(Session::set_length(3)); + Session::check_rotate_session(2); + assert_eq!(Session::length(), 3); + assert_eq!(Session::current_index(), 1); + + // Block 3: Length now changed to 3. Index incremented. + System::set_block_number(3); + Session::check_rotate_session(3); + assert_eq!(Session::length(), 3); + assert_eq!(Session::current_index(), 1); + + // Block 4: Change to length 2; no visible change. + System::set_block_number(4); + assert_ok!(Session::set_length(2)); + Session::check_rotate_session(4); + assert_eq!(Session::length(), 3); + assert_eq!(Session::current_index(), 1); + + // Block 5: Length now changed to 2. Index incremented. + System::set_block_number(5); + Session::check_rotate_session(5); + assert_eq!(Session::length(), 2); + assert_eq!(Session::current_index(), 2); + + // Block 6: No change. + System::set_block_number(6); + Session::check_rotate_session(6); + assert_eq!(Session::length(), 2); + assert_eq!(Session::current_index(), 2); + + // Block 7: Next index. 
+ System::set_block_number(7); + Session::check_rotate_session(7); + assert_eq!(Session::length(), 2); + assert_eq!(Session::current_index(), 3); + }); + } + + #[test] + fn session_change_should_work() { + with_externalities(&mut new_test_ext(), || { + // Block 1: No change + System::set_block_number(1); + Session::check_rotate_session(1); + assert_eq!( + Consensus::authorities(), + vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)] + ); + + // Block 2: Session rollover, but no change. + System::set_block_number(2); + Session::check_rotate_session(2); + assert_eq!( + Consensus::authorities(), + vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)] + ); + + // Block 3: Set new key for validator 2; no visible change. + System::set_block_number(3); + assert_ok!(Session::set_key(Origin::signed(2), UintAuthorityId(5))); + assert_eq!( + Consensus::authorities(), + vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)] + ); + + Session::check_rotate_session(3); + assert_eq!( + Consensus::authorities(), + vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)] + ); + + // Block 4: Session rollover, authority 2 changes. + System::set_block_number(4); + Session::check_rotate_session(4); + assert_eq!( + Consensus::authorities(), + vec![UintAuthorityId(1), UintAuthorityId(5), UintAuthorityId(3)] + ); + }); + } } diff --git a/srml/staking/src/lib.rs b/srml/staking/src/lib.rs index 32b44f0264..9e933c421f 100644 --- a/srml/staking/src/lib.rs +++ b/srml/staking/src/lib.rs @@ -252,26 +252,26 @@ #![cfg_attr(not(feature = "std"), no_std)] +use parity_codec::{Decode, Encode, HasCompact}; +use primitives::traits::{As, Bounded, CheckedSub, Convert, One, Saturating, StaticLookup, Zero}; +use primitives::Perbill; #[cfg(feature = "std")] -use runtime_io::with_storage; +use primitives::{Deserialize, Serialize}; use rstd::{prelude::*, result}; -use parity_codec::{HasCompact, Encode, Decode}; -use srml_support::{StorageValue, StorageMap, EnumerableStorageMap, dispatch::Result}; -use srml_support::{decl_module, decl_event, decl_storage, ensure}; +#[cfg(feature = "std")] +use runtime_io::with_storage; +use session::OnSessionChange; use srml_support::traits::{ - Currency, OnFreeBalanceZero, OnDilution, LockIdentifier, LockableCurrency, WithdrawReasons, - OnUnbalanced, Imbalance, + Currency, Imbalance, LockIdentifier, LockableCurrency, OnDilution, OnFreeBalanceZero, + OnUnbalanced, WithdrawReasons, }; -use session::OnSessionChange; -use primitives::Perbill; -use primitives::traits::{Convert, Zero, One, As, StaticLookup, CheckedSub, Saturating, Bounded}; -#[cfg(feature = "std")] -use primitives::{Serialize, Deserialize}; +use srml_support::{decl_event, decl_module, decl_storage, ensure}; +use srml_support::{dispatch::Result, EnumerableStorageMap, StorageMap, StorageValue}; use system::ensure_signed; mod mock; -mod tests; mod phragmen; +mod tests; use phragmen::{elect, ElectionConfig}; @@ -283,482 +283,490 @@ const MAX_UNSTAKE_THRESHOLD: u32 = 10; /// Indicates the initial status of the staker. #[cfg_attr(feature = "std", derive(Debug, Serialize, Deserialize))] pub enum StakerStatus { - /// Chilling. - Idle, - /// Declared desire in validating or already participating in it. - Validator, - /// Nominating for a group of other stakers. - Nominator(Vec), + /// Chilling. + Idle, + /// Declared desire in validating or already participating in it. + Validator, + /// Nominating for a group of other stakers. + Nominator(Vec), } /// A destination account for payment. 
#[derive(PartialEq, Eq, Copy, Clone, Encode, Decode)] #[cfg_attr(feature = "std", derive(Debug))] pub enum RewardDestination { - /// Pay into the stash account, increasing the amount at stake accordingly. - Staked, - /// Pay into the stash account, not increasing the amount at stake. - Stash, - /// Pay into the controller account. - Controller, + /// Pay into the stash account, increasing the amount at stake accordingly. + Staked, + /// Pay into the stash account, not increasing the amount at stake. + Stash, + /// Pay into the controller account. + Controller, } impl Default for RewardDestination { - fn default() -> Self { - RewardDestination::Staked - } + fn default() -> Self { + RewardDestination::Staked + } } /// Preference of what happens on a slash event. #[derive(PartialEq, Eq, Clone, Encode, Decode)] #[cfg_attr(feature = "std", derive(Debug))] pub struct ValidatorPrefs { - /// Validator should ensure this many more slashes than is necessary before being unstaked. - #[codec(compact)] - pub unstake_threshold: u32, - /// Reward that validator takes up-front; only the rest is split between themselves and nominators. - #[codec(compact)] - pub validator_payment: Balance, + /// Validator should ensure this many more slashes than is necessary before being unstaked. + #[codec(compact)] + pub unstake_threshold: u32, + /// Reward that validator takes up-front; only the rest is split between themselves and nominators. + #[codec(compact)] + pub validator_payment: Balance, } impl Default for ValidatorPrefs { - fn default() -> Self { - ValidatorPrefs { - unstake_threshold: 3, - validator_payment: Default::default(), - } - } + fn default() -> Self { + ValidatorPrefs { + unstake_threshold: 3, + validator_payment: Default::default(), + } + } } /// Just a Balance/BlockNumber tuple to encode when a chunk of funds will be unlocked. #[derive(PartialEq, Eq, Clone, Encode, Decode)] #[cfg_attr(feature = "std", derive(Debug))] pub struct UnlockChunk { - /// Amount of funds to be unlocked. - #[codec(compact)] - value: Balance, - /// Era number at which point it'll be unlocked. - #[codec(compact)] - era: BlockNumber, + /// Amount of funds to be unlocked. + #[codec(compact)] + value: Balance, + /// Era number at which point it'll be unlocked. + #[codec(compact)] + era: BlockNumber, } /// The ledger of a (bonded) stash. #[derive(PartialEq, Eq, Clone, Encode, Decode)] #[cfg_attr(feature = "std", derive(Debug))] pub struct StakingLedger { - /// The stash account whose balance is actually locked and at stake. - pub stash: AccountId, - /// The total amount of the stash's balance that we are currently accounting for. - /// It's just `active` plus all the `unlocking` balances. - #[codec(compact)] - pub total: Balance, - /// The total amount of the stash's balance that will be at stake in any forthcoming - /// rounds. - #[codec(compact)] - pub active: Balance, - /// Any balance that is becoming free, which may eventually be transferred out - /// of the stash (assuming it doesn't get slashed first). - pub unlocking: Vec>, + /// The stash account whose balance is actually locked and at stake. + pub stash: AccountId, + /// The total amount of the stash's balance that we are currently accounting for. + /// It's just `active` plus all the `unlocking` balances. + #[codec(compact)] + pub total: Balance, + /// The total amount of the stash's balance that will be at stake in any forthcoming + /// rounds. 
+ #[codec(compact)] + pub active: Balance, + /// Any balance that is becoming free, which may eventually be transferred out + /// of the stash (assuming it doesn't get slashed first). + pub unlocking: Vec>, } -impl< - AccountId, - Balance: HasCompact + Copy + Saturating, - BlockNumber: HasCompact + PartialOrd -> StakingLedger { - /// Remove entries from `unlocking` that are sufficiently old and reduce the - /// total by the sum of their balances. - fn consolidate_unlocked(self, current_era: BlockNumber) -> Self { - let mut total = self.total; - let unlocking = self.unlocking.into_iter() - .filter(|chunk| if chunk.era > current_era { - true - } else { - total = total.saturating_sub(chunk.value); - false - }) - .collect(); - Self { total, active: self.active, stash: self.stash, unlocking } - } +impl + StakingLedger +{ + /// Remove entries from `unlocking` that are sufficiently old and reduce the + /// total by the sum of their balances. + fn consolidate_unlocked(self, current_era: BlockNumber) -> Self { + let mut total = self.total; + let unlocking = self + .unlocking + .into_iter() + .filter(|chunk| { + if chunk.era > current_era { + true + } else { + total = total.saturating_sub(chunk.value); + false + } + }) + .collect(); + Self { + total, + active: self.active, + stash: self.stash, + unlocking, + } + } } /// The amount of exposure (to slashing) than an individual nominator has. #[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Encode, Decode)] #[cfg_attr(feature = "std", derive(Debug))] pub struct IndividualExposure { - /// The stash account of the nominator in question. - who: AccountId, - /// Amount of funds exposed. - #[codec(compact)] - value: Balance, + /// The stash account of the nominator in question. + who: AccountId, + /// Amount of funds exposed. + #[codec(compact)] + value: Balance, } /// A snapshot of the stake backing a single validator in the system. #[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Encode, Decode, Default)] #[cfg_attr(feature = "std", derive(Debug))] pub struct Exposure { - /// The total balance backing this validator. - #[codec(compact)] - pub total: Balance, - /// The validator's own stash that is exposed. - #[codec(compact)] - pub own: Balance, - /// The portions of nominators stashes that are exposed. - pub others: Vec>, + /// The total balance backing this validator. + #[codec(compact)] + pub total: Balance, + /// The validator's own stash that is exposed. + #[codec(compact)] + pub own: Balance, + /// The portions of nominators stashes that are exposed. + pub others: Vec>, } type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; -type PositiveImbalanceOf = <::Currency as Currency<::AccountId>>::PositiveImbalance; -type NegativeImbalanceOf = <::Currency as Currency<::AccountId>>::NegativeImbalance; +type PositiveImbalanceOf = + <::Currency as Currency<::AccountId>>::PositiveImbalance; +type NegativeImbalanceOf = + <::Currency as Currency<::AccountId>>::NegativeImbalance; pub trait Trait: system::Trait + session::Trait { - /// The staking balance. - type Currency: - Currency + - LockableCurrency; + /// The staking balance. + type Currency: Currency + + LockableCurrency; - /// Convert a balance into a number used for election calculation. - /// This must fit into a `u64` but is allowed to be sensibly lossy. - type CurrencyToVote: Convert, u64> + Convert>; + /// Convert a balance into a number used for election calculation. + /// This must fit into a `u64` but is allowed to be sensibly lossy. 
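+    /// (Editor's illustrative note, not part of the original patch: a
+    /// saturating conversion that clamps anything above `u64::max_value()`
+    /// down to it is "sensibly lossy" in this sense.)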
+ type CurrencyToVote: Convert, u64> + Convert>; - /// Some tokens minted. - type OnRewardMinted: OnDilution>; + /// Some tokens minted. + type OnRewardMinted: OnDilution>; - /// The overarching event type. - type Event: From> + Into<::Event>; + /// The overarching event type. + type Event: From> + Into<::Event>; - /// Handler for the unbalanced reduction when slashing a staker. - type Slash: OnUnbalanced>; + /// Handler for the unbalanced reduction when slashing a staker. + type Slash: OnUnbalanced>; - /// Handler for the unbalanced increment when rewarding a staker. - type Reward: OnUnbalanced>; + /// Handler for the unbalanced increment when rewarding a staker. + type Reward: OnUnbalanced>; } const STAKING_ID: LockIdentifier = *b"staking "; decl_storage! { - trait Store for Module as Staking { - - /// The ideal number of staking participants. - pub ValidatorCount get(validator_count) config(): u32; - /// Minimum number of staking participants before emergency conditions are imposed. - pub MinimumValidatorCount get(minimum_validator_count) config(): u32 = DEFAULT_MINIMUM_VALIDATOR_COUNT; - /// The length of a staking era in sessions. - pub SessionsPerEra get(sessions_per_era) config(): T::BlockNumber = T::BlockNumber::sa(1000); - /// Maximum reward, per validator, that is provided per acceptable session. - pub SessionReward get(session_reward) config(): Perbill = Perbill::from_billionths(60); - /// Slash, per validator that is taken for the first time they are found to be offline. - pub OfflineSlash get(offline_slash) config(): Perbill = Perbill::from_millionths(1000); // Perbill::from_fraction() is only for std, so use from_millionths(). - /// Number of instances of offline reports before slashing begins for validators. - pub OfflineSlashGrace get(offline_slash_grace) config(): u32; - /// The length of the bonding duration in blocks. - pub BondingDuration get(bonding_duration) config(): T::BlockNumber = T::BlockNumber::sa(1000); - - /// Any validators that may never be slashed or forcibly kicked. It's a Vec since they're easy to initialize - /// and the performance hit is minimal (we expect no more than four invulnerables) and restricted to testnets. - pub Invulnerables get(invulnerables) config(): Vec; - - /// Map from all locked "stash" accounts to the controller account. - pub Bonded get(bonded): map T::AccountId => Option; - /// Map from all (unlocked) "controller" accounts to the info regarding the staking. - pub Ledger get(ledger): map T::AccountId => Option, T::BlockNumber>>; - - /// Where the reward payment should be made. Keyed by stash. - pub Payee get(payee): map T::AccountId => RewardDestination; - - /// The map from (wannabe) validator stash key to the preferences of that validator. - pub Validators get(validators): linked_map T::AccountId => ValidatorPrefs>; - - /// The map from nominator stash key to the set of stash keys of all validators to nominate. - pub Nominators get(nominators): linked_map T::AccountId => Vec; - - /// Nominators for a particular account that is in action right now. You can't iterate through validators here, - /// but you can find them in the `sessions` module. - /// - /// This is keyed by the stash account. - pub Stakers get(stakers): map T::AccountId => Exposure>; - - // The historical validators and their nominations for a given era. Stored as a trie root of the mapping - // `T::AccountId` => `Exposure>`, which is just the contents of `Stakers`, - // under a key that is the `era`. 
- // - // Every era change, this will be appended with the trie root of the contents of `Stakers`, and the oldest - // entry removed down to a specific number of entries (probably around 90 for a 3 month history). - // pub HistoricalStakers get(historical_stakers): map T::BlockNumber => Option; - - /// The currently elected validator set keyed by stash account ID. - pub CurrentElected get(current_elected): Vec; - - /// The current era index. - pub CurrentEra get(current_era) config(): T::BlockNumber; - - /// Maximum reward, per validator, that is provided per acceptable session. - pub CurrentSessionReward get(current_session_reward) config(): BalanceOf; - - /// The accumulated reward for the current era. Reset to zero at the beginning of the era and - /// increased for every successfully finished session. - pub CurrentEraReward get(current_era_reward): BalanceOf; - - /// The next value of sessions per era. - pub NextSessionsPerEra get(next_sessions_per_era): Option; - /// The session index at which the era length last changed. - pub LastEraLengthChange get(last_era_length_change): T::BlockNumber; - - /// The amount of balance actively at stake for each validator slot, currently. - /// - /// This is used to derive rewards and punishments. - pub SlotStake get(slot_stake) build(|config: &GenesisConfig| { - config.stakers.iter().map(|&(_, _, value, _)| value).min().unwrap_or_default() - }): BalanceOf; - - /// The number of times a given validator has been reported offline. This gets decremented by one each era that passes. - pub SlashCount get(slash_count): map T::AccountId => u32; - - /// We are forcing a new era. - pub ForcingNewEra get(forcing_new_era): Option<()>; - - /// Most recent `RECENT_OFFLINE_COUNT` instances. (who it was, when it was reported, how many instances they were offline for). - pub RecentlyOffline get(recently_offline): Vec<(T::AccountId, T::BlockNumber, u32)>; - } - add_extra_genesis { - config(stakers): Vec<(T::AccountId, T::AccountId, BalanceOf, StakerStatus)>; - build(|storage: &mut primitives::StorageOverlay, _: &mut primitives::ChildrenStorageOverlay, config: &GenesisConfig| { - with_storage(storage, || { - for &(ref stash, ref controller, balance, ref status) in &config.stakers { - assert!(T::Currency::free_balance(&stash) >= balance); - let _ = >::bond( - T::Origin::from(Some(stash.clone()).into()), - T::Lookup::unlookup(controller.clone()), - balance, - RewardDestination::Staked - ); - let _ = match status { - StakerStatus::Validator => { - >::validate( - T::Origin::from(Some(controller.clone()).into()), - Default::default() - ) - }, StakerStatus::Nominator(votes) => { - >::nominate( - T::Origin::from(Some(controller.clone()).into()), - votes.iter().map(|l| {T::Lookup::unlookup(l.clone())}).collect() - ) - }, _ => Ok(()) - }; - } - - >::select_validators(); - }); - }); - } + trait Store for Module as Staking { + + /// The ideal number of staking participants. + pub ValidatorCount get(validator_count) config(): u32; + /// Minimum number of staking participants before emergency conditions are imposed. + pub MinimumValidatorCount get(minimum_validator_count) config(): u32 = DEFAULT_MINIMUM_VALIDATOR_COUNT; + /// The length of a staking era in sessions. + pub SessionsPerEra get(sessions_per_era) config(): T::BlockNumber = T::BlockNumber::sa(1000); + /// Maximum reward, per validator, that is provided per acceptable session. 
+ pub SessionReward get(session_reward) config(): Perbill = Perbill::from_billionths(60); + /// Slash, per validator that is taken for the first time they are found to be offline. + pub OfflineSlash get(offline_slash) config(): Perbill = Perbill::from_millionths(1000); // Perbill::from_fraction() is only for std, so use from_millionths(). + /// Number of instances of offline reports before slashing begins for validators. + pub OfflineSlashGrace get(offline_slash_grace) config(): u32; + /// The length of the bonding duration in blocks. + pub BondingDuration get(bonding_duration) config(): T::BlockNumber = T::BlockNumber::sa(1000); + + /// Any validators that may never be slashed or forcibly kicked. It's a Vec since they're easy to initialize + /// and the performance hit is minimal (we expect no more than four invulnerables) and restricted to testnets. + pub Invulnerables get(invulnerables) config(): Vec; + + /// Map from all locked "stash" accounts to the controller account. + pub Bonded get(bonded): map T::AccountId => Option; + /// Map from all (unlocked) "controller" accounts to the info regarding the staking. + pub Ledger get(ledger): map T::AccountId => Option, T::BlockNumber>>; + + /// Where the reward payment should be made. Keyed by stash. + pub Payee get(payee): map T::AccountId => RewardDestination; + + /// The map from (wannabe) validator stash key to the preferences of that validator. + pub Validators get(validators): linked_map T::AccountId => ValidatorPrefs>; + + /// The map from nominator stash key to the set of stash keys of all validators to nominate. + pub Nominators get(nominators): linked_map T::AccountId => Vec; + + /// Nominators for a particular account that is in action right now. You can't iterate through validators here, + /// but you can find them in the `sessions` module. + /// + /// This is keyed by the stash account. + pub Stakers get(stakers): map T::AccountId => Exposure>; + + // The historical validators and their nominations for a given era. Stored as a trie root of the mapping + // `T::AccountId` => `Exposure>`, which is just the contents of `Stakers`, + // under a key that is the `era`. + // + // Every era change, this will be appended with the trie root of the contents of `Stakers`, and the oldest + // entry removed down to a specific number of entries (probably around 90 for a 3 month history). + // pub HistoricalStakers get(historical_stakers): map T::BlockNumber => Option; + + /// The currently elected validator set keyed by stash account ID. + pub CurrentElected get(current_elected): Vec; + + /// The current era index. + pub CurrentEra get(current_era) config(): T::BlockNumber; + + /// Maximum reward, per validator, that is provided per acceptable session. + pub CurrentSessionReward get(current_session_reward) config(): BalanceOf; + + /// The accumulated reward for the current era. Reset to zero at the beginning of the era and + /// increased for every successfully finished session. + pub CurrentEraReward get(current_era_reward): BalanceOf; + + /// The next value of sessions per era. + pub NextSessionsPerEra get(next_sessions_per_era): Option; + /// The session index at which the era length last changed. + pub LastEraLengthChange get(last_era_length_change): T::BlockNumber; + + /// The amount of balance actively at stake for each validator slot, currently. + /// + /// This is used to derive rewards and punishments. 
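+        /// (Editor's note, not part of the original patch: per the `build`
+        /// closure below, this starts at genesis as the minimum stake among
+        /// the configured `stakers`.)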
+ pub SlotStake get(slot_stake) build(|config: &GenesisConfig| { + config.stakers.iter().map(|&(_, _, value, _)| value).min().unwrap_or_default() + }): BalanceOf; + + /// The number of times a given validator has been reported offline. This gets decremented by one each era that passes. + pub SlashCount get(slash_count): map T::AccountId => u32; + + /// We are forcing a new era. + pub ForcingNewEra get(forcing_new_era): Option<()>; + + /// Most recent `RECENT_OFFLINE_COUNT` instances. (who it was, when it was reported, how many instances they were offline for). + pub RecentlyOffline get(recently_offline): Vec<(T::AccountId, T::BlockNumber, u32)>; + } + add_extra_genesis { + config(stakers): Vec<(T::AccountId, T::AccountId, BalanceOf, StakerStatus)>; + build(|storage: &mut primitives::StorageOverlay, _: &mut primitives::ChildrenStorageOverlay, config: &GenesisConfig| { + with_storage(storage, || { + for &(ref stash, ref controller, balance, ref status) in &config.stakers { + assert!(T::Currency::free_balance(&stash) >= balance); + let _ = >::bond( + T::Origin::from(Some(stash.clone()).into()), + T::Lookup::unlookup(controller.clone()), + balance, + RewardDestination::Staked + ); + let _ = match status { + StakerStatus::Validator => { + >::validate( + T::Origin::from(Some(controller.clone()).into()), + Default::default() + ) + }, StakerStatus::Nominator(votes) => { + >::nominate( + T::Origin::from(Some(controller.clone()).into()), + votes.iter().map(|l| {T::Lookup::unlookup(l.clone())}).collect() + ) + }, _ => Ok(()) + }; + } + + >::select_validators(); + }); + }); + } } decl_module! { - pub struct Module for enum Call where origin: T::Origin { - fn deposit_event() = default; - - /// Take the origin account as a stash and lock up `value` of its balance. `controller` will be the - /// account that controls it. - /// - /// The dispatch origin for this call must be _Signed_ by the stash account. - fn bond(origin, controller: ::Source, #[compact] value: BalanceOf, payee: RewardDestination) { - let stash = ensure_signed(origin)?; - - if >::exists(&stash) { - return Err("stash already bonded") - } - - let controller = T::Lookup::lookup(controller)?; - - if >::exists(&controller) { - return Err("controller already paired") - } - - // You're auto-bonded forever, here. We might improve this by only bonding when - // you actually validate/nominate. - >::insert(&stash, controller.clone()); - >::insert(&stash, payee); - - let stash_balance = T::Currency::free_balance(&stash); - let value = value.min(stash_balance); - Self::update_ledger(&controller, &StakingLedger { stash, total: value, active: value, unlocking: vec![] }); - } - - /// Add some extra amount that have appeared in the stash `free_balance` into the balance up for - /// staking. - /// - /// Use this if there are additional funds in your stash account that you wish to bond. - /// - /// The dispatch origin for this call must be _Signed_ by the stash, not the controller. 
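The `add_extra_genesis` block above bootstraps the chain by replaying each configured staker through the module's own calls: bond first, then declare the role, and only then run the first election. A self-contained sketch of that ordering, with toy free functions standing in for the dispatchables (`bond`, `validate`, `nominate`, and `select_validators` here are stubs, not the real module):

#[derive(Clone)]
enum Status { Validator, Nominator(Vec<u64>), Idle }

/// Mirror of the genesis loop: bond each (stash, controller, value) triple,
/// register the declared role, then derive the initial validator set.
fn build_genesis(stakers: &[(u64, u64, u64, Status)]) {
    for (stash, controller, value, status) in stakers {
        // The real genesis also asserts the stash's free balance covers `value`.
        bond(*stash, *controller, *value);
        match status {
            Status::Validator => validate(*controller),
            Status::Nominator(votes) => nominate(*controller, votes.clone()),
            Status::Idle => {}
        }
    }
    select_validators(); // the initial set comes from the state just written
}

// Toy stand-ins for the module calls used above.
fn bond(_stash: u64, _controller: u64, _value: u64) {}
fn validate(_controller: u64) {}
fn nominate(_controller: u64, _votes: Vec<u64>) {}
fn select_validators() {}

fn main() {
    build_genesis(&[
        (11, 10, 1_000, Status::Validator),
        (101, 100, 500, Status::Nominator(vec![11])),
    ]);
}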
- fn bond_extra(origin, #[compact] max_additional: BalanceOf) { - let stash = ensure_signed(origin)?; - - let controller = Self::bonded(&stash).ok_or("not a stash")?; - let mut ledger = Self::ledger(&controller).ok_or("not a controller")?; - - let stash_balance = T::Currency::free_balance(&stash); - - if let Some(extra) = stash_balance.checked_sub(&ledger.total) { - let extra = extra.min(max_additional); - ledger.total += extra; - ledger.active += extra; - Self::update_ledger(&controller, &ledger); - } - } - - /// Schedule a portion of the stash to be unlocked ready for transfer out after the bond - /// period ends. If this leaves an amount actively bonded less than - /// T::Currency::existential_deposit(), then it is increased to the full amount. - /// - /// Once the unlock period is done, you can call `withdraw_unbonded` to actually move - /// the funds out of management ready for transfer. - /// - /// The dispatch origin for this call must be _Signed_ by the controller, not the stash. - /// - /// See also [`Call::withdraw_unbonded`]. - fn unbond(origin, #[compact] value: BalanceOf) { - let controller = ensure_signed(origin)?; - let mut ledger = Self::ledger(&controller).ok_or("not a controller")?; - - let mut value = value.min(ledger.active); - - if !value.is_zero() { - ledger.active -= value; - - // Avoid there being a dust balance left in the staking system. - if ledger.active < T::Currency::minimum_balance() { - value += ledger.active; - ledger.active = Zero::zero(); - } - - let era = Self::current_era() + Self::bonding_duration(); - ledger.unlocking.push(UnlockChunk { value, era }); - Self::update_ledger(&controller, &ledger); - } - } - - /// Remove any unlocked chunks from the `unlocking` queue from our management. - /// - /// This essentially frees up that balance to be used by the stash account to do - /// whatever it wants. - /// - /// The dispatch origin for this call must be _Signed_ by the controller, not the stash. - /// - /// See also [`Call::unbond`]. - fn withdraw_unbonded(origin) { - let controller = ensure_signed(origin)?; - let ledger = Self::ledger(&controller).ok_or("not a controller")?; - let ledger = ledger.consolidate_unlocked(Self::current_era()); - Self::update_ledger(&controller, &ledger); - } - - /// Declare the desire to validate for the origin controller. - /// - /// Effects will be felt at the beginning of the next era. - /// - /// The dispatch origin for this call must be _Signed_ by the controller, not the stash. - fn validate(origin, prefs: ValidatorPrefs>) { - let controller = ensure_signed(origin)?; - let ledger = Self::ledger(&controller).ok_or("not a controller")?; - let stash = &ledger.stash; - ensure!(prefs.unstake_threshold <= MAX_UNSTAKE_THRESHOLD, "unstake threshold too large"); - >::remove(stash); - >::insert(stash, prefs); - } - - /// Declare the desire to nominate `targets` for the origin controller. - /// - /// Effects will be felt at the beginning of the next era. - /// - /// The dispatch origin for this call must be _Signed_ by the controller, not the stash. - fn nominate(origin, targets: Vec<::Source>) { - let controller = ensure_signed(origin)?; - let ledger = Self::ledger(&controller).ok_or("not a controller")?; - let stash = &ledger.stash; - ensure!(!targets.is_empty(), "targets cannot be empty"); - let targets = targets.into_iter() - .take(MAX_NOMINATIONS) - .map(T::Lookup::lookup) - .collect::, &'static str>>()?; - - >::remove(stash); - >::insert(stash, targets); - } - - /// Declare no desire to either validate or nominate. 
- /// - /// Effects will be felt at the beginning of the next era. - /// - /// The dispatch origin for this call must be _Signed_ by the controller, not the stash. - fn chill(origin) { - let controller = ensure_signed(origin)?; - let ledger = Self::ledger(&controller).ok_or("not a controller")?; - let stash = &ledger.stash; - >::remove(stash); - >::remove(stash); - } - - /// (Re-)set the payment target for a controller. - /// - /// Effects will be felt at the beginning of the next era. - /// - /// The dispatch origin for this call must be _Signed_ by the controller, not the stash. - fn set_payee(origin, payee: RewardDestination) { - let controller = ensure_signed(origin)?; - let ledger = Self::ledger(&controller).ok_or("not a controller")?; - let stash = &ledger.stash; - >::insert(stash, payee); - } - - /// (Re-)set the payment target for a controller. - /// - /// Effects will be felt at the beginning of the next era. - /// - /// The dispatch origin for this call must be _Signed_ by the stash, not the controller. - fn set_controller(origin, controller: ::Source) { - let stash = ensure_signed(origin)?; - let old_controller = Self::bonded(&stash).ok_or("not a stash")?; - let controller = T::Lookup::lookup(controller)?; - if >::exists(&controller) { - return Err("controller already paired") - } - if controller != old_controller { - >::insert(&stash, &controller); - if let Some(l) = >::take(&old_controller) { >::insert(&controller, l) }; - } - } - - /// Set the number of sessions in an era. - fn set_sessions_per_era(#[compact] new: T::BlockNumber) { - >::put(new); - } - - /// The length of the bonding duration in eras. - fn set_bonding_duration(#[compact] new: T::BlockNumber) { - >::put(new); - } - - /// The ideal number of validators. - fn set_validator_count(#[compact] new: u32) { - >::put(new); - } - - /// Force there to be a new era. This also forces a new session immediately after. - /// `apply_rewards` should be true for validators to get the session reward. - fn force_new_era(apply_rewards: bool) -> Result { - Self::apply_force_new_era(apply_rewards) - } - - /// Set the offline slash grace period. - fn set_offline_slash_grace(#[compact] new: u32) { - >::put(new); - } - - /// Set the validators who cannot be slashed (if any). - fn set_invulnerables(validators: Vec) { - >::put(validators); - } - } + pub struct Module for enum Call where origin: T::Origin { + fn deposit_event() = default; + + /// Take the origin account as a stash and lock up `value` of its balance. `controller` will be the + /// account that controls it. + /// + /// The dispatch origin for this call must be _Signed_ by the stash account. + fn bond(origin, controller: ::Source, #[compact] value: BalanceOf, payee: RewardDestination) { + let stash = ensure_signed(origin)?; + + if >::exists(&stash) { + return Err("stash already bonded") + } + + let controller = T::Lookup::lookup(controller)?; + + if >::exists(&controller) { + return Err("controller already paired") + } + + // You're auto-bonded forever, here. We might improve this by only bonding when + // you actually validate/nominate. + >::insert(&stash, controller.clone()); + >::insert(&stash, payee); + + let stash_balance = T::Currency::free_balance(&stash); + let value = value.min(stash_balance); + Self::update_ledger(&controller, &StakingLedger { stash, total: value, active: value, unlocking: vec![] }); + } + + /// Add some extra amount that have appeared in the stash `free_balance` into the balance up for + /// staking. 
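`bond` enforces a strict pairing: `Bonded` maps stash to controller and `Ledger` maps controller to staking info, and neither key may already be taken. A minimal sketch of that bookkeeping with `HashMap`s in place of the storage maps (toy types, not the runtime's):

use std::collections::HashMap;

struct Ledger { stash: u64, total: u64, active: u64 }

/// Toy model of `bond`: one controller per stash, one stash per controller,
/// and never bond more than the stash actually holds.
fn bond(
    bonded: &mut HashMap<u64, u64>,     // stash -> controller
    ledgers: &mut HashMap<u64, Ledger>, // controller -> ledger
    free_balance: &HashMap<u64, u64>,
    stash: u64,
    controller: u64,
    value: u64,
) -> Result<(), &'static str> {
    if bonded.contains_key(&stash) { return Err("stash already bonded"); }
    if ledgers.contains_key(&controller) { return Err("controller already paired"); }
    // As in the dispatchable: clamp to the stash's free balance.
    let value = value.min(*free_balance.get(&stash).unwrap_or(&0));
    bonded.insert(stash, controller);
    ledgers.insert(controller, Ledger { stash, total: value, active: value });
    Ok(())
}

fn main() {
    let (mut bonded, mut ledgers) = (HashMap::new(), HashMap::new());
    let free: HashMap<u64, u64> = [(11, 800)].into_iter().collect();
    bond(&mut bonded, &mut ledgers, &free, 11, 10, 1_000).unwrap();
    assert_eq!(ledgers[&10].total, 800); // clamped to the stash balance
}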
+ /// + /// Use this if there are additional funds in your stash account that you wish to bond. + /// + /// The dispatch origin for this call must be _Signed_ by the stash, not the controller. + fn bond_extra(origin, #[compact] max_additional: BalanceOf) { + let stash = ensure_signed(origin)?; + + let controller = Self::bonded(&stash).ok_or("not a stash")?; + let mut ledger = Self::ledger(&controller).ok_or("not a controller")?; + + let stash_balance = T::Currency::free_balance(&stash); + + if let Some(extra) = stash_balance.checked_sub(&ledger.total) { + let extra = extra.min(max_additional); + ledger.total += extra; + ledger.active += extra; + Self::update_ledger(&controller, &ledger); + } + } + + /// Schedule a portion of the stash to be unlocked ready for transfer out after the bond + /// period ends. If this leaves an amount actively bonded less than + /// T::Currency::existential_deposit(), then it is increased to the full amount. + /// + /// Once the unlock period is done, you can call `withdraw_unbonded` to actually move + /// the funds out of management ready for transfer. + /// + /// The dispatch origin for this call must be _Signed_ by the controller, not the stash. + /// + /// See also [`Call::withdraw_unbonded`]. + fn unbond(origin, #[compact] value: BalanceOf) { + let controller = ensure_signed(origin)?; + let mut ledger = Self::ledger(&controller).ok_or("not a controller")?; + + let mut value = value.min(ledger.active); + + if !value.is_zero() { + ledger.active -= value; + + // Avoid there being a dust balance left in the staking system. + if ledger.active < T::Currency::minimum_balance() { + value += ledger.active; + ledger.active = Zero::zero(); + } + + let era = Self::current_era() + Self::bonding_duration(); + ledger.unlocking.push(UnlockChunk { value, era }); + Self::update_ledger(&controller, &ledger); + } + } + + /// Remove any unlocked chunks from the `unlocking` queue from our management. + /// + /// This essentially frees up that balance to be used by the stash account to do + /// whatever it wants. + /// + /// The dispatch origin for this call must be _Signed_ by the controller, not the stash. + /// + /// See also [`Call::unbond`]. + fn withdraw_unbonded(origin) { + let controller = ensure_signed(origin)?; + let ledger = Self::ledger(&controller).ok_or("not a controller")?; + let ledger = ledger.consolidate_unlocked(Self::current_era()); + Self::update_ledger(&controller, &ledger); + } + + /// Declare the desire to validate for the origin controller. + /// + /// Effects will be felt at the beginning of the next era. + /// + /// The dispatch origin for this call must be _Signed_ by the controller, not the stash. + fn validate(origin, prefs: ValidatorPrefs>) { + let controller = ensure_signed(origin)?; + let ledger = Self::ledger(&controller).ok_or("not a controller")?; + let stash = &ledger.stash; + ensure!(prefs.unstake_threshold <= MAX_UNSTAKE_THRESHOLD, "unstake threshold too large"); + >::remove(stash); + >::insert(stash, prefs); + } + + /// Declare the desire to nominate `targets` for the origin controller. + /// + /// Effects will be felt at the beginning of the next era. + /// + /// The dispatch origin for this call must be _Signed_ by the controller, not the stash. 
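`unbond` and `withdraw_unbonded` form a two-step exit: stake moves from `active` into an `UnlockChunk` stamped with `current_era + bonding_duration`, and only chunks whose era has passed are dropped from `total`. A runnable sketch of that queue under toy `u64` types (`consolidate_unlocked` is modelled by `withdraw_unbonded` below):

struct UnlockChunk { value: u64, era: u64 }

struct Ledger { active: u64, total: u64, unlocking: Vec<UnlockChunk> }

impl Ledger {
    /// Mirror of `unbond`: move stake from `active` into the unlock queue,
    /// tagged with the era at which it may be withdrawn.
    fn unbond(&mut self, value: u64, current_era: u64, bonding_duration: u64, min_balance: u64) {
        let mut value = value.min(self.active);
        if value == 0 { return; }
        self.active -= value;
        // Sweep dust into the chunk rather than leaving a sub-minimum remainder.
        if self.active < min_balance {
            value += self.active;
            self.active = 0;
        }
        self.unlocking.push(UnlockChunk { value, era: current_era + bonding_duration });
    }

    /// Mirror of `consolidate_unlocked`: drop matured chunks and shrink `total`.
    fn withdraw_unbonded(&mut self, current_era: u64) {
        let mut total = self.total;
        self.unlocking.retain(|c| {
            if c.era > current_era { true } else { total -= c.value; false }
        });
        self.total = total;
    }
}

fn main() {
    let mut l = Ledger { active: 100, total: 100, unlocking: vec![] };
    l.unbond(40, 1, 3, 1); // withdrawable at era 4
    l.withdraw_unbonded(4);
    assert_eq!((l.active, l.total), (60, 60));
}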
+ fn nominate(origin, targets: Vec<::Source>) { + let controller = ensure_signed(origin)?; + let ledger = Self::ledger(&controller).ok_or("not a controller")?; + let stash = &ledger.stash; + ensure!(!targets.is_empty(), "targets cannot be empty"); + let targets = targets.into_iter() + .take(MAX_NOMINATIONS) + .map(T::Lookup::lookup) + .collect::, &'static str>>()?; + + >::remove(stash); + >::insert(stash, targets); + } + + /// Declare no desire to either validate or nominate. + /// + /// Effects will be felt at the beginning of the next era. + /// + /// The dispatch origin for this call must be _Signed_ by the controller, not the stash. + fn chill(origin) { + let controller = ensure_signed(origin)?; + let ledger = Self::ledger(&controller).ok_or("not a controller")?; + let stash = &ledger.stash; + >::remove(stash); + >::remove(stash); + } + + /// (Re-)set the payment target for a controller. + /// + /// Effects will be felt at the beginning of the next era. + /// + /// The dispatch origin for this call must be _Signed_ by the controller, not the stash. + fn set_payee(origin, payee: RewardDestination) { + let controller = ensure_signed(origin)?; + let ledger = Self::ledger(&controller).ok_or("not a controller")?; + let stash = &ledger.stash; + >::insert(stash, payee); + } + + /// (Re-)set the payment target for a controller. + /// + /// Effects will be felt at the beginning of the next era. + /// + /// The dispatch origin for this call must be _Signed_ by the stash, not the controller. + fn set_controller(origin, controller: ::Source) { + let stash = ensure_signed(origin)?; + let old_controller = Self::bonded(&stash).ok_or("not a stash")?; + let controller = T::Lookup::lookup(controller)?; + if >::exists(&controller) { + return Err("controller already paired") + } + if controller != old_controller { + >::insert(&stash, &controller); + if let Some(l) = >::take(&old_controller) { >::insert(&controller, l) }; + } + } + + /// Set the number of sessions in an era. + fn set_sessions_per_era(#[compact] new: T::BlockNumber) { + >::put(new); + } + + /// The length of the bonding duration in eras. + fn set_bonding_duration(#[compact] new: T::BlockNumber) { + >::put(new); + } + + /// The ideal number of validators. + fn set_validator_count(#[compact] new: u32) { + >::put(new); + } + + /// Force there to be a new era. This also forces a new session immediately after. + /// `apply_rewards` should be true for validators to get the session reward. + fn force_new_era(apply_rewards: bool) -> Result { + Self::apply_force_new_era(apply_rewards) + } + + /// Set the offline slash grace period. + fn set_offline_slash_grace(#[compact] new: u32) { + >::put(new); + } + + /// Set the validators who cannot be slashed (if any). + fn set_invulnerables(validators: Vec) { + >::put(validators); + } + } } decl_event!( @@ -774,315 +782,342 @@ decl_event!( ); impl Module { - // Just force_new_era without origin check. - fn apply_force_new_era(apply_rewards: bool) -> Result { - >::put(()); - >::apply_force_new_session(apply_rewards) - } - - // PUBLIC IMMUTABLES - - /// The length of a staking era in blocks. - pub fn era_length() -> T::BlockNumber { - Self::sessions_per_era() * >::length() - } - - /// The total balance that can be slashed from a validator controller account as of - /// right now. - pub fn slashable_balance(who: &T::AccountId) -> BalanceOf { - Self::stakers(who).total - } - - // MUTABLES (DANGEROUS) - - /// Update the ledger for a controller. This will also update the stash lock. 
- fn update_ledger(controller: &T::AccountId, ledger: &StakingLedger, T::BlockNumber>) { - T::Currency::set_lock(STAKING_ID, &ledger.stash, ledger.total, T::BlockNumber::max_value(), WithdrawReasons::all()); - >::insert(controller, ledger); - } - - /// Slash a given validator by a specific amount. Removes the slash from their balance by preference, - /// and reduces the nominators' balance if needed. - fn slash_validator(stash: &T::AccountId, slash: BalanceOf) { - // The exposure (backing stake) information of the validator to be slashed. - let exposure = Self::stakers(stash); - // The amount we are actually going to slash (can't be bigger than their total exposure) - let slash = slash.min(exposure.total); - // The amount we'll slash from the validator's stash directly. - let own_slash = exposure.own.min(slash); - let (mut imbalance, missing) = T::Currency::slash(stash, own_slash); - let own_slash = own_slash - missing; - // The amount remaining that we can't slash from the validator, that must be taken from the nominators. - let rest_slash = slash - own_slash; - if !rest_slash.is_zero() { - // The total to be slashed from the nominators. - let total = exposure.total - exposure.own; - if !total.is_zero() { - let safe_mul_rational = |b| b * rest_slash / total;// FIXME #1572 avoid overflow - for i in exposure.others.iter() { - // best effort - not much that can be done on fail. - imbalance.subsume(T::Currency::slash(&i.who, safe_mul_rational(i.value)).0) - } - } - } - T::Slash::on_unbalanced(imbalance); - } - - /// Actually make a payment to a staker. This uses the currency's reward function - /// to pay the right payee for the given staker account. - fn make_payout(stash: &T::AccountId, amount: BalanceOf) -> Option> { - let dest = Self::payee(stash); - match dest { - RewardDestination::Controller => Self::bonded(stash) - .and_then(|controller| - T::Currency::deposit_into_existing(&controller, amount).ok() - ), - RewardDestination::Stash => - T::Currency::deposit_into_existing(stash, amount).ok(), - RewardDestination::Staked => Self::bonded(stash) - .and_then(|c| Self::ledger(&c).map(|l| (c, l))) - .and_then(|(controller, mut l)| { - l.active += amount; - l.total += amount; - let r = T::Currency::deposit_into_existing(stash, amount).ok(); - Self::update_ledger(&controller, &l); - r - }), - } - } - - /// Reward a given validator by a specific amount. Add the reward to their, and their nominators' - /// balance, pro-rata based on their exposure, after having removed the validator's pre-payout cut. - fn reward_validator(stash: &T::AccountId, reward: BalanceOf) { - let off_the_table = reward.min(Self::validators(stash).validator_payment); - let reward = reward - off_the_table; - let mut imbalance = >::zero(); - let validator_cut = if reward.is_zero() { - Zero::zero() - } else { - let exposure = Self::stakers(stash); - let total = exposure.total.max(One::one()); - let safe_mul_rational = |b| b * reward / total;// FIXME #1572: avoid overflow - for i in &exposure.others { - let nom_payout = safe_mul_rational(i.value); - imbalance.maybe_subsume(Self::make_payout(&i.who, nom_payout)); - } - safe_mul_rational(exposure.own) - }; - imbalance.maybe_subsume(Self::make_payout(stash, validator_cut + off_the_table)); - T::Reward::on_unbalanced(imbalance); - } - - /// Get the reward for the session, assuming it ends with this block. 
- fn this_session_reward(actual_elapsed: T::Moment) -> BalanceOf { - let ideal_elapsed = >::ideal_session_duration(); - if ideal_elapsed.is_zero() { - return Self::current_session_reward(); - } - let per65536: u64 = (T::Moment::sa(65536u64) * ideal_elapsed.clone() / actual_elapsed.max(ideal_elapsed)).as_(); - Self::current_session_reward() * >::sa(per65536) / >::sa(65536u64) - } - - /// Session has just changed. We need to determine whether we pay a reward, slash and/or - /// move to a new era. - fn new_session(actual_elapsed: T::Moment, should_reward: bool) { - if should_reward { - // accumulate good session reward - let reward = Self::this_session_reward(actual_elapsed); - >::mutate(|r| *r += reward); - } - - let session_index = >::current_index(); - if >::take().is_some() - || ((session_index - Self::last_era_length_change()) % Self::sessions_per_era()).is_zero() - { - Self::new_era(); - } - } - - /// The era has changed - enact new staking set. - /// - /// NOTE: This always happens immediately before a session change to ensure that new validators - /// get a chance to set their session keys. - fn new_era() { - // Payout - let reward = >::take(); - if !reward.is_zero() { - let validators = Self::current_elected(); - for v in validators.iter() { - Self::reward_validator(v, reward); - } - Self::deposit_event(RawEvent::Reward(reward)); - let total_minted = reward * as As>::sa(validators.len()); - let total_rewarded_stake = Self::slot_stake() * as As>::sa(validators.len()); - T::OnRewardMinted::on_dilution(total_minted, total_rewarded_stake); - } - - // Increment current era. - >::put(&(>::get() + One::one())); - - // Enact era length change. - if let Some(next_spe) = Self::next_sessions_per_era() { - if next_spe != Self::sessions_per_era() { - >::put(&next_spe); - >::put(&>::current_index()); - } - } - - // Reassign all Stakers. - let slot_stake = Self::select_validators(); - - // Update the balances for rewarding according to the stakes. - >::put(Self::session_reward() * slot_stake); - } - - fn slashable_balance_of(stash: &T::AccountId) -> BalanceOf { - Self::bonded(stash).and_then(Self::ledger).map(|l| l.total).unwrap_or_default() - } - - /// Select a new validator set from the assembled stakers and their role preferences. - /// - /// Returns the new SlotStake value. - fn select_validators() -> BalanceOf { - let rounds = || >::get() as usize; - let validators = || >::enumerate(); - let nominators = || >::enumerate(); - let min_validator_count = Self::minimum_validator_count() as usize; - let maybe_elected_candidates = elect::( - rounds, - validators, - nominators, - Self::slashable_balance_of, - min_validator_count, - ElectionConfig::> { - equalize: false, - tolerance: >::sa(10 as u64), - iterations: 10, - } - ); - - if let Some(elected_candidates) = maybe_elected_candidates { - // Clear Stakers and reduce their slash_count. - for v in Self::current_elected().iter() { - >::remove(v); - let slash_count = >::take(v); - if slash_count > 1 { - >::insert(v, slash_count - 1); - } - } - - // Populate Stakers and figure out the minimum stake behind a slot. - let mut slot_stake = elected_candidates[0].exposure.total; - for c in &elected_candidates { - if c.exposure.total < slot_stake { - slot_stake = c.exposure.total; - } - >::insert(c.who.clone(), c.exposure.clone()); - } - >::put(&slot_stake); - - // Set the new validator set. 
- let elected_stashes = elected_candidates.into_iter().map(|i| i.who).collect::>(); - >::put(&elected_stashes); - >::set_validators( - &elected_stashes.into_iter().map(|s| Self::bonded(s).unwrap_or_default()).collect::>() - ); - - slot_stake - } else { - // There were not enough candidates for even our minimal level of functionality. - // This is bad. - // We should probably disable all functionality except for block production - // and let the chain keep producing blocks until we can decide on a sufficiently - // substantial set. - Self::slot_stake() - } - } - - /// Call when a validator is determined to be offline. `count` is the - /// number of offenses the validator has committed. - /// - /// NOTE: This is called with the controller (not the stash) account id. - pub fn on_offline_validator(controller: T::AccountId, count: usize) { - use primitives::traits::CheckedShl; - - if let Some(l) = Self::ledger(&controller) { - let stash = l.stash; - - // Early exit if validator is invulnerable. - if Self::invulnerables().contains(&stash) { - return - } - - let slash_count = Self::slash_count(&stash); - let new_slash_count = slash_count + count as u32; - >::insert(&stash, new_slash_count); - let grace = Self::offline_slash_grace(); - - if RECENT_OFFLINE_COUNT > 0 { - let item = (stash.clone(), >::block_number(), count as u32); - >::mutate(|v| if v.len() >= RECENT_OFFLINE_COUNT { - let index = v.iter() - .enumerate() - .min_by_key(|(_, (_, block, _))| block) - .expect("v is non-empty; qed") - .0; - v[index] = item; - } else { - v.push(item); - }); - } - - let prefs = Self::validators(&stash); - let unstake_threshold = prefs.unstake_threshold.min(MAX_UNSTAKE_THRESHOLD); - let max_slashes = grace + unstake_threshold; - - let event = if new_slash_count > max_slashes { - let slash_exposure = Self::stakers(&stash).total; - let offline_slash_base = Self::offline_slash() * slash_exposure; - // They're bailing. - let slash = offline_slash_base - // Multiply slash_mantissa by 2^(unstake_threshold with upper bound) - .checked_shl(unstake_threshold) - .map(|x| x.min(slash_exposure)) - .unwrap_or(slash_exposure); - let _ = Self::slash_validator(&stash, slash); - >::remove(&stash); - let _ = Self::apply_force_new_era(false); - - RawEvent::OfflineSlash(stash.clone(), slash) - } else { - RawEvent::OfflineWarning(stash.clone(), slash_count) - }; - - Self::deposit_event(event); - } - } + // Just force_new_era without origin check. + fn apply_force_new_era(apply_rewards: bool) -> Result { + >::put(()); + >::apply_force_new_session(apply_rewards) + } + + // PUBLIC IMMUTABLES + + /// The length of a staking era in blocks. + pub fn era_length() -> T::BlockNumber { + Self::sessions_per_era() * >::length() + } + + /// The total balance that can be slashed from a validator controller account as of + /// right now. + pub fn slashable_balance(who: &T::AccountId) -> BalanceOf { + Self::stakers(who).total + } + + // MUTABLES (DANGEROUS) + + /// Update the ledger for a controller. This will also update the stash lock. + fn update_ledger( + controller: &T::AccountId, + ledger: &StakingLedger, T::BlockNumber>, + ) { + T::Currency::set_lock( + STAKING_ID, + &ledger.stash, + ledger.total, + T::BlockNumber::max_value(), + WithdrawReasons::all(), + ); + >::insert(controller, ledger); + } + + /// Slash a given validator by a specific amount. Removes the slash from their balance by preference, + /// and reduces the nominators' balance if needed. 
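Note the split in `update_ledger` above: the balance lock always sits on the *stash* for `ledger.total`, while the ledger itself is written under the *controller*. A toy mirror of the two writes, kept in one helper so they cannot drift apart (the string lock id and `HashMap`s are stand-ins for `STAKING_ID` and the real storage):

use std::collections::HashMap;

const STAKING_ID: &str = "staking ";

/// Toy mirror of `update_ledger`: lock the stash, record under the controller.
fn update_ledger(
    locks: &mut HashMap<u64, (&'static str, u64)>, // account -> (lock id, amount)
    ledgers: &mut HashMap<u64, (u64, u64)>,        // controller -> (stash, total)
    controller: u64,
    stash: u64,
    total: u64,
) {
    locks.insert(stash, (STAKING_ID, total)); // set_lock is an upsert, not a push
    ledgers.insert(controller, (stash, total));
}

fn main() {
    let (mut locks, mut ledgers) = (HashMap::new(), HashMap::new());
    update_ledger(&mut locks, &mut ledgers, 10, 11, 800);
    update_ledger(&mut locks, &mut ledgers, 10, 11, 850); // re-lock at the new total
    assert_eq!(locks[&11].1, 850);
}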
+ fn slash_validator(stash: &T::AccountId, slash: BalanceOf) { + // The exposure (backing stake) information of the validator to be slashed. + let exposure = Self::stakers(stash); + // The amount we are actually going to slash (can't be bigger than their total exposure) + let slash = slash.min(exposure.total); + // The amount we'll slash from the validator's stash directly. + let own_slash = exposure.own.min(slash); + let (mut imbalance, missing) = T::Currency::slash(stash, own_slash); + let own_slash = own_slash - missing; + // The amount remaining that we can't slash from the validator, that must be taken from the nominators. + let rest_slash = slash - own_slash; + if !rest_slash.is_zero() { + // The total to be slashed from the nominators. + let total = exposure.total - exposure.own; + if !total.is_zero() { + let safe_mul_rational = |b| b * rest_slash / total; // FIXME #1572 avoid overflow + for i in exposure.others.iter() { + // best effort - not much that can be done on fail. + imbalance.subsume(T::Currency::slash(&i.who, safe_mul_rational(i.value)).0) + } + } + } + T::Slash::on_unbalanced(imbalance); + } + + /// Actually make a payment to a staker. This uses the currency's reward function + /// to pay the right payee for the given staker account. + fn make_payout(stash: &T::AccountId, amount: BalanceOf) -> Option> { + let dest = Self::payee(stash); + match dest { + RewardDestination::Controller => Self::bonded(stash).and_then(|controller| { + T::Currency::deposit_into_existing(&controller, amount).ok() + }), + RewardDestination::Stash => T::Currency::deposit_into_existing(stash, amount).ok(), + RewardDestination::Staked => Self::bonded(stash) + .and_then(|c| Self::ledger(&c).map(|l| (c, l))) + .and_then(|(controller, mut l)| { + l.active += amount; + l.total += amount; + let r = T::Currency::deposit_into_existing(stash, amount).ok(); + Self::update_ledger(&controller, &l); + r + }), + } + } + + /// Reward a given validator by a specific amount. Add the reward to their, and their nominators' + /// balance, pro-rata based on their exposure, after having removed the validator's pre-payout cut. + fn reward_validator(stash: &T::AccountId, reward: BalanceOf) { + let off_the_table = reward.min(Self::validators(stash).validator_payment); + let reward = reward - off_the_table; + let mut imbalance = >::zero(); + let validator_cut = if reward.is_zero() { + Zero::zero() + } else { + let exposure = Self::stakers(stash); + let total = exposure.total.max(One::one()); + let safe_mul_rational = |b| b * reward / total; // FIXME #1572: avoid overflow + for i in &exposure.others { + let nom_payout = safe_mul_rational(i.value); + imbalance.maybe_subsume(Self::make_payout(&i.who, nom_payout)); + } + safe_mul_rational(exposure.own) + }; + imbalance.maybe_subsume(Self::make_payout(stash, validator_cut + off_the_table)); + T::Reward::on_unbalanced(imbalance); + } + + /// Get the reward for the session, assuming it ends with this block. + fn this_session_reward(actual_elapsed: T::Moment) -> BalanceOf { + let ideal_elapsed = >::ideal_session_duration(); + if ideal_elapsed.is_zero() { + return Self::current_session_reward(); + } + let per65536: u64 = (T::Moment::sa(65536u64) * ideal_elapsed.clone() + / actual_elapsed.max(ideal_elapsed)) + .as_(); + Self::current_session_reward() * >::sa(per65536) / >::sa(65536u64) + } + + /// Session has just changed. We need to determine whether we pay a reward, slash and/or + /// move to a new era. 
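The slashing split above is pro-rata: the validator's own exposure is taken first, and any remainder is spread over the nominators in proportion to their exposure via `b * rest_slash / total`. A worked example of that arithmetic, widened to `u128` in the spirit of the FIXME:

/// Pro-rata share used by both slashing and rewarding: `value * part / total`.
/// Widening to u128 sidesteps the overflow flagged by the module's FIXME.
fn mul_rational(value: u64, part: u64, total: u64) -> u64 {
    (value as u128 * part as u128 / total as u128) as u64
}

fn main() {
    // Validator exposed with 100 own stake, nominators of 300 and 600; slash of 400.
    let (own, others, slash) = (100u64, [300u64, 600], 400u64);
    let own_slash = own.min(slash);           // 100 taken from the validator itself
    let rest = slash - own_slash;             // 300 left to take from the nominators
    let nom_total: u64 = others.iter().sum(); // 900
    let shares: Vec<u64> = others.iter().map(|&v| mul_rational(rest, v, nom_total)).collect();
    assert_eq!(shares, vec![100, 200]);       // proportional to the 300:600 split
}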
+ fn new_session(actual_elapsed: T::Moment, should_reward: bool) { + if should_reward { + // accumulate good session reward + let reward = Self::this_session_reward(actual_elapsed); + >::mutate(|r| *r += reward); + } + + let session_index = >::current_index(); + if >::take().is_some() + || ((session_index - Self::last_era_length_change()) % Self::sessions_per_era()) + .is_zero() + { + Self::new_era(); + } + } + + /// The era has changed - enact new staking set. + /// + /// NOTE: This always happens immediately before a session change to ensure that new validators + /// get a chance to set their session keys. + fn new_era() { + // Payout + let reward = >::take(); + if !reward.is_zero() { + let validators = Self::current_elected(); + for v in validators.iter() { + Self::reward_validator(v, reward); + } + Self::deposit_event(RawEvent::Reward(reward)); + let total_minted = reward * as As>::sa(validators.len()); + let total_rewarded_stake = + Self::slot_stake() * as As>::sa(validators.len()); + T::OnRewardMinted::on_dilution(total_minted, total_rewarded_stake); + } + + // Increment current era. + >::put(&(>::get() + One::one())); + + // Enact era length change. + if let Some(next_spe) = Self::next_sessions_per_era() { + if next_spe != Self::sessions_per_era() { + >::put(&next_spe); + >::put(&>::current_index()); + } + } + + // Reassign all Stakers. + let slot_stake = Self::select_validators(); + + // Update the balances for rewarding according to the stakes. + >::put(Self::session_reward() * slot_stake); + } + + fn slashable_balance_of(stash: &T::AccountId) -> BalanceOf { + Self::bonded(stash) + .and_then(Self::ledger) + .map(|l| l.total) + .unwrap_or_default() + } + + /// Select a new validator set from the assembled stakers and their role preferences. + /// + /// Returns the new SlotStake value. + fn select_validators() -> BalanceOf { + let rounds = || >::get() as usize; + let validators = || >::enumerate(); + let nominators = || >::enumerate(); + let min_validator_count = Self::minimum_validator_count() as usize; + let maybe_elected_candidates = elect::( + rounds, + validators, + nominators, + Self::slashable_balance_of, + min_validator_count, + ElectionConfig::> { + equalize: false, + tolerance: >::sa(10 as u64), + iterations: 10, + }, + ); + + if let Some(elected_candidates) = maybe_elected_candidates { + // Clear Stakers and reduce their slash_count. + for v in Self::current_elected().iter() { + >::remove(v); + let slash_count = >::take(v); + if slash_count > 1 { + >::insert(v, slash_count - 1); + } + } + + // Populate Stakers and figure out the minimum stake behind a slot. + let mut slot_stake = elected_candidates[0].exposure.total; + for c in &elected_candidates { + if c.exposure.total < slot_stake { + slot_stake = c.exposure.total; + } + >::insert(c.who.clone(), c.exposure.clone()); + } + >::put(&slot_stake); + + // Set the new validator set. + let elected_stashes = elected_candidates + .into_iter() + .map(|i| i.who) + .collect::>(); + >::put(&elected_stashes); + >::set_validators( + &elected_stashes + .into_iter() + .map(|s| Self::bonded(s).unwrap_or_default()) + .collect::>(), + ); + + slot_stake + } else { + // There were not enough candidates for even our minimal level of functionality. + // This is bad. + // We should probably disable all functionality except for block production + // and let the chain keep producing blocks until we can decide on a sufficiently + // substantial set. + Self::slot_stake() + } + } + + /// Call when a validator is determined to be offline. 
`count` is the + /// number of offenses the validator has committed. + /// + /// NOTE: This is called with the controller (not the stash) account id. + pub fn on_offline_validator(controller: T::AccountId, count: usize) { + use primitives::traits::CheckedShl; + + if let Some(l) = Self::ledger(&controller) { + let stash = l.stash; + + // Early exit if validator is invulnerable. + if Self::invulnerables().contains(&stash) { + return; + } + + let slash_count = Self::slash_count(&stash); + let new_slash_count = slash_count + count as u32; + >::insert(&stash, new_slash_count); + let grace = Self::offline_slash_grace(); + + if RECENT_OFFLINE_COUNT > 0 { + let item = ( + stash.clone(), + >::block_number(), + count as u32, + ); + >::mutate(|v| { + if v.len() >= RECENT_OFFLINE_COUNT { + let index = v + .iter() + .enumerate() + .min_by_key(|(_, (_, block, _))| block) + .expect("v is non-empty; qed") + .0; + v[index] = item; + } else { + v.push(item); + } + }); + } + + let prefs = Self::validators(&stash); + let unstake_threshold = prefs.unstake_threshold.min(MAX_UNSTAKE_THRESHOLD); + let max_slashes = grace + unstake_threshold; + + let event = if new_slash_count > max_slashes { + let slash_exposure = Self::stakers(&stash).total; + let offline_slash_base = Self::offline_slash() * slash_exposure; + // They're bailing. + let slash = offline_slash_base + // Multiply slash_mantissa by 2^(unstake_threshold with upper bound) + .checked_shl(unstake_threshold) + .map(|x| x.min(slash_exposure)) + .unwrap_or(slash_exposure); + let _ = Self::slash_validator(&stash, slash); + >::remove(&stash); + let _ = Self::apply_force_new_era(false); + + RawEvent::OfflineSlash(stash.clone(), slash) + } else { + RawEvent::OfflineWarning(stash.clone(), slash_count) + }; + + Self::deposit_event(event); + } + } } impl OnSessionChange for Module { - fn on_session_change(elapsed: T::Moment, should_reward: bool) { - Self::new_session(elapsed, should_reward); - } + fn on_session_change(elapsed: T::Moment, should_reward: bool) { + Self::new_session(elapsed, should_reward); + } } impl OnFreeBalanceZero for Module { - fn on_free_balance_zero(stash: &T::AccountId) { - if let Some(controller) = >::take(stash) { - >::remove(&controller); - } - >::remove(stash); - >::remove(stash); - >::remove(stash); - >::remove(stash); - } + fn on_free_balance_zero(stash: &T::AccountId) { + if let Some(controller) = >::take(stash) { + >::remove(&controller); + } + >::remove(stash); + >::remove(stash); + >::remove(stash); + >::remove(stash); + } } impl consensus::OnOfflineReport> for Module { - fn handle_report(reported_indices: Vec) { - for validator_index in reported_indices { - let v = >::validators()[validator_index as usize].clone(); - Self::on_offline_validator(v, 1); - } - } + fn handle_report(reported_indices: Vec) { + for validator_index in reported_indices { + let v = >::validators()[validator_index as usize].clone(); + Self::on_offline_validator(v, 1); + } + } } diff --git a/srml/staking/src/mock.rs b/srml/staking/src/mock.rs index 17723cb362..d3c10a150a 100644 --- a/srml/staking/src/mock.rs +++ b/srml/staking/src/mock.rs @@ -18,12 +18,15 @@ #![cfg(test)] -use primitives::{traits::{IdentityLookup, Convert}, BuildStorage, Perbill}; -use primitives::testing::{Digest, DigestItem, Header, UintAuthorityId, ConvertUintAuthorityId}; -use substrate_primitives::{H256, Blake2Hasher}; +use crate::{GenesisConfig, Module, StakerStatus, Trait}; +use primitives::testing::{ConvertUintAuthorityId, Digest, DigestItem, Header, UintAuthorityId}; +use primitives::{ 
+ traits::{Convert, IdentityLookup}, + BuildStorage, Perbill, +}; use runtime_io; use srml_support::impl_outer_origin; -use crate::{GenesisConfig, Module, Trait, StakerStatus}; +use substrate_primitives::{Blake2Hasher, H256}; /// The AccountId alias in this test module. pub type AccountIdType = u64; @@ -31,208 +34,273 @@ pub type AccountIdType = u64; /// Simple structure that exposes how u64 currency can be represented as... u64. pub struct CurrencyToVoteHandler; impl Convert for CurrencyToVoteHandler { - fn convert(x: u64) -> u64 { x } + fn convert(x: u64) -> u64 { + x + } } impl Convert for CurrencyToVoteHandler { - fn convert(x: u128) -> u64 { - x as u64 - } + fn convert(x: u128) -> u64 { + x as u64 + } } -impl_outer_origin!{ - pub enum Origin for Test {} +impl_outer_origin! { + pub enum Origin for Test {} } // Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted. #[derive(Clone, PartialEq, Eq, Debug)] pub struct Test; impl consensus::Trait for Test { - type Log = DigestItem; - type SessionKey = UintAuthorityId; - type InherentOfflineReport = (); + type Log = DigestItem; + type SessionKey = UintAuthorityId; + type InherentOfflineReport = (); } impl system::Trait for Test { - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Hash = H256; - type Hashing = ::primitives::traits::BlakeTwo256; - type Digest = Digest; - type AccountId = AccountIdType; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type Log = DigestItem; + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Hashing = ::primitives::traits::BlakeTwo256; + type Digest = Digest; + type AccountId = AccountIdType; + type Lookup = IdentityLookup; + type Header = Header; + type Event = (); + type Log = DigestItem; } impl balances::Trait for Test { - type Balance = u64; - type OnFreeBalanceZero = Staking; - type OnNewAccount = (); - type Event = (); - type TransactionPayment = (); - type TransferPayment = (); - type DustRemoval = (); + type Balance = u64; + type OnFreeBalanceZero = Staking; + type OnNewAccount = (); + type Event = (); + type TransactionPayment = (); + type TransferPayment = (); + type DustRemoval = (); } impl session::Trait for Test { - type ConvertAccountIdToSessionKey = ConvertUintAuthorityId; - type OnSessionChange = Staking; - type Event = (); + type ConvertAccountIdToSessionKey = ConvertUintAuthorityId; + type OnSessionChange = Staking; + type Event = (); } impl timestamp::Trait for Test { - type Moment = u64; - type OnTimestampSet = (); + type Moment = u64; + type OnTimestampSet = (); } impl Trait for Test { - type Currency = balances::Module; - type CurrencyToVote = CurrencyToVoteHandler; - type OnRewardMinted = (); - type Event = (); - type Slash = (); - type Reward = (); + type Currency = balances::Module; + type CurrencyToVote = CurrencyToVoteHandler; + type OnRewardMinted = (); + type Event = (); + type Slash = (); + type Reward = (); } pub struct ExtBuilder { - existential_deposit: u64, - session_length: u64, - sessions_per_era: u64, - current_era: u64, - reward: u64, - validator_pool: bool, - nominate: bool, - validator_count: u32, - minimum_validator_count: u32, - fare: bool, + existential_deposit: u64, + session_length: u64, + sessions_per_era: u64, + current_era: u64, + reward: u64, + validator_pool: bool, + nominate: bool, + validator_count: u32, + minimum_validator_count: u32, + fare: bool, } impl Default for ExtBuilder { - fn default() -> Self { - Self { - 
existential_deposit: 0, - session_length: 1, - sessions_per_era: 1, - current_era: 0, - reward: 10, - validator_pool: false, - nominate: true, - validator_count: 2, - minimum_validator_count: 0, - fare: true - } - } + fn default() -> Self { + Self { + existential_deposit: 0, + session_length: 1, + sessions_per_era: 1, + current_era: 0, + reward: 10, + validator_pool: false, + nominate: true, + validator_count: 2, + minimum_validator_count: 0, + fare: true, + } + } } impl ExtBuilder { - pub fn existential_deposit(mut self, existential_deposit: u64) -> Self { - self.existential_deposit = existential_deposit; - self - } - pub fn session_length(mut self, session_length: u64) -> Self { - self.session_length = session_length; - self - } - pub fn sessions_per_era(mut self, sessions_per_era: u64) -> Self { - self.sessions_per_era = sessions_per_era; - self - } - pub fn _current_era(mut self, current_era: u64) -> Self { - self.current_era = current_era; - self - } - pub fn validator_pool(mut self, validator_pool: bool) -> Self { - self.validator_pool = validator_pool; - self - } - pub fn nominate(mut self, nominate: bool) -> Self { - self.nominate = nominate; - self - } - pub fn validator_count(mut self, count: u32) -> Self { - self.validator_count = count; - self - } - pub fn minimum_validator_count(mut self, count: u32) -> Self { - self.minimum_validator_count = count; - self - } - pub fn fare(mut self, is_fare: bool) -> Self { - self.fare = is_fare; - self - } - pub fn build(self) -> runtime_io::TestExternalities { - let (mut t, mut c) = system::GenesisConfig::::default().build_storage().unwrap(); - let balance_factor = if self.existential_deposit > 0 { - 256 - } else { - 1 - }; - let _ = consensus::GenesisConfig::{ - code: vec![], - authorities: vec![], - }.assimilate_storage(&mut t, &mut c); - let _ = session::GenesisConfig::{ - session_length: self.session_length, - // NOTE: if config.nominate == false then 100 is also selected in the initial round. 
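For reference, a hypothetical test (not part of this patch) showing how this builder is meant to be driven; it assumes the usual `runtime_io::with_externalities` helper that the other srml test suites import:

// Hypothetical usage of the builder defined in this file.
#[test]
fn era_length_follows_builder_settings() {
    with_externalities(
        &mut ExtBuilder::default()
            .session_length(2)
            .sessions_per_era(3)
            .build(),
        || {
            // 3 sessions of 2 blocks each => a 6 block era.
            assert_eq!(Staking::era_length(), 6);
        },
    );
}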
- validators: if self.validator_pool { vec![10, 20, 30, 40] } else { vec![10, 20] }, - keys: vec![], - }.assimilate_storage(&mut t, &mut c); - let _ = balances::GenesisConfig::{ - balances: vec![ - (1, 10 * balance_factor), - (2, 20 * balance_factor), - (3, 300 * balance_factor), - (4, 400 * balance_factor), - (10, balance_factor), - (11, balance_factor * 1000), - (20, balance_factor), - (21, balance_factor * 2000), - (30, balance_factor), - (31, balance_factor * 2000), - (40, balance_factor), - (41, balance_factor * 2000), - (100, 2000 * balance_factor), - (101, 2000 * balance_factor), - ], - transaction_base_fee: 0, - transaction_byte_fee: 0, - existential_deposit: self.existential_deposit, - transfer_fee: 0, - creation_fee: 0, - vesting: vec![], - }.assimilate_storage(&mut t, &mut c); - let _ = GenesisConfig::{ - sessions_per_era: self.sessions_per_era, - current_era: self.current_era, - stakers: if self.validator_pool { - vec![ - (11, 10, balance_factor * 1000, StakerStatus::::Validator), - (21, 20, balance_factor * if self.fare { 1000 } else { 2000 }, StakerStatus::::Validator), - (31, 30, balance_factor * 1000, if self.validator_pool { StakerStatus::::Validator } else { StakerStatus::::Idle }), - (41, 40, balance_factor * 1000, if self.validator_pool { StakerStatus::::Validator } else { StakerStatus::::Idle }), - // nominator - (101, 100, balance_factor * 500, if self.nominate { StakerStatus::::Nominator(vec![11, 21]) } else { StakerStatus::::Nominator(vec![]) }) - ] - } else { - vec![ - (11, 10, balance_factor * 1000, StakerStatus::::Validator), - (21, 20, balance_factor * if self.fare { 1000 } else { 2000 }, StakerStatus::::Validator), - // nominator - (101, 100, balance_factor * 500, if self.nominate { StakerStatus::::Nominator(vec![11, 21]) } else { StakerStatus::::Nominator(vec![]) }) - ] - }, - validator_count: self.validator_count, - minimum_validator_count: self.minimum_validator_count, - bonding_duration: self.sessions_per_era * self.session_length * 3, - session_reward: Perbill::from_millionths((1000000 * self.reward / balance_factor) as u32), - offline_slash: Perbill::from_percent(5), - current_session_reward: self.reward, - offline_slash_grace: 0, - invulnerables: vec![], - }.assimilate_storage(&mut t, &mut c); - let _ = timestamp::GenesisConfig::{ - minimum_period: 5, - }.assimilate_storage(&mut t, &mut c); - t.into() - } + pub fn existential_deposit(mut self, existential_deposit: u64) -> Self { + self.existential_deposit = existential_deposit; + self + } + pub fn session_length(mut self, session_length: u64) -> Self { + self.session_length = session_length; + self + } + pub fn sessions_per_era(mut self, sessions_per_era: u64) -> Self { + self.sessions_per_era = sessions_per_era; + self + } + pub fn _current_era(mut self, current_era: u64) -> Self { + self.current_era = current_era; + self + } + pub fn validator_pool(mut self, validator_pool: bool) -> Self { + self.validator_pool = validator_pool; + self + } + pub fn nominate(mut self, nominate: bool) -> Self { + self.nominate = nominate; + self + } + pub fn validator_count(mut self, count: u32) -> Self { + self.validator_count = count; + self + } + pub fn minimum_validator_count(mut self, count: u32) -> Self { + self.minimum_validator_count = count; + self + } + pub fn fare(mut self, is_fare: bool) -> Self { + self.fare = is_fare; + self + } + pub fn build(self) -> runtime_io::TestExternalities { + let (mut t, mut c) = system::GenesisConfig::::default() + .build_storage() + .unwrap(); + let balance_factor = if 
self.existential_deposit > 0 { 256 } else { 1 }; + let _ = consensus::GenesisConfig:: { + code: vec![], + authorities: vec![], + } + .assimilate_storage(&mut t, &mut c); + let _ = session::GenesisConfig:: { + session_length: self.session_length, + // NOTE: if config.nominate == false then 100 is also selected in the initial round. + validators: if self.validator_pool { + vec![10, 20, 30, 40] + } else { + vec![10, 20] + }, + keys: vec![], + } + .assimilate_storage(&mut t, &mut c); + let _ = balances::GenesisConfig:: { + balances: vec![ + (1, 10 * balance_factor), + (2, 20 * balance_factor), + (3, 300 * balance_factor), + (4, 400 * balance_factor), + (10, balance_factor), + (11, balance_factor * 1000), + (20, balance_factor), + (21, balance_factor * 2000), + (30, balance_factor), + (31, balance_factor * 2000), + (40, balance_factor), + (41, balance_factor * 2000), + (100, 2000 * balance_factor), + (101, 2000 * balance_factor), + ], + transaction_base_fee: 0, + transaction_byte_fee: 0, + existential_deposit: self.existential_deposit, + transfer_fee: 0, + creation_fee: 0, + vesting: vec![], + } + .assimilate_storage(&mut t, &mut c); + let _ = GenesisConfig:: { + sessions_per_era: self.sessions_per_era, + current_era: self.current_era, + stakers: if self.validator_pool { + vec![ + ( + 11, + 10, + balance_factor * 1000, + StakerStatus::::Validator, + ), + ( + 21, + 20, + balance_factor * if self.fare { 1000 } else { 2000 }, + StakerStatus::::Validator, + ), + ( + 31, + 30, + balance_factor * 1000, + if self.validator_pool { + StakerStatus::::Validator + } else { + StakerStatus::::Idle + }, + ), + ( + 41, + 40, + balance_factor * 1000, + if self.validator_pool { + StakerStatus::::Validator + } else { + StakerStatus::::Idle + }, + ), + // nominator + ( + 101, + 100, + balance_factor * 500, + if self.nominate { + StakerStatus::::Nominator(vec![11, 21]) + } else { + StakerStatus::::Nominator(vec![]) + }, + ), + ] + } else { + vec![ + ( + 11, + 10, + balance_factor * 1000, + StakerStatus::::Validator, + ), + ( + 21, + 20, + balance_factor * if self.fare { 1000 } else { 2000 }, + StakerStatus::::Validator, + ), + // nominator + ( + 101, + 100, + balance_factor * 500, + if self.nominate { + StakerStatus::::Nominator(vec![11, 21]) + } else { + StakerStatus::::Nominator(vec![]) + }, + ), + ] + }, + validator_count: self.validator_count, + minimum_validator_count: self.minimum_validator_count, + bonding_duration: self.sessions_per_era * self.session_length * 3, + session_reward: Perbill::from_millionths( + (1000000 * self.reward / balance_factor) as u32, + ), + offline_slash: Perbill::from_percent(5), + current_session_reward: self.reward, + offline_slash_grace: 0, + invulnerables: vec![], + } + .assimilate_storage(&mut t, &mut c); + let _ = timestamp::GenesisConfig:: { minimum_period: 5 } + .assimilate_storage(&mut t, &mut c); + t.into() + } } pub type System = system::Module; diff --git a/srml/staking/src/phragmen.rs b/srml/staking/src/phragmen.rs index bb939baa79..5db4621600 100644 --- a/srml/staking/src/phragmen.rs +++ b/srml/staking/src/phragmen.rs @@ -16,11 +16,11 @@ //! Rust implementation of the Phragmén election algorithm. 
-use rstd::prelude::*;
+use crate::{BalanceOf, Exposure, IndividualExposure, Trait, ValidatorPrefs};
+use parity_codec::{Decode, Encode, HasCompact};
+use primitives::traits::{Convert, Saturating, Zero};
 use primitives::PerU128;
-use primitives::traits::{Zero, Saturating, Convert};
-use parity_codec::{HasCompact, Encode, Decode};
-use crate::{Exposure, BalanceOf, Trait, ValidatorPrefs, IndividualExposure};
+use rstd::prelude::*;

 type Fraction = PerU128;
 type ExtendedBalance = u128;

@@ -28,62 +28,62 @@ type ExtendedBalance = u128;
 /// Configure the behavior of the Phragmen election.
 /// Might be deprecated.
 pub struct ElectionConfig<Balance: HasCompact> {
- /// Perform equalize?.
- pub equalize: bool,
- /// Number of equalize iterations.
- pub iterations: usize,
- /// Tolerance of max change per equalize iteration.
- pub tolerance: Balance,
+ /// Perform equalize?
+ pub equalize: bool,
+ /// Number of equalize iterations.
+ pub iterations: usize,
+ /// Tolerance of max change per equalize iteration.
+ pub tolerance: Balance,
 }

 /// Wrapper around validation candidates and some metadata.
 #[derive(Clone, Encode, Decode, Default)]
 #[cfg_attr(feature = "std", derive(Debug))]
 pub struct Candidate<AccountId, Balance: HasCompact> {
- /// The validator's account
- pub who: AccountId,
- /// Exposure struct, holding info about the value that the validator has in stake.
- pub exposure: Exposure<AccountId, Balance>,
- /// Intermediary value used to sort candidates.
- pub score: Fraction,
- /// Accumulator of the stake of this candidate based on received votes.
- approval_stake: ExtendedBalance,
- /// Flag for being elected.
- elected: bool,
- /// This is most often equal to `Exposure.total` but not always. Needed for [`equalize`]
- backing_stake: ExtendedBalance
+ /// The validator's account.
+ pub who: AccountId,
+ /// Exposure struct, holding info about the value that the validator has in stake.
+ pub exposure: Exposure<AccountId, Balance>,
+ /// Intermediary value used to sort candidates.
+ pub score: Fraction,
+ /// Accumulator of the stake of this candidate based on received votes.
+ approval_stake: ExtendedBalance,
+ /// Flag for being elected.
+ elected: bool,
+ /// This is most often equal to `Exposure.total` but not always. Needed for [`equalize`].
+ backing_stake: ExtendedBalance,
 }

 /// Wrapper around the nomination info of a single nominator for a group of validators.
 #[derive(Clone, Encode, Decode, Default)]
 #[cfg_attr(feature = "std", derive(Debug))]
 pub struct Nominator<AccountId> {
- /// The nominator's account.
- who: AccountId,
- /// List of validators proposed by this nominator.
- edges: Vec<Edge<AccountId>>,
- /// the stake amount proposed by the nominator as a part of the vote.
- budget: ExtendedBalance,
- /// Incremented each time a nominee that this nominator voted for has been elected.
- load: Fraction,
+ /// The nominator's account.
+ who: AccountId,
+ /// List of validators proposed by this nominator.
+ edges: Vec<Edge<AccountId>>,
+ /// The stake amount proposed by the nominator as a part of the vote.
+ budget: ExtendedBalance,
+ /// Incremented each time a nominee that this nominator voted for has been elected.
+ load: Fraction,
 }

 /// Wrapper around a nominator vote and the load of that vote.
 #[derive(Clone, Encode, Decode, Default)]
 #[cfg_attr(feature = "std", derive(Debug))]
 pub struct Edge<AccountId> {
- /// Account being voted for
- who: AccountId,
- /// Load of this vote.
- load: Fraction,
- /// Final backing stake of this vote.
- backing_stake: ExtendedBalance,
- /// Index of the candidate stored in the 'candidates' vector
- candidate_index: usize,
- /// Index of the candidate stored in the 'elected_candidates' vector.
Used only with equalize. - elected_idx: usize, - /// Indicates if this edge is a vote for an elected candidate. Used only with equalize. - elected: bool, + /// Account being voted for + who: AccountId, + /// Load of this vote. + load: Fraction, + /// Final backing stake of this vote. + backing_stake: ExtendedBalance, + /// Index of the candidate stored in the 'candidates' vector + candidate_index: usize, + /// Index of the candidate stored in the 'elected_candidates' vector. Used only with equalize. + elected_idx: usize, + /// Indicates if this edge is a vote for an elected candidate. Used only with equalize. + elected: bool, } /// Perform election based on Phragmén algorithm. @@ -93,187 +93,209 @@ pub struct Edge { /// Returns an Option of elected candidates, if election is performed. /// Returns None if not enough candidates exist. pub fn elect( - get_rounds: FR, - get_validators: FV, - get_nominators: FN, - stash_of: FS, - minimum_validator_count: usize, - config: ElectionConfig>, -) -> Option>>> where - FR: Fn() -> usize, - FV: Fn() -> Box>) - >>, - FN: Fn() -> Box) - >>, - for <'r> FS: Fn(&'r T::AccountId) -> BalanceOf, + get_rounds: FR, + get_validators: FV, + get_nominators: FN, + stash_of: FS, + minimum_validator_count: usize, + config: ElectionConfig>, +) -> Option>>> +where + FR: Fn() -> usize, + FV: Fn() -> Box>)>>, + FN: Fn() -> Box)>>, + for<'r> FS: Fn(&'r T::AccountId) -> BalanceOf, { - let expand = |b: BalanceOf| , u64>>::convert(b) as ExtendedBalance; - let shrink = |b: ExtendedBalance| >>::convert(b); - let rounds = get_rounds(); - let mut elected_candidates; - - // 1- Pre-process candidates and place them in a container - let mut candidates = get_validators().map(|(who, _)| { - let stash_balance = stash_of(&who); - Candidate { - who, - exposure: Exposure { total: stash_balance, own: stash_balance, others: vec![] }, - ..Default::default() - } - }).collect::>>>(); - - // 1.1- Add phantom votes. - let mut nominators: Vec> = Vec::with_capacity(candidates.len()); - candidates.iter_mut().enumerate().for_each(|(idx, c)| { - c.approval_stake += expand(c.exposure.total); - nominators.push(Nominator { - who: c.who.clone(), - edges: vec![ Edge { who: c.who.clone(), candidate_index: idx, ..Default::default() }], - budget: expand(c.exposure.total), - load: Fraction::zero(), - }) - }); - - // 2- Collect the nominators with the associated votes. - // Also collect approval stake along the way. - nominators.extend(get_nominators().map(|(who, nominees)| { - let nominator_stake = stash_of(&who); - let mut edges: Vec> = Vec::with_capacity(nominees.len()); - for n in &nominees { - if let Some(idx) = candidates.iter_mut().position(|i| i.who == *n) { - candidates[idx].approval_stake = candidates[idx].approval_stake - .saturating_add(expand(nominator_stake)); - edges.push(Edge { who: n.clone(), candidate_index: idx, ..Default::default() }); - } - } - - Nominator { - who, - edges: edges, - budget: expand(nominator_stake), - load: Fraction::zero(), - } - })); - - - // 3- optimization: - // Candidates who have 0 stake => have no votes or all null-votes. Kick them out not. - let mut candidates = candidates.into_iter().filter(|c| c.approval_stake > 0) - .collect::>>>(); - - // 4- If we have more candidates then needed, run Phragmén. 
- if candidates.len() >= rounds { - elected_candidates = Vec::with_capacity(rounds); - // Main election loop - for _round in 0..rounds { - // Loop 1: initialize score - for c in &mut candidates { - if !c.elected { - c.score = Fraction::from_xth(c.approval_stake); - } - } - // Loop 2: increment score. - for n in &nominators { - for e in &n.edges { - let c = &mut candidates[e.candidate_index]; - if !c.elected { - let temp = n.budget.saturating_mul(*n.load) / c.approval_stake; - c.score = Fraction::from_max_value((*c.score).saturating_add(temp)); - } - } - } - - // Find the best - let winner = candidates - .iter_mut() - .filter(|c| !c.elected) - .min_by_key(|c| *c.score) - .expect("candidates length is checked to be >0; qed"); - - // loop 3: update nominator and edge load - winner.elected = true; - for n in &mut nominators { - for e in &mut n.edges { - if e.who == winner.who { - e.load = Fraction::from_max_value(*winner.score - *n.load); - n.load = winner.score; - } - } - } - - elected_candidates.push(winner.clone()); - } // end of all rounds - - // 4.1- Update backing stake of candidates and nominators - for n in &mut nominators { - for e in &mut n.edges { - // if the target of this vote is among the winners, otherwise let go. - if let Some(c) = elected_candidates.iter_mut().find(|c| c.who == e.who) { - e.elected = true; - // NOTE: for now, always divide last to avoid collapse to zero. - e.backing_stake = n.budget.saturating_mul(*e.load) / *n.load; - c.backing_stake = c.backing_stake.saturating_add(e.backing_stake); - if c.who != n.who { - // Only update the exposure if this vote is from some other account. - c.exposure.total = c.exposure.total.saturating_add(shrink(e.backing_stake)); - c.exposure.others.push( - IndividualExposure { who: n.who.clone(), value: shrink(e.backing_stake) } - ); - } - } - } - } - - // Optionally perform equalize post-processing. - if config.equalize { - let tolerance = config.tolerance; - let equalize_iterations = config.iterations; - - // Fix indexes - nominators.iter_mut().for_each(|n| { - n.edges.iter_mut().for_each(|e| { - if let Some(idx) = elected_candidates.iter().position(|c| c.who == e.who) { - e.elected_idx = idx; - } - }); - }); - - for _i in 0..equalize_iterations { - let mut max_diff = >::zero(); - nominators.iter_mut().for_each(|mut n| { - let diff = equalize::(&mut n, &mut elected_candidates, tolerance); - if diff > max_diff { - max_diff = diff; - } - }); - if max_diff < tolerance { - break; - } - } - } - } else { - if candidates.len() > minimum_validator_count { - // if we don't have enough candidates, just choose all that have some vote. - elected_candidates = candidates; - for n in &mut nominators { - let nominator = n.who.clone(); - for e in &mut n.edges { - if let Some(c) = elected_candidates.iter_mut().find(|c| c.who == e.who && c.who != nominator) { - c.exposure.total = c.exposure.total.saturating_add(shrink(n.budget)); - c.exposure.others.push( - IndividualExposure { who: n.who.clone(), value: shrink(n.budget) } - ); - } - } - } - } else { - // if we have less than minimum, use the previous validator set. 
- return None - } - } - Some(elected_candidates) + let expand = |b: BalanceOf| { + , u64>>::convert(b) as ExtendedBalance + }; + let shrink = |b: ExtendedBalance| { + >>::convert(b) + }; + let rounds = get_rounds(); + let mut elected_candidates; + + // 1- Pre-process candidates and place them in a container + let mut candidates = get_validators() + .map(|(who, _)| { + let stash_balance = stash_of(&who); + Candidate { + who, + exposure: Exposure { + total: stash_balance, + own: stash_balance, + others: vec![], + }, + ..Default::default() + } + }) + .collect::>>>(); + + // 1.1- Add phantom votes. + let mut nominators: Vec> = Vec::with_capacity(candidates.len()); + candidates.iter_mut().enumerate().for_each(|(idx, c)| { + c.approval_stake += expand(c.exposure.total); + nominators.push(Nominator { + who: c.who.clone(), + edges: vec![Edge { + who: c.who.clone(), + candidate_index: idx, + ..Default::default() + }], + budget: expand(c.exposure.total), + load: Fraction::zero(), + }) + }); + + // 2- Collect the nominators with the associated votes. + // Also collect approval stake along the way. + nominators.extend(get_nominators().map(|(who, nominees)| { + let nominator_stake = stash_of(&who); + let mut edges: Vec> = Vec::with_capacity(nominees.len()); + for n in &nominees { + if let Some(idx) = candidates.iter_mut().position(|i| i.who == *n) { + candidates[idx].approval_stake = candidates[idx] + .approval_stake + .saturating_add(expand(nominator_stake)); + edges.push(Edge { + who: n.clone(), + candidate_index: idx, + ..Default::default() + }); + } + } + + Nominator { + who, + edges: edges, + budget: expand(nominator_stake), + load: Fraction::zero(), + } + })); + + // 3- optimization: + // Candidates who have 0 stake => have no votes or all null-votes. Kick them out not. + let mut candidates = candidates + .into_iter() + .filter(|c| c.approval_stake > 0) + .collect::>>>(); + + // 4- If we have more candidates then needed, run Phragmén. + if candidates.len() >= rounds { + elected_candidates = Vec::with_capacity(rounds); + // Main election loop + for _round in 0..rounds { + // Loop 1: initialize score + for c in &mut candidates { + if !c.elected { + c.score = Fraction::from_xth(c.approval_stake); + } + } + // Loop 2: increment score. + for n in &nominators { + for e in &n.edges { + let c = &mut candidates[e.candidate_index]; + if !c.elected { + let temp = n.budget.saturating_mul(*n.load) / c.approval_stake; + c.score = Fraction::from_max_value((*c.score).saturating_add(temp)); + } + } + } + + // Find the best + let winner = candidates + .iter_mut() + .filter(|c| !c.elected) + .min_by_key(|c| *c.score) + .expect("candidates length is checked to be >0; qed"); + + // loop 3: update nominator and edge load + winner.elected = true; + for n in &mut nominators { + for e in &mut n.edges { + if e.who == winner.who { + e.load = Fraction::from_max_value(*winner.score - *n.load); + n.load = winner.score; + } + } + } + + elected_candidates.push(winner.clone()); + } // end of all rounds + + // 4.1- Update backing stake of candidates and nominators + for n in &mut nominators { + for e in &mut n.edges { + // if the target of this vote is among the winners, otherwise let go. + if let Some(c) = elected_candidates.iter_mut().find(|c| c.who == e.who) { + e.elected = true; + // NOTE: for now, always divide last to avoid collapse to zero. 
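The NOTE above ("always divide last") is about precision under integer arithmetic: multiplying before dividing preserves information that an inner division would truncate away. A small demonstration with invented numbers:

    fn main() {
        let budget: u128 = 1_000;
        let edge_load: u128 = 3;
        let nominator_load: u128 = 7;

        // Divide last, as the code above does: 1000 * 3 / 7 = 428.
        assert_eq!(budget * edge_load / nominator_load, 428);

        // Divide first: 3 / 7 truncates to 0 and the whole product collapses.
        assert_eq!(budget * (edge_load / nominator_load), 0);
    }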
+ e.backing_stake = n.budget.saturating_mul(*e.load) / *n.load; + c.backing_stake = c.backing_stake.saturating_add(e.backing_stake); + if c.who != n.who { + // Only update the exposure if this vote is from some other account. + c.exposure.total = c.exposure.total.saturating_add(shrink(e.backing_stake)); + c.exposure.others.push(IndividualExposure { + who: n.who.clone(), + value: shrink(e.backing_stake), + }); + } + } + } + } + + // Optionally perform equalize post-processing. + if config.equalize { + let tolerance = config.tolerance; + let equalize_iterations = config.iterations; + + // Fix indexes + nominators.iter_mut().for_each(|n| { + n.edges.iter_mut().for_each(|e| { + if let Some(idx) = elected_candidates.iter().position(|c| c.who == e.who) { + e.elected_idx = idx; + } + }); + }); + + for _i in 0..equalize_iterations { + let mut max_diff = >::zero(); + nominators.iter_mut().for_each(|mut n| { + let diff = equalize::(&mut n, &mut elected_candidates, tolerance); + if diff > max_diff { + max_diff = diff; + } + }); + if max_diff < tolerance { + break; + } + } + } + } else { + if candidates.len() > minimum_validator_count { + // if we don't have enough candidates, just choose all that have some vote. + elected_candidates = candidates; + for n in &mut nominators { + let nominator = n.who.clone(); + for e in &mut n.edges { + if let Some(c) = elected_candidates + .iter_mut() + .find(|c| c.who == e.who && c.who != nominator) + { + c.exposure.total = c.exposure.total.saturating_add(shrink(n.budget)); + c.exposure.others.push(IndividualExposure { + who: n.who.clone(), + value: shrink(n.budget), + }); + } + } + } + } else { + // if we have less than minimum, use the previous validator set. + return None; + } + } + Some(elected_candidates) } /// Performs equalize post-processing to the output of the election algorithm @@ -281,97 +303,108 @@ pub fn elect( /// the elected candidates. /// The return value is to tolerance at which the function has stopped. 
 pub fn equalize<T: Trait + 'static>(
-	nominator: &mut Nominator<T::AccountId>,
-	elected_candidates: &mut Vec<Candidate<T::AccountId, BalanceOf<T>>>,
-	_tolerance: BalanceOf<T>
+    nominator: &mut Nominator<T::AccountId>,
+    elected_candidates: &mut Vec<Candidate<T::AccountId, BalanceOf<T>>>,
+    _tolerance: BalanceOf<T>,
 ) -> BalanceOf<T> {
-	let expand = |b: BalanceOf<T>| <T::CurrencyToVote as Convert<BalanceOf<T>, u64>>::convert(b) as ExtendedBalance;
-	let shrink = |b: ExtendedBalance| <T::CurrencyToVote as Convert<u64, BalanceOf<T>>>::convert(b);
-	let tolerance = expand(_tolerance);
-
-	let mut elected_edges = nominator.edges
-		.iter_mut()
-		.filter(|e| e.elected)
-		.collect::<Vec<&mut Edge<T::AccountId>>>();
-
-	if elected_edges.len() == 0 {
-		return <BalanceOf<T>>::zero();
-	}
-
-	let stake_used = elected_edges
-		.iter()
-		.fold(0, |s, e| s.saturating_add(e.backing_stake));
-	let backed_stakes = elected_edges
-		.iter()
-		.map(|e| elected_candidates[e.elected_idx].backing_stake)
-		.collect::<Vec<ExtendedBalance>>();
-	let backing_backed_stake = elected_edges
-		.iter()
-		.filter(|e| e.backing_stake > 0)
-		.map(|e| elected_candidates[e.elected_idx].backing_stake)
-		.collect::<Vec<ExtendedBalance>>();
-
-	let mut difference;
-	if backing_backed_stake.len() > 0 {
-		let max_stake = *backing_backed_stake
-			.iter()
-			.max()
-			.expect("vector with positive length will have a max; qed");
-		let min_stake = *backed_stakes
-			.iter()
-			.min()
-			.expect("vector with positive length will have a min; qed");
-		difference = max_stake.saturating_sub(min_stake);
-		difference = difference.saturating_add(nominator.budget.saturating_sub(stake_used));
-		if difference < tolerance {
-			return shrink(difference);
-		}
-	} else {
-		difference = nominator.budget;
-	}
-
-	// Undo updates to exposure
-	elected_edges.iter_mut().for_each(|e| {
-		// NOTE: no assertions in the runtime, but this should nonetheless be indicative.
-		//assert_eq!(elected_candidates[e.elected_idx].who, e.who);
-		elected_candidates[e.elected_idx].backing_stake -= e.backing_stake;
-		elected_candidates[e.elected_idx].exposure.total -= shrink(e.backing_stake);
-		e.backing_stake = 0;
-	});
-
-	elected_edges.sort_unstable_by_key(|e| elected_candidates[e.elected_idx].backing_stake);
-
-	let mut cumulative_stake: ExtendedBalance = 0;
-	let mut last_index = elected_edges.len() - 1;
-	let budget = nominator.budget;
-	elected_edges.iter_mut().enumerate().for_each(|(idx, e)| {
-		let stake = elected_candidates[e.elected_idx].backing_stake;
-
-		let stake_mul = stake.saturating_mul(idx as ExtendedBalance);
-		let stake_sub = stake_mul.saturating_sub(cumulative_stake);
-		if stake_sub > budget {
-			last_index = idx.checked_sub(1).unwrap_or(0);
-			return
-		}
-		cumulative_stake = cumulative_stake.saturating_add(stake);
-	});
-
-	let last_stake = elected_candidates[elected_edges[last_index].elected_idx].backing_stake;
-	let split_ways = last_index + 1;
-	let excess = nominator.budget
-		.saturating_add(cumulative_stake)
-		.saturating_sub(last_stake.saturating_mul(split_ways as ExtendedBalance));
-	let nominator_address = nominator.who.clone();
-	elected_edges.iter_mut().take(split_ways).for_each(|e| {
-		let c = &mut elected_candidates[e.elected_idx];
-		e.backing_stake = (excess / split_ways as ExtendedBalance)
-			.saturating_add(last_stake)
-			.saturating_sub(c.backing_stake);
-		c.exposure.total = c.exposure.total.saturating_add(shrink(e.backing_stake));
-		c.backing_stake = c.backing_stake.saturating_add(e.backing_stake);
-		if let Some(i_expo) = c.exposure.others.iter_mut().find(|i| i.who == nominator_address) {
-			i_expo.value = shrink(e.backing_stake);
-		}
-	});
-	shrink(difference)
+    let expand = |b: BalanceOf<T>| {
+        <T::CurrencyToVote as Convert<BalanceOf<T>, u64>>::convert(b) as ExtendedBalance
+    };
+    let shrink = |b: ExtendedBalance| {
+        <T::CurrencyToVote as Convert<u64, BalanceOf<T>>>::convert(b)
+    };
+    let tolerance = expand(_tolerance);
+
+    let mut 
elected_edges = nominator + .edges + .iter_mut() + .filter(|e| e.elected) + .collect::>>(); + + if elected_edges.len() == 0 { + return >::zero(); + } + + let stake_used = elected_edges + .iter() + .fold(0, |s, e| s.saturating_add(e.backing_stake)); + let backed_stakes = elected_edges + .iter() + .map(|e| elected_candidates[e.elected_idx].backing_stake) + .collect::>(); + let backing_backed_stake = elected_edges + .iter() + .filter(|e| e.backing_stake > 0) + .map(|e| elected_candidates[e.elected_idx].backing_stake) + .collect::>(); + + let mut difference; + if backing_backed_stake.len() > 0 { + let max_stake = *backing_backed_stake + .iter() + .max() + .expect("vector with positive length will have a max; qed"); + let min_stake = *backed_stakes + .iter() + .min() + .expect("vector with positive length will have a min; qed"); + difference = max_stake.saturating_sub(min_stake); + difference = difference.saturating_add(nominator.budget.saturating_sub(stake_used)); + if difference < tolerance { + return shrink(difference); + } + } else { + difference = nominator.budget; + } + + // Undo updates to exposure + elected_edges.iter_mut().for_each(|e| { + // NOTE: no assertions in the runtime, but this should nonetheless be indicative. + //assert_eq!(elected_candidates[e.elected_idx].who, e.who); + elected_candidates[e.elected_idx].backing_stake -= e.backing_stake; + elected_candidates[e.elected_idx].exposure.total -= shrink(e.backing_stake); + e.backing_stake = 0; + }); + + elected_edges.sort_unstable_by_key(|e| elected_candidates[e.elected_idx].backing_stake); + + let mut cumulative_stake: ExtendedBalance = 0; + let mut last_index = elected_edges.len() - 1; + let budget = nominator.budget; + elected_edges.iter_mut().enumerate().for_each(|(idx, e)| { + let stake = elected_candidates[e.elected_idx].backing_stake; + + let stake_mul = stake.saturating_mul(idx as ExtendedBalance); + let stake_sub = stake_mul.saturating_sub(cumulative_stake); + if stake_sub > budget { + last_index = idx.checked_sub(1).unwrap_or(0); + return; + } + cumulative_stake = cumulative_stake.saturating_add(stake); + }); + + let last_stake = elected_candidates[elected_edges[last_index].elected_idx].backing_stake; + let split_ways = last_index + 1; + let excess = nominator + .budget + .saturating_add(cumulative_stake) + .saturating_sub(last_stake.saturating_mul(split_ways as ExtendedBalance)); + let nominator_address = nominator.who.clone(); + elected_edges.iter_mut().take(split_ways).for_each(|e| { + let c = &mut elected_candidates[e.elected_idx]; + e.backing_stake = (excess / split_ways as ExtendedBalance) + .saturating_add(last_stake) + .saturating_sub(c.backing_stake); + c.exposure.total = c.exposure.total.saturating_add(shrink(e.backing_stake)); + c.backing_stake = c.backing_stake.saturating_add(e.backing_stake); + if let Some(i_expo) = c + .exposure + .others + .iter_mut() + .find(|i| i.who == nominator_address) + { + i_expo.value = shrink(e.backing_stake); + } + }); + shrink(difference) } diff --git a/srml/staking/src/tests.rs b/srml/staking/src/tests.rs index 2d6ce5aa7a..a1aa490b85 100644 --- a/srml/staking/src/tests.rs +++ b/srml/staking/src/tests.rs @@ -19,1955 +19,2658 @@ #![cfg(test)] use super::*; -use runtime_io::with_externalities; +use mock::{Balances, ExtBuilder, Origin, Session, Staking, System, Test, Timestamp}; use phragmen; use primitives::PerU128; -use srml_support::{assert_ok, assert_noop, assert_eq_uvec, EnumerableStorageMap}; -use mock::{Balances, Session, Staking, System, Timestamp, Test, ExtBuilder, 
Origin}; +use runtime_io::with_externalities; use srml_support::traits::{Currency, ReservableCurrency}; +use srml_support::{assert_eq_uvec, assert_noop, assert_ok, EnumerableStorageMap}; #[test] fn basic_setup_works() { - // Verifies initial conditions of mock - with_externalities(&mut ExtBuilder::default() - .build(), - || { - assert_eq!(Staking::bonded(&11), Some(10)); // Account 11 is stashed and locked, and account 10 is the controller - assert_eq!(Staking::bonded(&21), Some(20)); // Account 21 is stashed and locked, and account 20 is the controller - assert_eq!(Staking::bonded(&1), None); // Account 1 is not a stashed - - // Account 10 controls the stash from account 11, which is 100 * balance_factor units - assert_eq!(Staking::ledger(&10), Some(StakingLedger { stash: 11, total: 1000, active: 1000, unlocking: vec![] })); - // Account 20 controls the stash from account 21, which is 200 * balance_factor units - assert_eq!(Staking::ledger(&20), Some(StakingLedger { stash: 21, total: 1000, active: 1000, unlocking: vec![] })); - // Account 1 does not control any stash - assert_eq!(Staking::ledger(&1), None); - - // ValidatorPrefs are default, thus unstake_threshold is 3, other values are default for their type - assert_eq!(>::enumerate().collect::>(), vec![ - (21, ValidatorPrefs { unstake_threshold: 3, validator_payment: 0 }), - (11, ValidatorPrefs { unstake_threshold: 3, validator_payment: 0 }) - ]); - - // Account 100 is the default nominator - assert_eq!(Staking::ledger(100), Some(StakingLedger { stash: 101, total: 500, active: 500, unlocking: vec![] })); - assert_eq!(Staking::nominators(101), vec![11, 21]); - - // Account 10 is exposed by 1000 * balance_factor from their own stash in account 11 + the default nominator vote - assert_eq!(Staking::stakers(11), Exposure { total: 1124, own: 1000, others: vec![ IndividualExposure { who: 101, value: 124 }] }); - // Account 20 is exposed by 1000 * balance_factor from their own stash in account 21 + the default nominator vote - assert_eq!(Staking::stakers(21), Exposure { total: 1375, own: 1000, others: vec![ IndividualExposure { who: 101, value: 375 }] }); - - // The number of validators required. 
- assert_eq!(Staking::validator_count(), 2); - - // Initial Era and session - assert_eq!(Staking::current_era(), 0); - assert_eq!(Session::current_index(), 0); - - // initial rewards - assert_eq!(Staking::current_session_reward(), 10); - - // initial slot_stake - assert_eq!(Staking::slot_stake(), 1124); // Naive - - // initial slash_count of validators - assert_eq!(Staking::slash_count(&11), 0); - assert_eq!(Staking::slash_count(&21), 0); - }); + // Verifies initial conditions of mock + with_externalities(&mut ExtBuilder::default().build(), || { + assert_eq!(Staking::bonded(&11), Some(10)); // Account 11 is stashed and locked, and account 10 is the controller + assert_eq!(Staking::bonded(&21), Some(20)); // Account 21 is stashed and locked, and account 20 is the controller + assert_eq!(Staking::bonded(&1), None); // Account 1 is not a stashed + + // Account 10 controls the stash from account 11, which is 100 * balance_factor units + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000, + active: 1000, + unlocking: vec![] + }) + ); + // Account 20 controls the stash from account 21, which is 200 * balance_factor units + assert_eq!( + Staking::ledger(&20), + Some(StakingLedger { + stash: 21, + total: 1000, + active: 1000, + unlocking: vec![] + }) + ); + // Account 1 does not control any stash + assert_eq!(Staking::ledger(&1), None); + + // ValidatorPrefs are default, thus unstake_threshold is 3, other values are default for their type + assert_eq!( + >::enumerate().collect::>(), + vec![ + ( + 21, + ValidatorPrefs { + unstake_threshold: 3, + validator_payment: 0 + } + ), + ( + 11, + ValidatorPrefs { + unstake_threshold: 3, + validator_payment: 0 + } + ) + ] + ); + + // Account 100 is the default nominator + assert_eq!( + Staking::ledger(100), + Some(StakingLedger { + stash: 101, + total: 500, + active: 500, + unlocking: vec![] + }) + ); + assert_eq!(Staking::nominators(101), vec![11, 21]); + + // Account 10 is exposed by 1000 * balance_factor from their own stash in account 11 + the default nominator vote + assert_eq!( + Staking::stakers(11), + Exposure { + total: 1124, + own: 1000, + others: vec![IndividualExposure { + who: 101, + value: 124 + }] + } + ); + // Account 20 is exposed by 1000 * balance_factor from their own stash in account 21 + the default nominator vote + assert_eq!( + Staking::stakers(21), + Exposure { + total: 1375, + own: 1000, + others: vec![IndividualExposure { + who: 101, + value: 375 + }] + } + ); + + // The number of validators required. 
+ assert_eq!(Staking::validator_count(), 2); + + // Initial Era and session + assert_eq!(Staking::current_era(), 0); + assert_eq!(Session::current_index(), 0); + + // initial rewards + assert_eq!(Staking::current_session_reward(), 10); + + // initial slot_stake + assert_eq!(Staking::slot_stake(), 1124); // Naive + + // initial slash_count of validators + assert_eq!(Staking::slash_count(&11), 0); + assert_eq!(Staking::slash_count(&21), 0); + }); } #[test] fn no_offline_should_work() { - // Test the staking module works when no validators are offline - with_externalities(&mut ExtBuilder::default().build(), - || { - // Slashing begins for validators immediately if found offline - assert_eq!(Staking::offline_slash_grace(), 0); - // Account 10 has not been reported offline - assert_eq!(Staking::slash_count(&10), 0); - // Account 10 has `balance_factor` free balance - assert_eq!(Balances::free_balance(&10), 1); - // Nothing happens to Account 10, as expected - assert_eq!(Staking::slash_count(&10), 0); - assert_eq!(Balances::free_balance(&10), 1); - // New era is not being forced - assert!(Staking::forcing_new_era().is_none()); - }); + // Test the staking module works when no validators are offline + with_externalities(&mut ExtBuilder::default().build(), || { + // Slashing begins for validators immediately if found offline + assert_eq!(Staking::offline_slash_grace(), 0); + // Account 10 has not been reported offline + assert_eq!(Staking::slash_count(&10), 0); + // Account 10 has `balance_factor` free balance + assert_eq!(Balances::free_balance(&10), 1); + // Nothing happens to Account 10, as expected + assert_eq!(Staking::slash_count(&10), 0); + assert_eq!(Balances::free_balance(&10), 1); + // New era is not being forced + assert!(Staking::forcing_new_era().is_none()); + }); } #[test] fn invulnerability_should_work() { - // Test that users can be invulnerable from slashing and being kicked - with_externalities(&mut ExtBuilder::default().build(), - || { - // Make account 11 invulnerable - assert_ok!(Staking::set_invulnerables(vec![11])); - // Give account 11 some funds - let _ = Balances::make_free_balance_be(&11, 70); - // There is no slash grace -- slash immediately. - assert_eq!(Staking::offline_slash_grace(), 0); - // Account 11 has not been slashed - assert_eq!(Staking::slash_count(&11), 0); - // Account 11 has the 70 funds we gave it above - assert_eq!(Balances::free_balance(&11), 70); - // Account 11 should be a validator - assert!(>::exists(&11)); - - // Set account 11 as an offline validator with a large number of reports - // Should exit early if invulnerable - Staking::on_offline_validator(10, 100); - - // Show that account 11 has not been touched - assert_eq!(Staking::slash_count(&11), 0); - assert_eq!(Balances::free_balance(&11), 70); - assert!(>::exists(&11)); - // New era not being forced - // NOTE: new era is always forced once slashing happens -> new validators need to be chosen. - assert!(Staking::forcing_new_era().is_none()); - }); + // Test that users can be invulnerable from slashing and being kicked + with_externalities(&mut ExtBuilder::default().build(), || { + // Make account 11 invulnerable + assert_ok!(Staking::set_invulnerables(vec![11])); + // Give account 11 some funds + let _ = Balances::make_free_balance_be(&11, 70); + // There is no slash grace -- slash immediately. 
+ assert_eq!(Staking::offline_slash_grace(), 0); + // Account 11 has not been slashed + assert_eq!(Staking::slash_count(&11), 0); + // Account 11 has the 70 funds we gave it above + assert_eq!(Balances::free_balance(&11), 70); + // Account 11 should be a validator + assert!(>::exists(&11)); + + // Set account 11 as an offline validator with a large number of reports + // Should exit early if invulnerable + Staking::on_offline_validator(10, 100); + + // Show that account 11 has not been touched + assert_eq!(Staking::slash_count(&11), 0); + assert_eq!(Balances::free_balance(&11), 70); + assert!(>::exists(&11)); + // New era not being forced + // NOTE: new era is always forced once slashing happens -> new validators need to be chosen. + assert!(Staking::forcing_new_era().is_none()); + }); } #[test] fn offline_should_slash_and_kick() { - // Test that an offline validator gets slashed and kicked - with_externalities(&mut ExtBuilder::default().build(), || { - // Give account 10 some balance - let _ = Balances::make_free_balance_be(&11, 1000); - // Confirm account 10 is a validator - assert!(>::exists(&11)); - // Validators get slashed immediately - assert_eq!(Staking::offline_slash_grace(), 0); - // Unstake threshold is 3 - assert_eq!(Staking::validators(&11).unstake_threshold, 3); - // Account 10 has not been slashed before - assert_eq!(Staking::slash_count(&11), 0); - // Account 10 has the funds we just gave it - assert_eq!(Balances::free_balance(&11), 1000); - // Report account 10 as offline, one greater than unstake threshold - Staking::on_offline_validator(10, 4); - // Confirm user has been reported - assert_eq!(Staking::slash_count(&11), 4); - // Confirm balance has been reduced by 2^unstake_threshold * offline_slash() * amount_at_stake. - let slash_base = Staking::offline_slash() * Staking::stakers(11).total; - assert_eq!(Balances::free_balance(&11), 1000 - 2_u64.pow(3) * slash_base); - // Confirm account 10 has been removed as a validator - assert!(!>::exists(&11)); - // A new era is forced due to slashing - assert!(Staking::forcing_new_era().is_some()); - }); + // Test that an offline validator gets slashed and kicked + with_externalities(&mut ExtBuilder::default().build(), || { + // Give account 10 some balance + let _ = Balances::make_free_balance_be(&11, 1000); + // Confirm account 10 is a validator + assert!(>::exists(&11)); + // Validators get slashed immediately + assert_eq!(Staking::offline_slash_grace(), 0); + // Unstake threshold is 3 + assert_eq!(Staking::validators(&11).unstake_threshold, 3); + // Account 10 has not been slashed before + assert_eq!(Staking::slash_count(&11), 0); + // Account 10 has the funds we just gave it + assert_eq!(Balances::free_balance(&11), 1000); + // Report account 10 as offline, one greater than unstake threshold + Staking::on_offline_validator(10, 4); + // Confirm user has been reported + assert_eq!(Staking::slash_count(&11), 4); + // Confirm balance has been reduced by 2^unstake_threshold * offline_slash() * amount_at_stake. 
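As a worked form of the doubling rule just stated, with a made-up base value (the runtime derives the base from offline_slash() times the validator's total exposure):

    fn main() {
        let slash_base: u64 = 5; // hypothetical offline_slash() * amount_at_stake
        let unstake_threshold: u32 = 3; // the mock default used in this test
        assert_eq!(2_u64.pow(unstake_threshold) * slash_base, 40);
    }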
+ let slash_base = Staking::offline_slash() * Staking::stakers(11).total; + assert_eq!( + Balances::free_balance(&11), + 1000 - 2_u64.pow(3) * slash_base + ); + // Confirm account 10 has been removed as a validator + assert!(!>::exists(&11)); + // A new era is forced due to slashing + assert!(Staking::forcing_new_era().is_some()); + }); } #[test] fn offline_grace_should_delay_slashing() { - // Tests that with grace, slashing is delayed - with_externalities(&mut ExtBuilder::default().build(), || { - // Initialize account 10 with balance - let _ = Balances::make_free_balance_be(&11, 70); - // Verify account 11 has balance - assert_eq!(Balances::free_balance(&11), 70); - - // Set offline slash grace - let offline_slash_grace = 1; - assert_ok!(Staking::set_offline_slash_grace(offline_slash_grace)); - assert_eq!(Staking::offline_slash_grace(), 1); - - // Check unstake_threshold is 3 (default) - let default_unstake_threshold = 3; - assert_eq!(Staking::validators(&11), ValidatorPrefs { unstake_threshold: default_unstake_threshold, validator_payment: 0 }); - - // Check slash count is zero - assert_eq!(Staking::slash_count(&11), 0); - - // Report account 10 up to the threshold - Staking::on_offline_validator(10, default_unstake_threshold as usize + offline_slash_grace as usize); - // Confirm slash count - assert_eq!(Staking::slash_count(&11), 4); - - // Nothing should happen - assert_eq!(Balances::free_balance(&11), 70); - - // Report account 10 one more time - Staking::on_offline_validator(10, 1); - assert_eq!(Staking::slash_count(&11), 5); - // User gets slashed - assert!(Balances::free_balance(&11) < 70); - // New era is forced - assert!(Staking::forcing_new_era().is_some()); - }); + // Tests that with grace, slashing is delayed + with_externalities(&mut ExtBuilder::default().build(), || { + // Initialize account 10 with balance + let _ = Balances::make_free_balance_be(&11, 70); + // Verify account 11 has balance + assert_eq!(Balances::free_balance(&11), 70); + + // Set offline slash grace + let offline_slash_grace = 1; + assert_ok!(Staking::set_offline_slash_grace(offline_slash_grace)); + assert_eq!(Staking::offline_slash_grace(), 1); + + // Check unstake_threshold is 3 (default) + let default_unstake_threshold = 3; + assert_eq!( + Staking::validators(&11), + ValidatorPrefs { + unstake_threshold: default_unstake_threshold, + validator_payment: 0 + } + ); + + // Check slash count is zero + assert_eq!(Staking::slash_count(&11), 0); + + // Report account 10 up to the threshold + Staking::on_offline_validator( + 10, + default_unstake_threshold as usize + offline_slash_grace as usize, + ); + // Confirm slash count + assert_eq!(Staking::slash_count(&11), 4); + + // Nothing should happen + assert_eq!(Balances::free_balance(&11), 70); + + // Report account 10 one more time + Staking::on_offline_validator(10, 1); + assert_eq!(Staking::slash_count(&11), 5); + // User gets slashed + assert!(Balances::free_balance(&11) < 70); + // New era is forced + assert!(Staking::forcing_new_era().is_some()); + }); } - #[test] fn max_unstake_threshold_works() { - // Tests that max_unstake_threshold gets used when prefs.unstake_threshold is large - with_externalities(&mut ExtBuilder::default().build(), || { - const MAX_UNSTAKE_THRESHOLD: u32 = 10; - // Two users with maximum possible balance - let _ = Balances::make_free_balance_be(&11, u64::max_value()); - let _ = Balances::make_free_balance_be(&21, u64::max_value()); - - // Give them full exposure as a staker - >::insert(&11, Exposure { total: 1000000, own: 1000000, 
others: vec![]}); - >::insert(&21, Exposure { total: 2000000, own: 2000000, others: vec![]}); - - // Check things are initialized correctly - assert_eq!(Balances::free_balance(&11), u64::max_value()); - assert_eq!(Balances::free_balance(&21), u64::max_value()); - assert_eq!(Staking::offline_slash_grace(), 0); - // Account 10 will have max unstake_threshold - assert_ok!(Staking::validate(Origin::signed(10), ValidatorPrefs { - unstake_threshold: MAX_UNSTAKE_THRESHOLD, - validator_payment: 0, - })); - // Account 20 could not set their unstake_threshold past 10 - assert_noop!(Staking::validate(Origin::signed(20), ValidatorPrefs { - unstake_threshold: MAX_UNSTAKE_THRESHOLD + 1, - validator_payment: 0}), - "unstake threshold too large" - ); - // Give Account 20 unstake_threshold 11 anyway, should still be limited to 10 - >::insert(21, ValidatorPrefs { - unstake_threshold: MAX_UNSTAKE_THRESHOLD + 1, - validator_payment: 0, - }); - - >::put(Perbill::from_fraction(0.0001)); - - // Report each user 1 more than the max_unstake_threshold - Staking::on_offline_validator(10, MAX_UNSTAKE_THRESHOLD as usize + 1); - Staking::on_offline_validator(20, MAX_UNSTAKE_THRESHOLD as usize + 1); - - // Show that each balance only gets reduced by 2^max_unstake_threshold times 10% - // of their total stake. - assert_eq!(Balances::free_balance(&11), u64::max_value() - 2_u64.pow(MAX_UNSTAKE_THRESHOLD) * 100); - assert_eq!(Balances::free_balance(&21), u64::max_value() - 2_u64.pow(MAX_UNSTAKE_THRESHOLD) * 200); - }); + // Tests that max_unstake_threshold gets used when prefs.unstake_threshold is large + with_externalities(&mut ExtBuilder::default().build(), || { + const MAX_UNSTAKE_THRESHOLD: u32 = 10; + // Two users with maximum possible balance + let _ = Balances::make_free_balance_be(&11, u64::max_value()); + let _ = Balances::make_free_balance_be(&21, u64::max_value()); + + // Give them full exposure as a staker + >::insert( + &11, + Exposure { + total: 1000000, + own: 1000000, + others: vec![], + }, + ); + >::insert( + &21, + Exposure { + total: 2000000, + own: 2000000, + others: vec![], + }, + ); + + // Check things are initialized correctly + assert_eq!(Balances::free_balance(&11), u64::max_value()); + assert_eq!(Balances::free_balance(&21), u64::max_value()); + assert_eq!(Staking::offline_slash_grace(), 0); + // Account 10 will have max unstake_threshold + assert_ok!(Staking::validate( + Origin::signed(10), + ValidatorPrefs { + unstake_threshold: MAX_UNSTAKE_THRESHOLD, + validator_payment: 0, + } + )); + // Account 20 could not set their unstake_threshold past 10 + assert_noop!( + Staking::validate( + Origin::signed(20), + ValidatorPrefs { + unstake_threshold: MAX_UNSTAKE_THRESHOLD + 1, + validator_payment: 0 + } + ), + "unstake threshold too large" + ); + // Give Account 20 unstake_threshold 11 anyway, should still be limited to 10 + >::insert( + 21, + ValidatorPrefs { + unstake_threshold: MAX_UNSTAKE_THRESHOLD + 1, + validator_payment: 0, + }, + ); + + >::put(Perbill::from_fraction(0.0001)); + + // Report each user 1 more than the max_unstake_threshold + Staking::on_offline_validator(10, MAX_UNSTAKE_THRESHOLD as usize + 1); + Staking::on_offline_validator(20, MAX_UNSTAKE_THRESHOLD as usize + 1); + + // Show that each balance only gets reduced by 2^max_unstake_threshold times 10% + // of their total stake. 
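Checking the arithmetic behind the two assertions that follow: OfflineSlash is set above to Perbill::from_fraction(0.0001), i.e. 0.01% of stake, so the base slashes on the inserted exposures are 100 and 200, and the capped exponent scales them by 2^10 = 1024:

    fn main() {
        const MAX_UNSTAKE_THRESHOLD: u32 = 10;
        let base_11 = (1_000_000_f64 * 0.0001) as u64; // exposure of 11 -> 100
        let base_21 = (2_000_000_f64 * 0.0001) as u64; // exposure of 21 -> 200
        assert_eq!(2_u64.pow(MAX_UNSTAKE_THRESHOLD) * base_11, 102_400);
        assert_eq!(2_u64.pow(MAX_UNSTAKE_THRESHOLD) * base_21, 204_800);
    }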
+ assert_eq!( + Balances::free_balance(&11), + u64::max_value() - 2_u64.pow(MAX_UNSTAKE_THRESHOLD) * 100 + ); + assert_eq!( + Balances::free_balance(&21), + u64::max_value() - 2_u64.pow(MAX_UNSTAKE_THRESHOLD) * 200 + ); + }); } #[test] fn slashing_does_not_cause_underflow() { - // Tests that slashing more than a user has does not underflow - with_externalities(&mut ExtBuilder::default().build(), || { - // Verify initial conditions - assert_eq!(Balances::free_balance(&11), 1000); - assert_eq!(Staking::offline_slash_grace(), 0); - - // Set validator preference so that 2^unstake_threshold would cause overflow (greater than 64) - >::insert(11, ValidatorPrefs { - unstake_threshold: 10, - validator_payment: 0, - }); - - System::set_block_number(1); - Session::check_rotate_session(System::block_number()); - - // Should not panic - Staking::on_offline_validator(10, 100); - // Confirm that underflow has not occurred, and account balance is set to zero - assert_eq!(Balances::free_balance(&11), 0); - }); + // Tests that slashing more than a user has does not underflow + with_externalities(&mut ExtBuilder::default().build(), || { + // Verify initial conditions + assert_eq!(Balances::free_balance(&11), 1000); + assert_eq!(Staking::offline_slash_grace(), 0); + + // Set validator preference so that 2^unstake_threshold would cause overflow (greater than 64) + >::insert( + 11, + ValidatorPrefs { + unstake_threshold: 10, + validator_payment: 0, + }, + ); + + System::set_block_number(1); + Session::check_rotate_session(System::block_number()); + + // Should not panic + Staking::on_offline_validator(10, 100); + // Confirm that underflow has not occurred, and account balance is set to zero + assert_eq!(Balances::free_balance(&11), 0); + }); } - #[test] fn rewards_should_work() { - // should check that: - // * rewards get recorded per session - // * rewards get paid per Era - // * Check that nominators are also rewarded - with_externalities(&mut ExtBuilder::default() - .session_length(3) - .sessions_per_era(3) - .build(), - || { - let delay = 0; - // this test is only in the scope of one era. Since this variable changes - // at the last block/new era, we'll save it. - let session_reward = 10; - - // Set payee to controller - assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Controller)); - - // Initial config should be correct - assert_eq!(Staking::era_length(), 9); - assert_eq!(Staking::sessions_per_era(), 3); - assert_eq!(Staking::last_era_length_change(), 0); - assert_eq!(Staking::current_era(), 0); - assert_eq!(Session::current_index(), 0); - assert_eq!(Staking::current_session_reward(), 10); - - // check the balance of a validator accounts. - assert_eq!(Balances::total_balance(&11), 1000); - // and the nominator (to-be) - let _ = Balances::make_free_balance_be(&2, 500); - assert_eq!(Balances::total_balance(&2), 500); - - // add a dummy nominator. - // NOTE: this nominator is being added 'manually'. a Further test (nomination_and_reward..) will add it via '.nominate()' - >::insert(&11, Exposure { - own: 500, // equal division indicates that the reward will be equally divided among validator and nominator. - total: 1000, - others: vec![IndividualExposure {who: 2, value: 500 }] - }); - - >::insert(&2, RewardDestination::Stash); - assert_eq!(Staking::payee(2), RewardDestination::Stash); - assert_eq!(Staking::payee(11), RewardDestination::Controller); - - let mut block = 3; - // Block 3 => Session 1 => Era 0 - System::set_block_number(block); - Timestamp::set_timestamp(block*5); // on time. 
- Session::check_rotate_session(System::block_number()); - assert_eq!(Staking::current_era(), 0); - assert_eq!(Session::current_index(), 1); - - // session triggered: the reward value stashed should be 10 -- defined in ExtBuilder genesis. - assert_eq!(Staking::current_session_reward(), session_reward); - assert_eq!(Staking::current_era_reward(), session_reward); - - block = 6; // Block 6 => Session 2 => Era 0 - System::set_block_number(block); - Timestamp::set_timestamp(block*5 + delay); // a little late. - Session::check_rotate_session(System::block_number()); - assert_eq!(Staking::current_era(), 0); - assert_eq!(Session::current_index(), 2); - - // session reward is the same, - assert_eq!(Staking::current_session_reward(), session_reward); - // though 2 will be deducted while stashed in the era reward due to delay - assert_eq!(Staking::current_era_reward(), 2*session_reward - delay); - - block = 9; // Block 9 => Session 3 => Era 1 - System::set_block_number(block); - Timestamp::set_timestamp(block*5); // back to being on time. no delays - Session::check_rotate_session(System::block_number()); - assert_eq!(Staking::current_era(), 1); - assert_eq!(Session::current_index(), 3); - - assert_eq!(Balances::total_balance(&10), 1 + (3*session_reward - delay)/2); - assert_eq!(Balances::total_balance(&2), 500 + (3*session_reward - delay)/2); - }); + // should check that: + // * rewards get recorded per session + // * rewards get paid per Era + // * Check that nominators are also rewarded + with_externalities( + &mut ExtBuilder::default() + .session_length(3) + .sessions_per_era(3) + .build(), + || { + let delay = 0; + // this test is only in the scope of one era. Since this variable changes + // at the last block/new era, we'll save it. + let session_reward = 10; + + // Set payee to controller + assert_ok!(Staking::set_payee( + Origin::signed(10), + RewardDestination::Controller + )); + + // Initial config should be correct + assert_eq!(Staking::era_length(), 9); + assert_eq!(Staking::sessions_per_era(), 3); + assert_eq!(Staking::last_era_length_change(), 0); + assert_eq!(Staking::current_era(), 0); + assert_eq!(Session::current_index(), 0); + assert_eq!(Staking::current_session_reward(), 10); + + // check the balance of a validator accounts. + assert_eq!(Balances::total_balance(&11), 1000); + // and the nominator (to-be) + let _ = Balances::make_free_balance_be(&2, 500); + assert_eq!(Balances::total_balance(&2), 500); + + // add a dummy nominator. + // NOTE: this nominator is being added 'manually'. a Further test (nomination_and_reward..) will add it via '.nominate()' + >::insert( + &11, + Exposure { + own: 500, // equal division indicates that the reward will be equally divided among validator and nominator. + total: 1000, + others: vec![IndividualExposure { who: 2, value: 500 }], + }, + ); + + >::insert(&2, RewardDestination::Stash); + assert_eq!(Staking::payee(2), RewardDestination::Stash); + assert_eq!(Staking::payee(11), RewardDestination::Controller); + + let mut block = 3; + // Block 3 => Session 1 => Era 0 + System::set_block_number(block); + Timestamp::set_timestamp(block * 5); // on time. + Session::check_rotate_session(System::block_number()); + assert_eq!(Staking::current_era(), 0); + assert_eq!(Session::current_index(), 1); + + // session triggered: the reward value stashed should be 10 -- defined in ExtBuilder genesis. 
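The payout asserted at the end of this test, worked by hand from the test's own numbers: three sessions of reward 10 with no delay accumulate an era reward of 30, and the 500/500 exposure split between validator 11 and nominator 2 halves it:

    fn main() {
        let session_reward: u64 = 10;
        let delay: u64 = 0;
        let era_reward = 3 * session_reward - delay; // 30
        let half = era_reward / 2; // own/total = 500/1000 of the exposure
        assert_eq!(1 + half, 16); // controller 10 starts at balance 1
        assert_eq!(500 + half, 515); // nominator stash 2 starts at 500
    }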
+ assert_eq!(Staking::current_session_reward(), session_reward); + assert_eq!(Staking::current_era_reward(), session_reward); + + block = 6; // Block 6 => Session 2 => Era 0 + System::set_block_number(block); + Timestamp::set_timestamp(block * 5 + delay); // a little late. + Session::check_rotate_session(System::block_number()); + assert_eq!(Staking::current_era(), 0); + assert_eq!(Session::current_index(), 2); + + // session reward is the same, + assert_eq!(Staking::current_session_reward(), session_reward); + // though 2 will be deducted while stashed in the era reward due to delay + assert_eq!(Staking::current_era_reward(), 2 * session_reward - delay); + + block = 9; // Block 9 => Session 3 => Era 1 + System::set_block_number(block); + Timestamp::set_timestamp(block * 5); // back to being on time. no delays + Session::check_rotate_session(System::block_number()); + assert_eq!(Staking::current_era(), 1); + assert_eq!(Session::current_index(), 3); + + assert_eq!( + Balances::total_balance(&10), + 1 + (3 * session_reward - delay) / 2 + ); + assert_eq!( + Balances::total_balance(&2), + 500 + (3 * session_reward - delay) / 2 + ); + }, + ); } #[test] fn multi_era_reward_should_work() { - // should check that: - // The value of current_session_reward is set at the end of each era, based on - // slot_stake and session_reward. Check and verify this. - with_externalities(&mut ExtBuilder::default() - .session_length(3) - .sessions_per_era(3) - .nominate(false) - .build(), - || { - let delay = 0; - let session_reward = 10; - - // This is set by the test config builder. - assert_eq!(Staking::current_session_reward(), session_reward); - - // check the balance of a validator accounts. - assert_eq!(Balances::total_balance(&10), 1); - - // Set payee to controller - assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Controller)); - - let mut block = 3; - // Block 3 => Session 1 => Era 0 - System::set_block_number(block); - Timestamp::set_timestamp(block*5); // on time. - Session::check_rotate_session(System::block_number()); - assert_eq!(Staking::current_era(), 0); - assert_eq!(Session::current_index(), 1); - - // session triggered: the reward value stashed should be 10 -- defined in ExtBuilder genesis. - assert_eq!(Staking::current_session_reward(), session_reward); - assert_eq!(Staking::current_era_reward(), session_reward); - - block = 6; // Block 6 => Session 2 => Era 0 - System::set_block_number(block); - Timestamp::set_timestamp(block*5 + delay); // a little late. - Session::check_rotate_session(System::block_number()); - assert_eq!(Staking::current_era(), 0); - assert_eq!(Session::current_index(), 2); - - assert_eq!(Staking::current_session_reward(), session_reward); - assert_eq!(Staking::current_era_reward(), 2*session_reward - delay); - - block = 9; // Block 9 => Session 3 => Era 1 - System::set_block_number(block); - Timestamp::set_timestamp(block*5); // back to being punktlisch. 
no delayss - Session::check_rotate_session(System::block_number()); - assert_eq!(Staking::current_era(), 1); - assert_eq!(Session::current_index(), 3); - - // 1 + sum of of the session rewards accumulated - let recorded_balance = 1 + 3*session_reward - delay; - assert_eq!(Balances::total_balance(&10), recorded_balance); - - // the reward for next era will be: session_reward * slot_stake - let new_session_reward = Staking::session_reward() * Staking::slot_stake(); - assert_eq!(Staking::current_session_reward(), new_session_reward); - - // fast forward to next era: - block=12;System::set_block_number(block);Timestamp::set_timestamp(block*5);Session::check_rotate_session(System::block_number()); - block=15;System::set_block_number(block);Timestamp::set_timestamp(block*5);Session::check_rotate_session(System::block_number()); - - // intermediate test. - assert_eq!(Staking::current_era_reward(), 2*new_session_reward); - - // new era is triggered here. - block=18;System::set_block_number(block);Timestamp::set_timestamp(block*5);Session::check_rotate_session(System::block_number()); - - // pay time - assert_eq!(Balances::total_balance(&10), 3*new_session_reward + recorded_balance); - }); + // should check that: + // The value of current_session_reward is set at the end of each era, based on + // slot_stake and session_reward. Check and verify this. + with_externalities( + &mut ExtBuilder::default() + .session_length(3) + .sessions_per_era(3) + .nominate(false) + .build(), + || { + let delay = 0; + let session_reward = 10; + + // This is set by the test config builder. + assert_eq!(Staking::current_session_reward(), session_reward); + + // check the balance of a validator accounts. + assert_eq!(Balances::total_balance(&10), 1); + + // Set payee to controller + assert_ok!(Staking::set_payee( + Origin::signed(10), + RewardDestination::Controller + )); + + let mut block = 3; + // Block 3 => Session 1 => Era 0 + System::set_block_number(block); + Timestamp::set_timestamp(block * 5); // on time. + Session::check_rotate_session(System::block_number()); + assert_eq!(Staking::current_era(), 0); + assert_eq!(Session::current_index(), 1); + + // session triggered: the reward value stashed should be 10 -- defined in ExtBuilder genesis. + assert_eq!(Staking::current_session_reward(), session_reward); + assert_eq!(Staking::current_era_reward(), session_reward); + + block = 6; // Block 6 => Session 2 => Era 0 + System::set_block_number(block); + Timestamp::set_timestamp(block * 5 + delay); // a little late. + Session::check_rotate_session(System::block_number()); + assert_eq!(Staking::current_era(), 0); + assert_eq!(Session::current_index(), 2); + + assert_eq!(Staking::current_session_reward(), session_reward); + assert_eq!(Staking::current_era_reward(), 2 * session_reward - delay); + + block = 9; // Block 9 => Session 3 => Era 1 + System::set_block_number(block); + Timestamp::set_timestamp(block * 5); // back to being punktlisch. 
no delayss + Session::check_rotate_session(System::block_number()); + assert_eq!(Staking::current_era(), 1); + assert_eq!(Session::current_index(), 3); + + // 1 + sum of of the session rewards accumulated + let recorded_balance = 1 + 3 * session_reward - delay; + assert_eq!(Balances::total_balance(&10), recorded_balance); + + // the reward for next era will be: session_reward * slot_stake + let new_session_reward = Staking::session_reward() * Staking::slot_stake(); + assert_eq!(Staking::current_session_reward(), new_session_reward); + + // fast forward to next era: + block = 12; + System::set_block_number(block); + Timestamp::set_timestamp(block * 5); + Session::check_rotate_session(System::block_number()); + block = 15; + System::set_block_number(block); + Timestamp::set_timestamp(block * 5); + Session::check_rotate_session(System::block_number()); + + // intermediate test. + assert_eq!(Staking::current_era_reward(), 2 * new_session_reward); + + // new era is triggered here. + block = 18; + System::set_block_number(block); + Timestamp::set_timestamp(block * 5); + Session::check_rotate_session(System::block_number()); + + // pay time + assert_eq!( + Balances::total_balance(&10), + 3 * new_session_reward + recorded_balance + ); + }, + ); } #[test] fn staking_should_work() { - // should test: - // * new validators can be added to the default set - // * new ones will be chosen per era - // * either one can unlock the stash and back-down from being a validator via `chill`ing. - with_externalities(&mut ExtBuilder::default() - .sessions_per_era(3) - .nominate(false) - .fare(false) // to give 20 more staked value - .build(), - || { - // remember + compare this along with the test. - assert_eq_uvec!(Session::validators(), vec![20, 10]); - - assert_ok!(Staking::set_bonding_duration(2)); - assert_eq!(Staking::bonding_duration(), 2); - - // put some money in account that we'll use. - for i in 1..5 { let _ = Balances::make_free_balance_be(&i, 2000); } - - // --- Block 1: - System::set_block_number(1); - Session::check_rotate_session(System::block_number()); - assert_eq!(Staking::current_era(), 0); - - // add a new candidate for being a validator. account 3 controlled by 4. - assert_ok!(Staking::bond(Origin::signed(3), 4, 1500, RewardDestination::Controller)); - assert_ok!(Staking::validate(Origin::signed(4), ValidatorPrefs::default())); - - // No effects will be seen so far. - assert_eq_uvec!(Session::validators(), vec![20, 10]); - - // --- Block 2: - System::set_block_number(2); - Session::check_rotate_session(System::block_number()); - assert_eq!(Staking::current_era(), 0); - - // No effects will be seen so far. Era has not been yet triggered. - assert_eq_uvec!(Session::validators(), vec![20, 10]); - - - // --- Block 3: the validators will now change. - System::set_block_number(3); - Session::check_rotate_session(System::block_number()); - - // 2 only voted for 4 and 20 - assert_eq!(Session::validators().len(), 2); - assert_eq_uvec!(Session::validators(), vec![20, 4]); - assert_eq!(Staking::current_era(), 1); - - - // --- Block 4: Unstake 4 as a validator, freeing up the balance stashed in 3 - System::set_block_number(4); - Session::check_rotate_session(System::block_number()); - - // 4 will chill - Staking::chill(Origin::signed(4)).unwrap(); - - // nothing should be changed so far. - assert_eq_uvec!(Session::validators(), vec![20, 4]); - assert_eq!(Staking::current_era(), 1); - - - // --- Block 5: nothing. 4 is still there. 
- System::set_block_number(5); - Session::check_rotate_session(System::block_number()); - assert_eq_uvec!(Session::validators(), vec![20, 4]); - assert_eq!(Staking::current_era(), 1); - - - // --- Block 6: 4 will not be a validator. - System::set_block_number(6); - Session::check_rotate_session(System::block_number()); - assert_eq!(Staking::current_era(), 2); - assert_eq!(Session::validators().contains(&4), false); - assert_eq_uvec!(Session::validators(), vec![20, 10]); - - // Note: the stashed value of 4 is still lock - assert_eq!(Staking::ledger(&4), Some(StakingLedger { stash: 3, total: 1500, active: 1500, unlocking: vec![] })); - // e.g. it cannot spend more than 500 that it has free from the total 2000 - assert_noop!(Balances::reserve(&3, 501), "account liquidity restrictions prevent withdrawal"); - assert_ok!(Balances::reserve(&3, 409)); - }); + // should test: + // * new validators can be added to the default set + // * new ones will be chosen per era + // * either one can unlock the stash and back-down from being a validator via `chill`ing. + with_externalities( + &mut ExtBuilder::default() + .sessions_per_era(3) + .nominate(false) + .fare(false) // to give 20 more staked value + .build(), + || { + // remember + compare this along with the test. + assert_eq_uvec!(Session::validators(), vec![20, 10]); + + assert_ok!(Staking::set_bonding_duration(2)); + assert_eq!(Staking::bonding_duration(), 2); + + // put some money in account that we'll use. + for i in 1..5 { + let _ = Balances::make_free_balance_be(&i, 2000); + } + + // --- Block 1: + System::set_block_number(1); + Session::check_rotate_session(System::block_number()); + assert_eq!(Staking::current_era(), 0); + + // add a new candidate for being a validator. account 3 controlled by 4. + assert_ok!(Staking::bond( + Origin::signed(3), + 4, + 1500, + RewardDestination::Controller + )); + assert_ok!(Staking::validate( + Origin::signed(4), + ValidatorPrefs::default() + )); + + // No effects will be seen so far. + assert_eq_uvec!(Session::validators(), vec![20, 10]); + + // --- Block 2: + System::set_block_number(2); + Session::check_rotate_session(System::block_number()); + assert_eq!(Staking::current_era(), 0); + + // No effects will be seen so far. Era has not been yet triggered. + assert_eq_uvec!(Session::validators(), vec![20, 10]); + + // --- Block 3: the validators will now change. + System::set_block_number(3); + Session::check_rotate_session(System::block_number()); + + // 2 only voted for 4 and 20 + assert_eq!(Session::validators().len(), 2); + assert_eq_uvec!(Session::validators(), vec![20, 4]); + assert_eq!(Staking::current_era(), 1); + + // --- Block 4: Unstake 4 as a validator, freeing up the balance stashed in 3 + System::set_block_number(4); + Session::check_rotate_session(System::block_number()); + + // 4 will chill + Staking::chill(Origin::signed(4)).unwrap(); + + // nothing should be changed so far. + assert_eq_uvec!(Session::validators(), vec![20, 4]); + assert_eq!(Staking::current_era(), 1); + + // --- Block 5: nothing. 4 is still there. + System::set_block_number(5); + Session::check_rotate_session(System::block_number()); + assert_eq_uvec!(Session::validators(), vec![20, 4]); + assert_eq!(Staking::current_era(), 1); + + // --- Block 6: 4 will not be a validator. 
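The liquidity check asserted at the end of this test, in isolation: stash 3 holds 2000 free balance of which 1500 is still bonded, so at most 500 is reservable:

    fn main() {
        let free: u64 = 2000;
        let bonded: u64 = 1500;
        let reservable = free - bonded; // 500
        assert!(501 > reservable); // Balances::reserve(&3, 501) must fail
        assert!(409 <= reservable); // Balances::reserve(&3, 409) succeeds
    }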
+ System::set_block_number(6); + Session::check_rotate_session(System::block_number()); + assert_eq!(Staking::current_era(), 2); + assert_eq!(Session::validators().contains(&4), false); + assert_eq_uvec!(Session::validators(), vec![20, 10]); + + // Note: the stashed value of 4 is still lock + assert_eq!( + Staking::ledger(&4), + Some(StakingLedger { + stash: 3, + total: 1500, + active: 1500, + unlocking: vec![] + }) + ); + // e.g. it cannot spend more than 500 that it has free from the total 2000 + assert_noop!( + Balances::reserve(&3, 501), + "account liquidity restrictions prevent withdrawal" + ); + assert_ok!(Balances::reserve(&3, 409)); + }, + ); } #[test] fn less_than_needed_candidates_works() { - // Test the situation where the number of validators are less than `ValidatorCount` but more than - // The expected behavior is to choose all the candidates that have some vote. - with_externalities(&mut ExtBuilder::default() - .minimum_validator_count(1) - .validator_count(3) - .nominate(false) - .build(), - || { - assert_eq!(Staking::era_length(), 1); - assert_eq!(Staking::validator_count(), 3); - assert_eq!(Staking::minimum_validator_count(), 1); - - // initial validators - assert_eq_uvec!(Session::validators(), vec![20, 10]); - - // 10 and 20 are now valid candidates. - // trigger era - System::set_block_number(1); - Session::check_rotate_session(System::block_number()); - assert_eq!(Staking::current_era(), 1); - - // both validators will be chosen again. NO election algorithm is even executed. - assert_eq_uvec!(Session::validators(), vec![20, 10]); - - // But the exposure is updated in a simple way. No external votes exists. This is purely self-vote. - assert_eq!(Staking::stakers(10).others.iter().map(|e| e.who).collect::>>(), vec![]); - assert_eq!(Staking::stakers(20).others.iter().map(|e| e.who).collect::>>(), vec![]); - }); + // Test the situation where the number of validators are less than `ValidatorCount` but more than + // The expected behavior is to choose all the candidates that have some vote. + with_externalities( + &mut ExtBuilder::default() + .minimum_validator_count(1) + .validator_count(3) + .nominate(false) + .build(), + || { + assert_eq!(Staking::era_length(), 1); + assert_eq!(Staking::validator_count(), 3); + assert_eq!(Staking::minimum_validator_count(), 1); + + // initial validators + assert_eq_uvec!(Session::validators(), vec![20, 10]); + + // 10 and 20 are now valid candidates. + // trigger era + System::set_block_number(1); + Session::check_rotate_session(System::block_number()); + assert_eq!(Staking::current_era(), 1); + + // both validators will be chosen again. NO election algorithm is even executed. + assert_eq_uvec!(Session::validators(), vec![20, 10]); + + // But the exposure is updated in a simple way. No external votes exists. This is purely self-vote. + assert_eq!( + Staking::stakers(10) + .others + .iter() + .map(|e| e.who) + .collect::>>(), + vec![] + ); + assert_eq!( + Staking::stakers(20) + .others + .iter() + .map(|e| e.who) + .collect::>>(), + vec![] + ); + }, + ); } #[test] fn no_candidate_emergency_condition() { - // Test the situation where the number of validators are less than `ValidatorCount` and less than - // The expected behavior is to choose all candidates from the previous era. 
- with_externalities(&mut ExtBuilder::default() - .minimum_validator_count(10) - .validator_count(15) - .validator_pool(true) - .nominate(false) - .build(), - || { - assert_eq!(Staking::era_length(), 1); - assert_eq!(Staking::validator_count(), 15); - - // initial validators - assert_eq_uvec!(Session::validators(), vec![10, 20, 30, 40]); - - // trigger era - System::set_block_number(1); - Session::check_rotate_session(System::block_number()); - assert_eq!(Staking::current_era(), 1); - - // No one nominates => no one has a proper vote => no change - assert_eq_uvec!(Session::validators(), vec![10, 20, 30, 40]); - }); + // Test the situation where the number of validators are less than `ValidatorCount` and less than + // The expected behavior is to choose all candidates from the previous era. + with_externalities( + &mut ExtBuilder::default() + .minimum_validator_count(10) + .validator_count(15) + .validator_pool(true) + .nominate(false) + .build(), + || { + assert_eq!(Staking::era_length(), 1); + assert_eq!(Staking::validator_count(), 15); + + // initial validators + assert_eq_uvec!(Session::validators(), vec![10, 20, 30, 40]); + + // trigger era + System::set_block_number(1); + Session::check_rotate_session(System::block_number()); + assert_eq!(Staking::current_era(), 1); + + // No one nominates => no one has a proper vote => no change + assert_eq_uvec!(Session::validators(), vec![10, 20, 30, 40]); + }, + ); } #[test] fn nominating_and_rewards_should_work() { - // PHRAGMEN OUTPUT: running this test with the reference impl gives: - // - // Votes [('10', 1000, ['10']), ('20', 1000, ['20']), ('30', 1000, ['30']), ('40', 1000, ['40']), ('2', 1000, ['10', '20', '30']), ('4', 1000, ['10', '20', '40'])] - // Sequential Phragmén gives - // 10 is elected with stake 2200.0 and score 0.0003333333333333333 - // 20 is elected with stake 1800.0 and score 0.0005555555555555556 - - // 10 has load 0.0003333333333333333 and supported - // 10 with stake 1000.0 - // 20 has load 0.0005555555555555556 and supported - // 20 with stake 1000.0 - // 30 has load 0 and supported - // 30 with stake 0 - // 40 has load 0 and supported - // 40 with stake 0 - // 2 has load 0.0005555555555555556 and supported - // 10 with stake 600.0 20 with stake 400.0 30 with stake 0.0 - // 4 has load 0.0005555555555555556 and supported - // 10 with stake 600.0 20 with stake 400.0 40 with stake 0.0 - - // Sequential Phragmén with post processing gives - // 10 is elected with stake 2000.0 and score 0.0003333333333333333 - // 20 is elected with stake 2000.0 and score 0.0005555555555555556 - - // 10 has load 0.0003333333333333333 and supported - // 10 with stake 1000.0 - // 20 has load 0.0005555555555555556 and supported - // 20 with stake 1000.0 - // 30 has load 0 and supported - // 30 with stake 0 - // 40 has load 0 and supported - // 40 with stake 0 - // 2 has load 0.0005555555555555556 and supported - // 10 with stake 400.0 20 with stake 600.0 30 with stake 0 - // 4 has load 0.0005555555555555556 and supported - // 10 with stake 600.0 20 with stake 400.0 40 with stake 0.0 - - with_externalities(&mut ExtBuilder::default() - .nominate(false) - .validator_pool(true) - .build(), - || { - // initial validators -- everyone is actually even. 
- assert_eq_uvec!(Session::validators(), vec![40, 30]); - - // Set payee to controller - assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Controller)); - assert_ok!(Staking::set_payee(Origin::signed(20), RewardDestination::Controller)); - assert_ok!(Staking::set_payee(Origin::signed(30), RewardDestination::Controller)); - assert_ok!(Staking::set_payee(Origin::signed(40), RewardDestination::Controller)); - - // default reward for the first session. - let session_reward = 10; - assert_eq!(Staking::current_session_reward(), session_reward); - - // give the man some money - let initial_balance = 1000; - for i in [1, 2, 3, 4, 5, 10, 11, 20, 21].iter() { - let _ = Balances::make_free_balance_be(i, initial_balance); - } - - // bond two account pairs and state interest in nomination. - // 2 will nominate for 10, 20, 30 - assert_ok!(Staking::bond(Origin::signed(1), 2, 1000, RewardDestination::Controller)); - assert_ok!(Staking::nominate(Origin::signed(2), vec![11, 21, 31])); - // 4 will nominate for 10, 20, 40 - assert_ok!(Staking::bond(Origin::signed(3), 4, 1000, RewardDestination::Controller)); - assert_ok!(Staking::nominate(Origin::signed(4), vec![11, 21, 41])); - - System::set_block_number(1); - Session::check_rotate_session(System::block_number()); - assert_eq!(Staking::current_era(), 1); - - // 10 and 20 have more votes, they will be chosen by phragmen. - assert_eq_uvec!(Session::validators(), vec![20, 10]); - - // OLD validators must have already received some rewards. - assert_eq!(Balances::total_balance(&40), 1 + session_reward); - assert_eq!(Balances::total_balance(&30), 1 + session_reward); - - // ------ check the staked value of all parties. - - // total expo of 10, with 1200 coming from nominators (externals), according to phragmen. - assert_eq!(Staking::stakers(11).own, 1000); - assert_eq!(Staking::stakers(11).total, 1000 + 798); - // 2 and 4 supported 10, each with stake 600, according to phragmen. - assert_eq!(Staking::stakers(11).others.iter().map(|e| e.value).collect::>>(), vec![399, 399]); - assert_eq!(Staking::stakers(11).others.iter().map(|e| e.who).collect::>>(), vec![3, 1]); - // total expo of 20, with 500 coming from nominators (externals), according to phragmen. - assert_eq!(Staking::stakers(21).own, 1000); - assert_eq!(Staking::stakers(21).total, 1000 + 1200); - // 2 and 4 supported 20, each with stake 250, according to phragmen. - assert_eq!(Staking::stakers(21).others.iter().map(|e| e.value).collect::>>(), vec![600, 600]); - assert_eq!(Staking::stakers(21).others.iter().map(|e| e.who).collect::>>(), vec![3, 1]); - - // They are not chosen anymore - assert_eq!(Staking::stakers(31).total, 0); - assert_eq!(Staking::stakers(41).total, 0); - - - System::set_block_number(2); - Session::check_rotate_session(System::block_number()); - // next session reward. - let new_session_reward = Staking::session_reward() * Staking::slot_stake(); - // nothing else will happen, era ends and rewards are paid again, - // it is expected that nominators will also be paid. See below - - // Nominator 2: has [400/1800 ~ 2/9 from 10] + [600/2200 ~ 3/11 from 20]'s reward. ==> 2/9 + 3/11 - assert_eq!(Balances::total_balance(&2), initial_balance + (2*new_session_reward/9 + 3*new_session_reward/11)); - // Nominator 4: has [400/1800 ~ 2/9 from 10] + [600/2200 ~ 3/11 from 20]'s reward. ==> 2/9 + 3/11 - assert_eq!(Balances::total_balance(&4), initial_balance + (2*new_session_reward/9 + 3*new_session_reward/11)); - - // 10 got 800 / 1800 external stake => 8/18 =? 
4/9 => Validator's share = 5/9 - assert_eq!(Balances::total_balance(&10), initial_balance + 5*new_session_reward/9 + 2) ; - // 10 got 1200 / 2200 external stake => 12/22 =? 6/11 => Validator's share = 5/11 - assert_eq!(Balances::total_balance(&20), initial_balance + 5*new_session_reward/11); - }); + // PHRAGMEN OUTPUT: running this test with the reference impl gives: + // + // Votes [('10', 1000, ['10']), ('20', 1000, ['20']), ('30', 1000, ['30']), ('40', 1000, ['40']), ('2', 1000, ['10', '20', '30']), ('4', 1000, ['10', '20', '40'])] + // Sequential Phragmén gives + // 10 is elected with stake 2200.0 and score 0.0003333333333333333 + // 20 is elected with stake 1800.0 and score 0.0005555555555555556 + + // 10 has load 0.0003333333333333333 and supported + // 10 with stake 1000.0 + // 20 has load 0.0005555555555555556 and supported + // 20 with stake 1000.0 + // 30 has load 0 and supported + // 30 with stake 0 + // 40 has load 0 and supported + // 40 with stake 0 + // 2 has load 0.0005555555555555556 and supported + // 10 with stake 600.0 20 with stake 400.0 30 with stake 0.0 + // 4 has load 0.0005555555555555556 and supported + // 10 with stake 600.0 20 with stake 400.0 40 with stake 0.0 + + // Sequential Phragmén with post processing gives + // 10 is elected with stake 2000.0 and score 0.0003333333333333333 + // 20 is elected with stake 2000.0 and score 0.0005555555555555556 + + // 10 has load 0.0003333333333333333 and supported + // 10 with stake 1000.0 + // 20 has load 0.0005555555555555556 and supported + // 20 with stake 1000.0 + // 30 has load 0 and supported + // 30 with stake 0 + // 40 has load 0 and supported + // 40 with stake 0 + // 2 has load 0.0005555555555555556 and supported + // 10 with stake 400.0 20 with stake 600.0 30 with stake 0 + // 4 has load 0.0005555555555555556 and supported + // 10 with stake 600.0 20 with stake 400.0 40 with stake 0.0 + + with_externalities( + &mut ExtBuilder::default() + .nominate(false) + .validator_pool(true) + .build(), + || { + // initial validators -- everyone is actually even. + assert_eq_uvec!(Session::validators(), vec![40, 30]); + + // Set payee to controller + assert_ok!(Staking::set_payee( + Origin::signed(10), + RewardDestination::Controller + )); + assert_ok!(Staking::set_payee( + Origin::signed(20), + RewardDestination::Controller + )); + assert_ok!(Staking::set_payee( + Origin::signed(30), + RewardDestination::Controller + )); + assert_ok!(Staking::set_payee( + Origin::signed(40), + RewardDestination::Controller + )); + + // default reward for the first session. + let session_reward = 10; + assert_eq!(Staking::current_session_reward(), session_reward); + + // give the man some money + let initial_balance = 1000; + for i in [1, 2, 3, 4, 5, 10, 11, 20, 21].iter() { + let _ = Balances::make_free_balance_be(i, initial_balance); + } + + // bond two account pairs and state interest in nomination. + // 2 will nominate for 10, 20, 30 + assert_ok!(Staking::bond( + Origin::signed(1), + 2, + 1000, + RewardDestination::Controller + )); + assert_ok!(Staking::nominate(Origin::signed(2), vec![11, 21, 31])); + // 4 will nominate for 10, 20, 40 + assert_ok!(Staking::bond( + Origin::signed(3), + 4, + 1000, + RewardDestination::Controller + )); + assert_ok!(Staking::nominate(Origin::signed(4), vec![11, 21, 41])); + + System::set_block_number(1); + Session::check_rotate_session(System::block_number()); + assert_eq!(Staking::current_era(), 1); + + // 10 and 20 have more votes, they will be chosen by phragmen. 
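+ // For reference, a rough tally of approval stakes under the votes listed in the PHRAGMEN OUTPUT block above: + // 10: 1000 (self) + 1000 (from 2) + 1000 (from 4) = 3000 + // 20: 1000 (self) + 1000 (from 2) + 1000 (from 4) = 3000 + // 30: 1000 (self) + 1000 (from 2) = 2000 + // 40: 1000 (self) + 1000 (from 4) = 2000 + // hence the two validator slots below go to 10 and 20.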
+ assert_eq_uvec!(Session::validators(), vec![20, 10]); + + // OLD validators must have already received some rewards. + assert_eq!(Balances::total_balance(&40), 1 + session_reward); + assert_eq!(Balances::total_balance(&30), 1 + session_reward); + + // ------ check the staked value of all parties. + + // total expo of 10, with 1200 coming from nominators (externals), according to phragmen. + assert_eq!(Staking::stakers(11).own, 1000); + assert_eq!(Staking::stakers(11).total, 1000 + 798); + // 2 and 4 supported 10, each with stake 600, according to phragmen. + assert_eq!( + Staking::stakers(11) + .others + .iter() + .map(|e| e.value) + .collect::<Vec<BalanceOf<Test>>>(), + vec![399, 399] + ); + assert_eq!( + Staking::stakers(11) + .others + .iter() + .map(|e| e.who) + .collect::<Vec<u64>>(), + vec![3, 1] + ); + // total expo of 20, with 500 coming from nominators (externals), according to phragmen. + assert_eq!(Staking::stakers(21).own, 1000); + assert_eq!(Staking::stakers(21).total, 1000 + 1200); + // 2 and 4 supported 20, each with stake 250, according to phragmen. + assert_eq!( + Staking::stakers(21) + .others + .iter() + .map(|e| e.value) + .collect::<Vec<BalanceOf<Test>>>(), + vec![600, 600] + ); + assert_eq!( + Staking::stakers(21) + .others + .iter() + .map(|e| e.who) + .collect::<Vec<u64>>(), + vec![3, 1] + ); + + // They are not chosen anymore + assert_eq!(Staking::stakers(31).total, 0); + assert_eq!(Staking::stakers(41).total, 0); + + System::set_block_number(2); + Session::check_rotate_session(System::block_number()); + // next session reward. + let new_session_reward = Staking::session_reward() * Staking::slot_stake(); + // nothing else will happen, era ends and rewards are paid again, + // it is expected that nominators will also be paid. See below + + // Nominator 2: has [400/1800 ~ 2/9 from 10] + [600/2200 ~ 3/11 from 20]'s reward. ==> 2/9 + 3/11 + assert_eq!( + Balances::total_balance(&2), + initial_balance + (2 * new_session_reward / 9 + 3 * new_session_reward / 11) + ); + // Nominator 4: has [400/1800 ~ 2/9 from 10] + [600/2200 ~ 3/11 from 20]'s reward. ==> 2/9 + 3/11 + assert_eq!( + Balances::total_balance(&4), + initial_balance + (2 * new_session_reward / 9 + 3 * new_session_reward / 11) + ); + + // 10 got 800 / 1800 external stake => 8/18 =? 4/9 => Validator's share = 5/9 + assert_eq!( + Balances::total_balance(&10), + initial_balance + 5 * new_session_reward / 9 + 2 + ); + // 20 got 1200 / 2200 external stake => 12/22 =? 6/11 => Validator's share = 5/11 + assert_eq!( + Balances::total_balance(&20), + initial_balance + 5 * new_session_reward / 11 + ); + }, + ); } #[test] fn nominators_also_get_slashed() { - // A nominator should be slashed if the validator they nominated is slashed - with_externalities(&mut ExtBuilder::default().nominate(false).build(), || { - assert_eq!(Staking::era_length(), 1); - assert_eq!(Staking::validator_count(), 2); - // slash happens immediately. - assert_eq!(Staking::offline_slash_grace(), 0); - // Account 10 has not been reported offline - assert_eq!(Staking::slash_count(&10), 0); - // initial validators - assert_eq_uvec!(Session::validators(), vec![20, 10]); - <OfflineSlash<Test>>::put(Perbill::from_percent(12)); - - // Set payee to controller - assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Controller)); - - // give the man some money.
- let initial_balance = 1000; - for i in [1, 2, 3, 10].iter() { - let _ = Balances::make_free_balance_be(i, initial_balance); - } - - // 2 will nominate for 10 - let nominator_stake = 500; - assert_ok!(Staking::bond(Origin::signed(1), 2, nominator_stake, RewardDestination::default())); - assert_ok!(Staking::nominate(Origin::signed(2), vec![20, 10])); - - // new era, pay rewards, - System::set_block_number(1); - Session::check_rotate_session(System::block_number()); - - // Nominator stash didn't collect any. - assert_eq!(Balances::total_balance(&2), initial_balance); - - // 10 goes offline - Staking::on_offline_validator(10, 4); - let expo = Staking::stakers(10); - let slash_value = Staking::offline_slash() * expo.total * 2_u64.pow(3); - let total_slash = expo.total.min(slash_value); - let validator_slash = expo.own.min(total_slash); - let nominator_slash = nominator_stake.min(total_slash - validator_slash); - - // initial + first era reward + slash - assert_eq!(Balances::total_balance(&10), initial_balance + 10 - validator_slash); - assert_eq!(Balances::total_balance(&2), initial_balance - nominator_slash); - // Because slashing happened. - assert!(Staking::forcing_new_era().is_some()); - }); + // A nominator should be slashed if the validator they nominated is slashed + with_externalities(&mut ExtBuilder::default().nominate(false).build(), || { + assert_eq!(Staking::era_length(), 1); + assert_eq!(Staking::validator_count(), 2); + // slash happens immediately. + assert_eq!(Staking::offline_slash_grace(), 0); + // Account 10 has not been reported offline + assert_eq!(Staking::slash_count(&10), 0); + // initial validators + assert_eq_uvec!(Session::validators(), vec![20, 10]); + <OfflineSlash<Test>>::put(Perbill::from_percent(12)); + + // Set payee to controller + assert_ok!(Staking::set_payee( + Origin::signed(10), + RewardDestination::Controller + )); + + // give the man some money. + let initial_balance = 1000; + for i in [1, 2, 3, 10].iter() { + let _ = Balances::make_free_balance_be(i, initial_balance); + } + + // 2 will nominate for 10 + let nominator_stake = 500; + assert_ok!(Staking::bond( + Origin::signed(1), + 2, + nominator_stake, + RewardDestination::default() + )); + assert_ok!(Staking::nominate(Origin::signed(2), vec![20, 10])); + + // new era, pay rewards, + System::set_block_number(1); + Session::check_rotate_session(System::block_number()); + + // Nominator stash didn't collect any. + assert_eq!(Balances::total_balance(&2), initial_balance); + + // 10 goes offline + Staking::on_offline_validator(10, 4); + let expo = Staking::stakers(10); + let slash_value = Staking::offline_slash() * expo.total * 2_u64.pow(3); + let total_slash = expo.total.min(slash_value); + let validator_slash = expo.own.min(total_slash); + let nominator_slash = nominator_stake.min(total_slash - validator_slash); + + // initial + first era reward + slash + assert_eq!( + Balances::total_balance(&10), + initial_balance + 10 - validator_slash + ); + assert_eq!( + Balances::total_balance(&2), + initial_balance - nominator_slash + ); + // Because slashing happened. + assert!(Staking::forcing_new_era().is_some()); + }); } #[test] fn double_staking_should_fail() { - // should test (in the same order): - // * an account already bonded as stash cannot be be stashed again. - // * an account already bonded as stash cannot nominate. - // * an account already bonded as controller can nominate.
- with_externalities(&mut ExtBuilder::default() - .sessions_per_era(2) - .build(), - || { - let arbitrary_value = 5; - // 2 = controller, 1 stashed => ok - assert_ok!(Staking::bond(Origin::signed(1), 2, arbitrary_value, RewardDestination::default())); - // 4 = not used so far, 1 stashed => not allowed. - assert_noop!(Staking::bond(Origin::signed(1), 4, arbitrary_value, RewardDestination::default()), "stash already bonded"); - // 1 = stashed => attempting to nominate should fail. - assert_noop!(Staking::nominate(Origin::signed(1), vec![1]), "not a controller"); - // 2 = controller => nominating should work. - assert_ok!(Staking::nominate(Origin::signed(2), vec![1])); - }); + // should test (in the same order): + // * an account already bonded as stash cannot be stashed again. + // * an account already bonded as stash cannot nominate. + // * an account already bonded as controller can nominate. + with_externalities( + &mut ExtBuilder::default().sessions_per_era(2).build(), + || { + let arbitrary_value = 5; + // 2 = controller, 1 stashed => ok + assert_ok!(Staking::bond( + Origin::signed(1), + 2, + arbitrary_value, + RewardDestination::default() + )); + // 4 = not used so far, 1 stashed => not allowed. + assert_noop!( + Staking::bond( + Origin::signed(1), + 4, + arbitrary_value, + RewardDestination::default() + ), + "stash already bonded" + ); + // 1 = stashed => attempting to nominate should fail. + assert_noop!( + Staking::nominate(Origin::signed(1), vec![1]), + "not a controller" + ); + // 2 = controller => nominating should work. + assert_ok!(Staking::nominate(Origin::signed(2), vec![1])); + }, + ); } #[test] fn double_controlling_should_fail() { - // should test (in the same order): - // * an account already bonded as controller CANNOT be reused as the controller of another account. - with_externalities(&mut ExtBuilder::default() - .sessions_per_era(2) - .build(), - || { - let arbitrary_value = 5; - // 2 = controller, 1 stashed => ok - assert_ok!(Staking::bond(Origin::signed(1), 2, arbitrary_value, RewardDestination::default())); - // 2 = controller, 3 stashed (Note that 2 is reused.) => no-op - assert_noop!(Staking::bond(Origin::signed(3), 2, arbitrary_value, RewardDestination::default()), "controller already paired"); - }); + // should test (in the same order): + // * an account already bonded as controller CANNOT be reused as the controller of another account. + with_externalities( + &mut ExtBuilder::default().sessions_per_era(2).build(), + || { + let arbitrary_value = 5; + // 2 = controller, 1 stashed => ok + assert_ok!(Staking::bond( + Origin::signed(1), + 2, + arbitrary_value, + RewardDestination::default() + )); + // 2 = controller, 3 stashed (Note that 2 is reused.) => no-op + assert_noop!( + Staking::bond( + Origin::signed(3), + 2, + arbitrary_value, + RewardDestination::default() + ), + "controller already paired" + ); + }, + ); } #[test] fn session_and_eras_work() { - with_externalities(&mut ExtBuilder::default() - .sessions_per_era(2) - .build(), - || { - assert_eq!(Staking::era_length(), 2); - assert_eq!(Staking::sessions_per_era(), 2); - assert_eq!(Staking::last_era_length_change(), 0); - assert_eq!(Staking::current_era(), 0); - assert_eq!(Session::current_index(), 0); - - // Block 1: No change.
- System::set_block_number(1); - Session::check_rotate_session(System::block_number()); - assert_eq!(Session::current_index(), 1); - assert_eq!(Staking::sessions_per_era(), 2); - assert_eq!(Staking::last_era_length_change(), 0); - assert_eq!(Staking::current_era(), 0); - - // Block 2: Simple era change. - System::set_block_number(2); - Session::check_rotate_session(System::block_number()); - assert_eq!(Session::current_index(), 2); - assert_eq!(Staking::sessions_per_era(), 2); - assert_eq!(Staking::last_era_length_change(), 0); - assert_eq!(Staking::current_era(), 1); - - // Block 3: Schedule an era length change; no visible changes. - System::set_block_number(3); - assert_ok!(Staking::set_sessions_per_era(3)); - Session::check_rotate_session(System::block_number()); - assert_eq!(Session::current_index(), 3); - assert_eq!(Staking::sessions_per_era(), 2); - assert_eq!(Staking::last_era_length_change(), 0); - assert_eq!(Staking::current_era(), 1); - - // Block 4: Era change kicks in. - System::set_block_number(4); - Session::check_rotate_session(System::block_number()); - assert_eq!(Session::current_index(), 4); - assert_eq!(Staking::sessions_per_era(), 3); - assert_eq!(Staking::last_era_length_change(), 4); - assert_eq!(Staking::current_era(), 2); - - // Block 5: No change. - System::set_block_number(5); - Session::check_rotate_session(System::block_number()); - assert_eq!(Session::current_index(), 5); - assert_eq!(Staking::sessions_per_era(), 3); - assert_eq!(Staking::last_era_length_change(), 4); - assert_eq!(Staking::current_era(), 2); - - // Block 6: No change. - System::set_block_number(6); - Session::check_rotate_session(System::block_number()); - assert_eq!(Session::current_index(), 6); - assert_eq!(Staking::sessions_per_era(), 3); - assert_eq!(Staking::last_era_length_change(), 4); - assert_eq!(Staking::current_era(), 2); - - // Block 7: Era increment. - System::set_block_number(7); - Session::check_rotate_session(System::block_number()); - assert_eq!(Session::current_index(), 7); - assert_eq!(Staking::sessions_per_era(), 3); - assert_eq!(Staking::last_era_length_change(), 4); - assert_eq!(Staking::current_era(), 3); - }); + with_externalities( + &mut ExtBuilder::default().sessions_per_era(2).build(), + || { + assert_eq!(Staking::era_length(), 2); + assert_eq!(Staking::sessions_per_era(), 2); + assert_eq!(Staking::last_era_length_change(), 0); + assert_eq!(Staking::current_era(), 0); + assert_eq!(Session::current_index(), 0); + + // Block 1: No change. + System::set_block_number(1); + Session::check_rotate_session(System::block_number()); + assert_eq!(Session::current_index(), 1); + assert_eq!(Staking::sessions_per_era(), 2); + assert_eq!(Staking::last_era_length_change(), 0); + assert_eq!(Staking::current_era(), 0); + + // Block 2: Simple era change. + System::set_block_number(2); + Session::check_rotate_session(System::block_number()); + assert_eq!(Session::current_index(), 2); + assert_eq!(Staking::sessions_per_era(), 2); + assert_eq!(Staking::last_era_length_change(), 0); + assert_eq!(Staking::current_era(), 1); + + // Block 3: Schedule an era length change; no visible changes. + System::set_block_number(3); + assert_ok!(Staking::set_sessions_per_era(3)); + Session::check_rotate_session(System::block_number()); + assert_eq!(Session::current_index(), 3); + assert_eq!(Staking::sessions_per_era(), 2); + assert_eq!(Staking::last_era_length_change(), 0); + assert_eq!(Staking::current_era(), 1); + + // Block 4: Era change kicks in. 
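+ // For reference, the expected era boundaries can be worked out directly: the length change is scheduled at block 3 but only takes effect at the rotation on block 4; from then on `last_era_length_change == 4` and eras last 3 sessions, so the next era begins at block 4 + 3 = 7 ("Block 7: Era increment" below).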
+ System::set_block_number(4); + Session::check_rotate_session(System::block_number()); + assert_eq!(Session::current_index(), 4); + assert_eq!(Staking::sessions_per_era(), 3); + assert_eq!(Staking::last_era_length_change(), 4); + assert_eq!(Staking::current_era(), 2); + + // Block 5: No change. + System::set_block_number(5); + Session::check_rotate_session(System::block_number()); + assert_eq!(Session::current_index(), 5); + assert_eq!(Staking::sessions_per_era(), 3); + assert_eq!(Staking::last_era_length_change(), 4); + assert_eq!(Staking::current_era(), 2); + + // Block 6: No change. + System::set_block_number(6); + Session::check_rotate_session(System::block_number()); + assert_eq!(Session::current_index(), 6); + assert_eq!(Staking::sessions_per_era(), 3); + assert_eq!(Staking::last_era_length_change(), 4); + assert_eq!(Staking::current_era(), 2); + + // Block 7: Era increment. + System::set_block_number(7); + Session::check_rotate_session(System::block_number()); + assert_eq!(Session::current_index(), 7); + assert_eq!(Staking::sessions_per_era(), 3); + assert_eq!(Staking::last_era_length_change(), 4); + assert_eq!(Staking::current_era(), 3); + }, + ); } #[test] fn cannot_transfer_staked_balance() { - // Tests that a stash account cannot transfer funds - with_externalities(&mut ExtBuilder::default().nominate(false).build(), || { - // Confirm account 11 is stashed - assert_eq!(Staking::bonded(&11), Some(10)); - // Confirm account 11 has some free balance - assert_eq!(Balances::free_balance(&11), 1000); - // Confirm account 11 (via controller 10) is totally staked - assert_eq!(Staking::stakers(&11).total, 1000); - // Confirm account 11 cannot transfer as a result - assert_noop!(Balances::transfer(Origin::signed(11), 20, 1), "account liquidity restrictions prevent withdrawal"); - - // Give account 11 extra free balance - let _ = Balances::make_free_balance_be(&11, 10000); - // Confirm that account 11 can now transfer some balance - assert_ok!(Balances::transfer(Origin::signed(11), 20, 1)); - }); + // Tests that a stash account cannot transfer funds + with_externalities(&mut ExtBuilder::default().nominate(false).build(), || { + // Confirm account 11 is stashed + assert_eq!(Staking::bonded(&11), Some(10)); + // Confirm account 11 has some free balance + assert_eq!(Balances::free_balance(&11), 1000); + // Confirm account 11 (via controller 10) is totally staked + assert_eq!(Staking::stakers(&11).total, 1000); + // Confirm account 11 cannot transfer as a result + assert_noop!( + Balances::transfer(Origin::signed(11), 20, 1), + "account liquidity restrictions prevent withdrawal" + ); + + // Give account 11 extra free balance + let _ = Balances::make_free_balance_be(&11, 10000); + // Confirm that account 11 can now transfer some balance + assert_ok!(Balances::transfer(Origin::signed(11), 20, 1)); + }); } #[test] fn cannot_transfer_staked_balance_2() { - // Tests that a stash account cannot transfer funds - // Same test as above but with 20, and more accurate. 
- // 21 has 2000 free balance but 1000 at stake - with_externalities(&mut ExtBuilder::default() - .nominate(false) - .fare(true) - .build(), - || { - // Confirm account 21 is stashed - assert_eq!(Staking::bonded(&21), Some(20)); - // Confirm account 21 has some free balance - assert_eq!(Balances::free_balance(&21), 2000); - // Confirm account 21 (via controller 20) is totally staked - assert_eq!(Staking::stakers(&21).total, 1000); - // Confirm account 21 can transfer at most 1000 - assert_noop!(Balances::transfer(Origin::signed(21), 20, 1001), "account liquidity restrictions prevent withdrawal"); - assert_ok!(Balances::transfer(Origin::signed(21), 20, 1000)); - }); + // Tests that a stash account cannot transfer funds + // Same test as above but with 20, and more accurate. + // 21 has 2000 free balance but 1000 at stake + with_externalities( + &mut ExtBuilder::default().nominate(false).fare(true).build(), + || { + // Confirm account 21 is stashed + assert_eq!(Staking::bonded(&21), Some(20)); + // Confirm account 21 has some free balance + assert_eq!(Balances::free_balance(&21), 2000); + // Confirm account 21 (via controller 20) is totally staked + assert_eq!(Staking::stakers(&21).total, 1000); + // Confirm account 21 can transfer at most 1000 + assert_noop!( + Balances::transfer(Origin::signed(21), 20, 1001), + "account liquidity restrictions prevent withdrawal" + ); + assert_ok!(Balances::transfer(Origin::signed(21), 20, 1000)); + }, + ); } #[test] fn cannot_reserve_staked_balance() { - // Checks that a bonded account cannot reserve balance from free balance - with_externalities(&mut ExtBuilder::default().build(), || { - // Confirm account 11 is stashed - assert_eq!(Staking::bonded(&11), Some(10)); - // Confirm account 11 has some free balance - assert_eq!(Balances::free_balance(&11), 1000); - // Confirm account 11 (via controller 10) is totally staked - assert_eq!(Staking::stakers(&11).own, 1000); - // Confirm account 11 cannot transfer as a result - assert_noop!(Balances::reserve(&11, 1), "account liquidity restrictions prevent withdrawal"); - - // Give account 11 extra free balance - let _ = Balances::make_free_balance_be(&11, 10000); - // Confirm account 11 can now reserve balance - assert_ok!(Balances::reserve(&11, 1)); - }); + // Checks that a bonded account cannot reserve balance from free balance + with_externalities(&mut ExtBuilder::default().build(), || { + // Confirm account 11 is stashed + assert_eq!(Staking::bonded(&11), Some(10)); + // Confirm account 11 has some free balance + assert_eq!(Balances::free_balance(&11), 1000); + // Confirm account 11 (via controller 10) is totally staked + assert_eq!(Staking::stakers(&11).own, 1000); + // Confirm account 11 cannot transfer as a result + assert_noop!( + Balances::reserve(&11, 1), + "account liquidity restrictions prevent withdrawal" + ); + + // Give account 11 extra free balance + let _ = Balances::make_free_balance_be(&11, 10000); + // Confirm account 11 can now reserve balance + assert_ok!(Balances::reserve(&11, 1)); + }); } #[test] fn reward_destination_works() { - // Rewards go to the correct destination as determined in Payee - with_externalities(&mut ExtBuilder::default().nominate(false).build(), || { - // Check that account 11 is a validator - assert!(Staking::current_elected().contains(&11)); - // Check the balance of the validator account - assert_eq!(Balances::free_balance(&10), 1); - // Check the balance of the stash account - assert_eq!(Balances::free_balance(&11), 1000); - // Check how much is at stake - 
assert_eq!(Staking::ledger(&10), Some(StakingLedger { stash: 11, total: 1000, active: 1000, unlocking: vec![] })); - // Check current session reward is 10 - let session_reward0 = Staking::current_session_reward(); // 10 - - // Move forward the system for payment - System::set_block_number(1); - Timestamp::set_timestamp(5); - Session::check_rotate_session(System::block_number()); - - // Check that RewardDestination is Staked (default) - assert_eq!(Staking::payee(&11), RewardDestination::Staked); - // Check that reward went to the stash account of validator - assert_eq!(Balances::free_balance(&11), 1000 + session_reward0); - // Check that amount at stake increased accordingly - assert_eq!(Staking::ledger(&10), Some(StakingLedger { stash: 11, total: 1000 + session_reward0, active: 1000 + session_reward0, unlocking: vec![] })); - // Update current session reward - let session_reward1 = Staking::current_session_reward(); // 1010 (1* slot_stake) - - //Change RewardDestination to Stash - <Payee<Test>>::insert(&11, RewardDestination::Stash); - - // Move forward the system for payment - System::set_block_number(2); - Timestamp::set_timestamp(10); - Session::check_rotate_session(System::block_number()); - - // Check that RewardDestination is Stash - assert_eq!(Staking::payee(&11), RewardDestination::Stash); - // Check that reward went to the stash account - assert_eq!(Balances::free_balance(&11), 1000 + session_reward0 + session_reward1); - // Record this value - let recorded_stash_balance = 1000 + session_reward0 + session_reward1; - // Check that amount at stake is NOT increased - assert_eq!(Staking::ledger(&10), Some(StakingLedger { stash: 11, total: 1000 + session_reward0, active: 1000 + session_reward0, unlocking: vec![] })); - - // Change RewardDestination to Controller - <Payee<Test>>::insert(&11, RewardDestination::Controller); - - // Check controller balance - assert_eq!(Balances::free_balance(&10), 1); - - // Move forward the system for payment - System::set_block_number(3); - Timestamp::set_timestamp(15); - Session::check_rotate_session(System::block_number()); - let session_reward2 = Staking::current_session_reward(); // 1010 (1* slot_stake) - - // Check that RewardDestination is Controller - assert_eq!(Staking::payee(&11), RewardDestination::Controller); - // Check that reward went to the controller account - assert_eq!(Balances::free_balance(&10), 1 + session_reward2); - // Check that amount at stake is NOT increased - assert_eq!(Staking::ledger(&10), Some(StakingLedger { stash: 11, total: 1000 + session_reward0, active: 1000 + session_reward0, unlocking: vec![] })); - // Check that amount in staked account is NOT increased.
- assert_eq!(Balances::free_balance(&11), recorded_stash_balance); - }); + // Rewards go to the correct destination as determined in Payee + with_externalities(&mut ExtBuilder::default().nominate(false).build(), || { + // Check that account 11 is a validator + assert!(Staking::current_elected().contains(&11)); + // Check the balance of the validator account + assert_eq!(Balances::free_balance(&10), 1); + // Check the balance of the stash account + assert_eq!(Balances::free_balance(&11), 1000); + // Check how much is at stake + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000, + active: 1000, + unlocking: vec![] + }) + ); + // Check current session reward is 10 + let session_reward0 = Staking::current_session_reward(); // 10 + + // Move forward the system for payment + System::set_block_number(1); + Timestamp::set_timestamp(5); + Session::check_rotate_session(System::block_number()); + + // Check that RewardDestination is Staked (default) + assert_eq!(Staking::payee(&11), RewardDestination::Staked); + // Check that reward went to the stash account of validator + assert_eq!(Balances::free_balance(&11), 1000 + session_reward0); + // Check that amount at stake increased accordingly + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000 + session_reward0, + active: 1000 + session_reward0, + unlocking: vec![] + }) + ); + // Update current session reward + let session_reward1 = Staking::current_session_reward(); // 1010 (1* slot_stake) + + // Change RewardDestination to Stash + <Payee<Test>>::insert(&11, RewardDestination::Stash); + + // Move forward the system for payment + System::set_block_number(2); + Timestamp::set_timestamp(10); + Session::check_rotate_session(System::block_number()); + + // Check that RewardDestination is Stash + assert_eq!(Staking::payee(&11), RewardDestination::Stash); + // Check that reward went to the stash account + assert_eq!( + Balances::free_balance(&11), + 1000 + session_reward0 + session_reward1 + ); + // Record this value + let recorded_stash_balance = 1000 + session_reward0 + session_reward1; + // Check that amount at stake is NOT increased + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000 + session_reward0, + active: 1000 + session_reward0, + unlocking: vec![] + }) + ); + + // Change RewardDestination to Controller + <Payee<Test>>::insert(&11, RewardDestination::Controller); + + // Check controller balance + assert_eq!(Balances::free_balance(&10), 1); + + // Move forward the system for payment + System::set_block_number(3); + Timestamp::set_timestamp(15); + Session::check_rotate_session(System::block_number()); + let session_reward2 = Staking::current_session_reward(); // 1010 (1* slot_stake) + + // Check that RewardDestination is Controller + assert_eq!(Staking::payee(&11), RewardDestination::Controller); + // Check that reward went to the controller account + assert_eq!(Balances::free_balance(&10), 1 + session_reward2); + // Check that amount at stake is NOT increased + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000 + session_reward0, + active: 1000 + session_reward0, + unlocking: vec![] + }) + ); + // Check that amount in staked account is NOT increased. + assert_eq!(Balances::free_balance(&11), recorded_stash_balance); + }); } #[test] fn validator_payment_prefs_work() { - // Test that validator preferences are correctly honored - // Note: unstake threshold is being directly tested in slashing tests.
- // This test will focus on validator payment. - with_externalities(&mut ExtBuilder::default() - .session_length(3) - .sessions_per_era(3) - .build(), - || { - // Initial config - let session_reward = 10; - let validator_cut = 5; - let stash_initial_balance = Balances::total_balance(&11); - assert_eq!(Staking::current_session_reward(), session_reward); - - // check the balance of a validator accounts. - assert_eq!(Balances::total_balance(&10), 1); - // check the balance of a validator's stash accounts. - assert_eq!(Balances::total_balance(&11), stash_initial_balance); - // and the nominator (to-be) - let _ = Balances::make_free_balance_be(&2, 500); - - // add a dummy nominator. - <Stakers<Test>>::insert(&11, Exposure { - own: 500, // equal division indicates that the reward will be equally divided among validator and nominator. - total: 1000, - others: vec![IndividualExposure {who: 2, value: 500 }] - }); - <Payee<Test>>::insert(&2, RewardDestination::Stash); - <Validators<Test>>::insert(&11, ValidatorPrefs { - unstake_threshold: 3, - validator_payment: validator_cut - }); - - // ------------ Fast forward - // Block 3 => Session 1 => Era 0 - let mut block = 3; - System::set_block_number(block); - Session::check_rotate_session(System::block_number()); - assert_eq!(Staking::current_era(), 0); - assert_eq!(Session::current_index(), 1); - - // session triggered: the reward value stashed should be 10 -- defined in ExtBuilder genesis. - assert_eq!(Staking::current_session_reward(), session_reward); - assert_eq!(Staking::current_era_reward(), session_reward); - - block = 6; // Block 6 => Session 2 => Era 0 - System::set_block_number(block); - Session::check_rotate_session(System::block_number()); - assert_eq!(Staking::current_era(), 0); - assert_eq!(Session::current_index(), 2); - - assert_eq!(Staking::current_session_reward(), session_reward); - assert_eq!(Staking::current_era_reward(), 2*session_reward); - - block = 9; // Block 9 => Session 3 => Era 1 - System::set_block_number(block); - Session::check_rotate_session(System::block_number()); - assert_eq!(Staking::current_era(), 1); - assert_eq!(Session::current_index(), 3); - - // whats left to be shared is the sum of 3 rounds minus the validator's cut. - let shared_cut = 3 * session_reward - validator_cut; - // Validator's payee is Staked account, 11, reward will be paid here. - assert_eq!(Balances::total_balance(&11), stash_initial_balance + shared_cut/2 + validator_cut); - // Controller account will not get any reward. - assert_eq!(Balances::total_balance(&10), 1); - // Rest of the reward will be shared and paid to the nominator in stake. - assert_eq!(Balances::total_balance(&2), 500 + shared_cut/2); - }); - + // Test that validator preferences are correctly honored + // Note: unstake threshold is being directly tested in slashing tests. + // This test will focus on validator payment. + with_externalities( + &mut ExtBuilder::default() + .session_length(3) + .sessions_per_era(3) + .build(), + || { + // Initial config + let session_reward = 10; + let validator_cut = 5; + let stash_initial_balance = Balances::total_balance(&11); + assert_eq!(Staking::current_session_reward(), session_reward); + + // check the balance of a validator account. + assert_eq!(Balances::total_balance(&10), 1); + // check the balance of a validator's stash account. + assert_eq!(Balances::total_balance(&11), stash_initial_balance); + // and the nominator (to-be) + let _ = Balances::make_free_balance_be(&2, 500); + + // add a dummy nominator.
+ <Stakers<Test>>::insert( + &11, + Exposure { + own: 500, // equal division indicates that the reward will be equally divided among validator and nominator. + total: 1000, + others: vec![IndividualExposure { who: 2, value: 500 }], + }, + ); + <Payee<Test>>::insert(&2, RewardDestination::Stash); + <Validators<Test>>::insert( + &11, + ValidatorPrefs { + unstake_threshold: 3, + validator_payment: validator_cut, + }, + ); + + // ------------ Fast forward + // Block 3 => Session 1 => Era 0 + let mut block = 3; + System::set_block_number(block); + Session::check_rotate_session(System::block_number()); + assert_eq!(Staking::current_era(), 0); + assert_eq!(Session::current_index(), 1); + + // session triggered: the reward value stashed should be 10 -- defined in ExtBuilder genesis. + assert_eq!(Staking::current_session_reward(), session_reward); + assert_eq!(Staking::current_era_reward(), session_reward); + + block = 6; // Block 6 => Session 2 => Era 0 + System::set_block_number(block); + Session::check_rotate_session(System::block_number()); + assert_eq!(Staking::current_era(), 0); + assert_eq!(Session::current_index(), 2); + + assert_eq!(Staking::current_session_reward(), session_reward); + assert_eq!(Staking::current_era_reward(), 2 * session_reward); + + block = 9; // Block 9 => Session 3 => Era 1 + System::set_block_number(block); + Session::check_rotate_session(System::block_number()); + assert_eq!(Staking::current_era(), 1); + assert_eq!(Session::current_index(), 3); + + // what's left to be shared is the sum of 3 rounds minus the validator's cut. + let shared_cut = 3 * session_reward - validator_cut; + // Validator's payee is Staked account, 11, reward will be paid here. + assert_eq!( + Balances::total_balance(&11), + stash_initial_balance + shared_cut / 2 + validator_cut + ); + // Controller account will not get any reward. + assert_eq!(Balances::total_balance(&10), 1); + // Rest of the reward will be shared and paid to the nominator in stake. + assert_eq!(Balances::total_balance(&2), 500 + shared_cut / 2); + }, + ); } #[test] fn bond_extra_works() { - // Tests that extra `free_balance` in the stash can be added to stake - // NOTE: this tests only verifies `StakingLedger` for correct updates - // See `bond_extra_and_withdraw_unbonded_works` for more details and updates on `Exposure`.
- with_externalities(&mut ExtBuilder::default().build(), - || { - // Check that account 10 is a validator - assert!(<Validators<Test>>::exists(11)); - // Check that account 10 is bonded to account 11 - assert_eq!(Staking::bonded(&11), Some(10)); - // Check how much is at stake - assert_eq!(Staking::ledger(&10), Some(StakingLedger { stash: 11, total: 1000, active: 1000, unlocking: vec![] })); - - // Give account 11 some large free balance greater than total - let _ = Balances::make_free_balance_be(&11, 1000000); - - // Call the bond_extra function from controller, add only 100 - assert_ok!(Staking::bond_extra(Origin::signed(11), 100)); - // There should be 100 more `total` and `active` in the ledger - assert_eq!(Staking::ledger(&10), Some(StakingLedger { stash: 11, total: 1000 + 100, active: 1000 + 100, unlocking: vec![] })); - - // Call the bond_extra function with a large number, should handle it - assert_ok!(Staking::bond_extra(Origin::signed(11), u64::max_value())); - // The full amount of the funds should now be in the total and active - assert_eq!(Staking::ledger(&10), Some(StakingLedger { stash: 11, total: 1000000, active: 1000000, unlocking: vec![] })); - }); + // Tests that extra `free_balance` in the stash can be added to stake + // NOTE: this test only verifies `StakingLedger` for correct updates + // See `bond_extra_and_withdraw_unbonded_works` for more details and updates on `Exposure`. + with_externalities(&mut ExtBuilder::default().build(), || { + // Check that account 10 is a validator + assert!(<Validators<Test>>::exists(11)); + // Check that account 10 is bonded to account 11 + assert_eq!(Staking::bonded(&11), Some(10)); + // Check how much is at stake + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000, + active: 1000, + unlocking: vec![] + }) + ); + + // Give account 11 some large free balance greater than total + let _ = Balances::make_free_balance_be(&11, 1000000); + + // Call the bond_extra function from controller, add only 100 + assert_ok!(Staking::bond_extra(Origin::signed(11), 100)); + // There should be 100 more `total` and `active` in the ledger + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000 + 100, + active: 1000 + 100, + unlocking: vec![] + }) + ); + + // Call the bond_extra function with a large number, should handle it + assert_ok!(Staking::bond_extra(Origin::signed(11), u64::max_value())); + // The full amount of the funds should now be in the total and active + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000000, + active: 1000000, + unlocking: vec![] + }) + ); + }); } #[test] fn bond_extra_and_withdraw_unbonded_works() { - // * Should test - // * Given an account being bonded [and chosen as a validator](not mandatory) - // * It can add extra funds to the bonded account. - // * it can unbond a portion of its funds from the stash account. - // * Once the unbonding period is done, it can actually take the funds out of the stash. - with_externalities(&mut ExtBuilder::default() - .nominate(false) - .build(), - || { - // Set payee to controller.
avoids confusion - assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Controller)); - - // Set unbonding era (bonding_duration) to 2 - assert_ok!(Staking::set_bonding_duration(2)); - - // Give account 11 some large free balance greater than total - let _ = Balances::make_free_balance_be(&11, 1000000); - - // Initial config should be correct - assert_eq!(Staking::sessions_per_era(), 1); - assert_eq!(Staking::current_era(), 0); - assert_eq!(Session::current_index(), 0); - assert_eq!(Staking::current_session_reward(), 10); - - // check the balance of a validator accounts. - assert_eq!(Balances::total_balance(&10), 1); - - // confirm that 10 is a normal validator and gets paid at the end of the era. - System::set_block_number(1); - Session::check_rotate_session(System::block_number()); - - // Initial state of 10 - assert_eq!(Staking::ledger(&10), Some(StakingLedger { stash: 11, total: 1000, active: 1000, unlocking: vec![] })); - assert_eq!(Staking::stakers(&11), Exposure { total: 1000, own: 1000, others: vec![] }); - - // deposit the extra 100 units - Staking::bond_extra(Origin::signed(11), 100).unwrap(); - - assert_eq!(Staking::ledger(&10), Some(StakingLedger { stash: 11, total: 1000 + 100, active: 1000 + 100, unlocking: vec![] })); - // Exposure is a snapshot! only updated after the next era update. - assert_ne!(Staking::stakers(&11), Exposure { total: 1000 + 100, own: 1000 + 100, others: vec![] }); - - // trigger next era. - System::set_block_number(2);Timestamp::set_timestamp(10);Session::check_rotate_session(System::block_number()); - assert_eq!(Staking::current_era(), 2); - assert_eq!(Session::current_index(), 2); - - // ledger should be the same. - assert_eq!(Staking::ledger(&10), Some(StakingLedger { stash: 11, total: 1000 + 100, active: 1000 + 100, unlocking: vec![] })); - // Exposure is now updated. - assert_eq!(Staking::stakers(&11), Exposure { total: 1000 + 100, own: 1000 + 100, others: vec![] }); - - // Unbond almost all of the funds in stash. - Staking::unbond(Origin::signed(10), 1000).unwrap(); - assert_eq!(Staking::ledger(&10), Some(StakingLedger { - stash: 11, total: 1000 + 100, active: 100, unlocking: vec![UnlockChunk{ value: 1000, era: 2 + 2}] }) - ); - - // Attempting to free the balances now will fail. 2 eras need to pass. - Staking::withdraw_unbonded(Origin::signed(10)).unwrap(); - assert_eq!(Staking::ledger(&10), Some(StakingLedger { - stash: 11, total: 1000 + 100, active: 100, unlocking: vec![UnlockChunk{ value: 1000, era: 2 + 2}] })); - - // trigger next era. - System::set_block_number(3); - Session::check_rotate_session(System::block_number()); - - assert_eq!(Staking::current_era(), 3); - assert_eq!(Session::current_index(), 3); - - // nothing yet - Staking::withdraw_unbonded(Origin::signed(10)).unwrap(); - assert_eq!(Staking::ledger(&10), Some(StakingLedger { - stash: 11, total: 1000 + 100, active: 100, unlocking: vec![UnlockChunk{ value: 1000, era: 2 + 2}] })); - - // trigger next era. - System::set_block_number(4); - Session::check_rotate_session(System::block_number()); - assert_eq!(Staking::current_era(), 4); - assert_eq!(Session::current_index(), 4); - - Staking::withdraw_unbonded(Origin::signed(10)).unwrap(); - // Now the value is free and the staking ledger is updated. - assert_eq!(Staking::ledger(&10), Some(StakingLedger { - stash: 11, total: 100, active: 100, unlocking: vec![] })); - }) + // * Should test + // * Given an account being bonded [and chosen as a validator](not mandatory) + // * It can add extra funds to the bonded account. 
+ // * it can unbond a portion of its funds from the stash account. + // * Once the unbonding period is done, it can actually take the funds out of the stash. + with_externalities(&mut ExtBuilder::default().nominate(false).build(), || { + // Set payee to controller. avoids confusion + assert_ok!(Staking::set_payee( + Origin::signed(10), + RewardDestination::Controller + )); + + // Set unbonding era (bonding_duration) to 2 + assert_ok!(Staking::set_bonding_duration(2)); + + // Give account 11 some large free balance greater than total + let _ = Balances::make_free_balance_be(&11, 1000000); + + // Initial config should be correct + assert_eq!(Staking::sessions_per_era(), 1); + assert_eq!(Staking::current_era(), 0); + assert_eq!(Session::current_index(), 0); + assert_eq!(Staking::current_session_reward(), 10); + + // check the balance of a validator accounts. + assert_eq!(Balances::total_balance(&10), 1); + + // confirm that 10 is a normal validator and gets paid at the end of the era. + System::set_block_number(1); + Session::check_rotate_session(System::block_number()); + + // Initial state of 10 + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000, + active: 1000, + unlocking: vec![] + }) + ); + assert_eq!( + Staking::stakers(&11), + Exposure { + total: 1000, + own: 1000, + others: vec![] + } + ); + + // deposit the extra 100 units + Staking::bond_extra(Origin::signed(11), 100).unwrap(); + + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000 + 100, + active: 1000 + 100, + unlocking: vec![] + }) + ); + // Exposure is a snapshot! only updated after the next era update. + assert_ne!( + Staking::stakers(&11), + Exposure { + total: 1000 + 100, + own: 1000 + 100, + others: vec![] + } + ); + + // trigger next era. + System::set_block_number(2); + Timestamp::set_timestamp(10); + Session::check_rotate_session(System::block_number()); + assert_eq!(Staking::current_era(), 2); + assert_eq!(Session::current_index(), 2); + + // ledger should be the same. + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000 + 100, + active: 1000 + 100, + unlocking: vec![] + }) + ); + // Exposure is now updated. + assert_eq!( + Staking::stakers(&11), + Exposure { + total: 1000 + 100, + own: 1000 + 100, + others: vec![] + } + ); + + // Unbond almost all of the funds in stash. + Staking::unbond(Origin::signed(10), 1000).unwrap(); + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000 + 100, + active: 100, + unlocking: vec![UnlockChunk { + value: 1000, + era: 2 + 2 + }] + }) + ); + + // Attempting to free the balances now will fail. 2 eras need to pass. + Staking::withdraw_unbonded(Origin::signed(10)).unwrap(); + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000 + 100, + active: 100, + unlocking: vec![UnlockChunk { + value: 1000, + era: 2 + 2 + }] + }) + ); + + // trigger next era. + System::set_block_number(3); + Session::check_rotate_session(System::block_number()); + + assert_eq!(Staking::current_era(), 3); + assert_eq!(Session::current_index(), 3); + + // nothing yet + Staking::withdraw_unbonded(Origin::signed(10)).unwrap(); + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000 + 100, + active: 100, + unlocking: vec![UnlockChunk { + value: 1000, + era: 2 + 2 + }] + }) + ); + + // trigger next era. 
+ System::set_block_number(4); + Session::check_rotate_session(System::block_number()); + assert_eq!(Staking::current_era(), 4); + assert_eq!(Session::current_index(), 4); + + Staking::withdraw_unbonded(Origin::signed(10)).unwrap(); + // Now the value is free and the staking ledger is updated. + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 100, + active: 100, + unlocking: vec![] + }) + ); + }) } #[test] fn slot_stake_is_least_staked_validator_and_exposure_defines_maximum_punishment() { - // Test that slot_stake is determined by the least staked validator - // Test that slot_stake is the maximum punishment that can happen to a validator - with_externalities(&mut ExtBuilder::default() - .nominate(false) - .fare(false) - .build(), - || { - // Confirm validator count is 2 - assert_eq!(Staking::validator_count(), 2); - // Confirm account 10 and 20 are validators - assert!(<Validators<Test>>::exists(&11) && <Validators<Test>>::exists(&21)); - - assert_eq!(Staking::stakers(&11).total, 1000); - assert_eq!(Staking::stakers(&21).total, 2000); - - // Give the man some money. - let _ = Balances::make_free_balance_be(&10, 1000); - let _ = Balances::make_free_balance_be(&20, 1000); - - // We confirm initialized slot_stake is this value - assert_eq!(Staking::slot_stake(), Staking::stakers(&11).total); - - // Now lets lower account 20 stake - <Stakers<Test>>::insert(&21, Exposure { total: 69, own: 69, others: vec![] }); - assert_eq!(Staking::stakers(&21).total, 69); - <Ledger<Test>>::insert(&20, StakingLedger { stash: 22, total: 69, active: 69, unlocking: vec![] }); - - // New era --> rewards are paid --> stakes are changed - System::set_block_number(1); - Session::check_rotate_session(System::block_number()); - assert_eq!(Staking::current_era(), 1); - - // -- new balances + reward - assert_eq!(Staking::stakers(&11).total, 1000 + 10); - assert_eq!(Staking::stakers(&21).total, 69 + 10); - - // -- slot stake should also be updated. - assert_eq!(Staking::slot_stake(), 79); - - // If 10 gets slashed now, it will be slashed by 5% of exposure.total * 2.pow(unstake_thresh) - Staking::on_offline_validator(10, 4); - // Confirm user has been reported - assert_eq!(Staking::slash_count(&11), 4); - // check the balance of 10 (slash will be deducted from free balance.) - assert_eq!(Balances::free_balance(&11), 1000 + 10 - 50 /*5% of 1000*/ * 8 /*2**3*/); - }); + // Test that slot_stake is determined by the least staked validator + // Test that slot_stake is the maximum punishment that can happen to a validator + with_externalities( + &mut ExtBuilder::default().nominate(false).fare(false).build(), + || { + // Confirm validator count is 2 + assert_eq!(Staking::validator_count(), 2); + // Confirm account 10 and 20 are validators + assert!(<Validators<Test>>::exists(&11) && <Validators<Test>>::exists(&21)); + + assert_eq!(Staking::stakers(&11).total, 1000); + assert_eq!(Staking::stakers(&21).total, 2000); + + // Give the man some money.
+ let _ = Balances::make_free_balance_be(&10, 1000); + let _ = Balances::make_free_balance_be(&20, 1000); + + // We confirm initialized slot_stake is this value + assert_eq!(Staking::slot_stake(), Staking::stakers(&11).total); + + // Now let's lower account 20 stake + <Stakers<Test>>::insert( + &21, + Exposure { + total: 69, + own: 69, + others: vec![], + }, + ); + assert_eq!(Staking::stakers(&21).total, 69); + <Ledger<Test>>::insert( + &20, + StakingLedger { + stash: 22, + total: 69, + active: 69, + unlocking: vec![], + }, + ); + + // New era --> rewards are paid --> stakes are changed + System::set_block_number(1); + Session::check_rotate_session(System::block_number()); + assert_eq!(Staking::current_era(), 1); + + // -- new balances + reward + assert_eq!(Staking::stakers(&11).total, 1000 + 10); + assert_eq!(Staking::stakers(&21).total, 69 + 10); + + // -- slot stake should also be updated. + assert_eq!(Staking::slot_stake(), 79); + + // If 10 gets slashed now, it will be slashed by 5% of exposure.total * 2.pow(unstake_thresh) + Staking::on_offline_validator(10, 4); + // Confirm user has been reported + assert_eq!(Staking::slash_count(&11), 4); + // check the balance of 10 (slash will be deducted from free balance.) + assert_eq!( + Balances::free_balance(&11), + 1000 + 10 - 50 /*5% of 1000*/ * 8 /*2**3*/ + ); + }, + ); } #[test] fn on_free_balance_zero_stash_removes_validator() { - // Tests that validator storage items are cleaned up when stash is empty - // Tests that storage items are untouched when controller is empty - with_externalities(&mut ExtBuilder::default() - .existential_deposit(10) - .build(), - || { - // Check the balance of the validator account - assert_eq!(Balances::free_balance(&10), 256); - // Check the balance of the stash account - assert_eq!(Balances::free_balance(&11), 256000); - // Check these two accounts are bonded - assert_eq!(Staking::bonded(&11), Some(10)); - - // Set some storage items which we expect to be cleaned up - // Initiate slash count storage item - Staking::on_offline_validator(10, 1); - // Set payee information - assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Stash)); - - // Check storage items that should be cleaned up - assert!(<Ledger<Test>>::exists(&10)); - assert!(<Bonded<Test>>::exists(&11)); - assert!(<Validators<Test>>::exists(&11)); - assert!(<SlashCount<Test>>::exists(&11)); - assert!(<Payee<Test>>::exists(&11)); - - // Reduce free_balance of controller to 0 - Balances::slash(&10, u64::max_value()); - - // Check the balance of the stash account has not been touched - assert_eq!(Balances::free_balance(&11), 256000); - // Check these two accounts are still bonded - assert_eq!(Staking::bonded(&11), Some(10)); - - // Check storage items have not changed - assert!(<Ledger<Test>>::exists(&10)); - assert!(<Bonded<Test>>::exists(&11)); - assert!(<Validators<Test>>::exists(&11)); - assert!(<SlashCount<Test>>::exists(&11)); - assert!(<Payee<Test>>::exists(&11)); - - // Reduce free_balance of stash to 0 - Balances::slash(&11, u64::max_value()); - // Check total balance of stash - assert_eq!(Balances::total_balance(&11), 0); - - // Check storage items do not exist - assert!(!<Ledger<Test>>::exists(&10)); - assert!(!<Bonded<Test>>::exists(&11)); - assert!(!<Validators<Test>>::exists(&11)); - assert!(!<Nominators<Test>>::exists(&11)); - assert!(!<SlashCount<Test>>::exists(&11)); - assert!(!<Payee<Test>>::exists(&11)); - }); + // Tests that validator storage items are cleaned up when stash is empty + // Tests that storage items are untouched when controller is empty + with_externalities( + &mut ExtBuilder::default().existential_deposit(10).build(), + || { + // Check the balance of the validator account + assert_eq!(Balances::free_balance(&10), 256); + // Check the balance of the stash
account + assert_eq!(Balances::free_balance(&11), 256000); + // Check these two accounts are bonded + assert_eq!(Staking::bonded(&11), Some(10)); + + // Set some storage items which we expect to be cleaned up + // Initiate slash count storage item + Staking::on_offline_validator(10, 1); + // Set payee information + assert_ok!(Staking::set_payee( + Origin::signed(10), + RewardDestination::Stash + )); + + // Check storage items that should be cleaned up + assert!(<Ledger<Test>>::exists(&10)); + assert!(<Bonded<Test>>::exists(&11)); + assert!(<Validators<Test>>::exists(&11)); + assert!(<SlashCount<Test>>::exists(&11)); + assert!(<Payee<Test>>::exists(&11)); + + // Reduce free_balance of controller to 0 + Balances::slash(&10, u64::max_value()); + + // Check the balance of the stash account has not been touched + assert_eq!(Balances::free_balance(&11), 256000); + // Check these two accounts are still bonded + assert_eq!(Staking::bonded(&11), Some(10)); + + // Check storage items have not changed + assert!(<Ledger<Test>>::exists(&10)); + assert!(<Bonded<Test>>::exists(&11)); + assert!(<Validators<Test>>::exists(&11)); + assert!(<SlashCount<Test>>::exists(&11)); + assert!(<Payee<Test>>::exists(&11)); + + // Reduce free_balance of stash to 0 + Balances::slash(&11, u64::max_value()); + // Check total balance of stash + assert_eq!(Balances::total_balance(&11), 0); + + // Check storage items do not exist + assert!(!<Ledger<Test>>::exists(&10)); + assert!(!<Bonded<Test>>::exists(&11)); + assert!(!<Validators<Test>>::exists(&11)); + assert!(!<Nominators<Test>>::exists(&11)); + assert!(!<SlashCount<Test>>::exists(&11)); + assert!(!<Payee<Test>>::exists(&11)); + }, + ); } #[test] fn on_free_balance_zero_stash_removes_nominator() { - // Tests that nominator storage items are cleaned up when stash is empty - // Tests that storage items are untouched when controller is empty - with_externalities(&mut ExtBuilder::default() - .existential_deposit(10) - .build(), - || { - // Make 10 a nominator - assert_ok!(Staking::nominate(Origin::signed(10), vec![20])); - // Check that account 10 is a nominator - assert!(<Nominators<Test>>::exists(11)); - // Check the balance of the nominator account - assert_eq!(Balances::free_balance(&10), 256); - // Check the balance of the stash account - assert_eq!(Balances::free_balance(&11), 256000); - - // Set payee information - assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Stash)); - - // Check storage items that should be cleaned up - assert!(<Ledger<Test>>::exists(&10)); - assert!(<Bonded<Test>>::exists(&11)); - assert!(<Nominators<Test>>::exists(&11)); - assert!(<Payee<Test>>::exists(&11)); - - // Reduce free_balance of controller to 0 - Balances::slash(&10, u64::max_value()); - // Check total balance of account 10 - assert_eq!(Balances::total_balance(&10), 0); - - // Check the balance of the stash account has not been touched - assert_eq!(Balances::free_balance(&11), 256000); - // Check these two accounts are still bonded - assert_eq!(Staking::bonded(&11), Some(10)); - - // Check storage items have not changed - assert!(<Ledger<Test>>::exists(&10)); - assert!(<Bonded<Test>>::exists(&11)); - assert!(<Nominators<Test>>::exists(&11)); - assert!(<Payee<Test>>::exists(&11)); - - // Reduce free_balance of stash to 0 - Balances::slash(&11, u64::max_value()); - // Check total balance of stash - assert_eq!(Balances::total_balance(&11), 0); - - // Check storage items do not exist - assert!(!<Ledger<Test>>::exists(&10)); - assert!(!<Bonded<Test>>::exists(&11)); - assert!(!<Validators<Test>>::exists(&11)); - assert!(!<Nominators<Test>>::exists(&11)); - assert!(!<SlashCount<Test>>::exists(&11)); - assert!(!<Payee<Test>>::exists(&11)); - }); + // Tests that nominator storage items are cleaned up when stash is empty + // Tests that storage items are untouched when controller is empty + with_externalities( + &mut ExtBuilder::default().existential_deposit(10).build(), + || { + // Make 10 a nominator +
assert_ok!(Staking::nominate(Origin::signed(10), vec![20])); + // Check that account 10 is a nominator + assert!(<Nominators<Test>>::exists(11)); + // Check the balance of the nominator account + assert_eq!(Balances::free_balance(&10), 256); + // Check the balance of the stash account + assert_eq!(Balances::free_balance(&11), 256000); + + // Set payee information + assert_ok!(Staking::set_payee( + Origin::signed(10), + RewardDestination::Stash + )); + + // Check storage items that should be cleaned up + assert!(<Ledger<Test>>::exists(&10)); + assert!(<Bonded<Test>>::exists(&11)); + assert!(<Nominators<Test>>::exists(&11)); + assert!(<Payee<Test>>::exists(&11)); + + // Reduce free_balance of controller to 0 + Balances::slash(&10, u64::max_value()); + // Check total balance of account 10 + assert_eq!(Balances::total_balance(&10), 0); + + // Check the balance of the stash account has not been touched + assert_eq!(Balances::free_balance(&11), 256000); + // Check these two accounts are still bonded + assert_eq!(Staking::bonded(&11), Some(10)); + + // Check storage items have not changed + assert!(<Ledger<Test>>::exists(&10)); + assert!(<Bonded<Test>>::exists(&11)); + assert!(<Nominators<Test>>::exists(&11)); + assert!(<Payee<Test>>::exists(&11)); + + // Reduce free_balance of stash to 0 + Balances::slash(&11, u64::max_value()); + // Check total balance of stash + assert_eq!(Balances::total_balance(&11), 0); + + // Check storage items do not exist + assert!(!<Ledger<Test>>::exists(&10)); + assert!(!<Bonded<Test>>::exists(&11)); + assert!(!<Validators<Test>>::exists(&11)); + assert!(!<Nominators<Test>>::exists(&11)); + assert!(!<SlashCount<Test>>::exists(&11)); + assert!(!<Payee<Test>>::exists(&11)); + }, + ); } #[test] fn phragmen_poc_works() { - // Tests the POC test of the phragmen, mentioned in the paper and reference implementation. - // Initial votes: - // Votes [ - // ('2', 500, ['10', '20', '30']), - // ('4', 500, ['10', '20', '40']), - // ('10', 1000, ['10']), - // ('20', 1000, ['20']), - // ('30', 1000, ['30']), - // ('40', 1000, ['40'])] - // - // Sequential Phragmén gives - // 10 is elected with stake 1666.6666666666665 and score 0.0005 - // 20 is elected with stake 1333.3333333333333 and score 0.00075 - - // 2 has load 0.00075 and supported - // 10 with stake 333.3333333333333 20 with stake 166.66666666666666 30 with stake 0.0 - // 4 has load 0.00075 and supported - // 10 with stake 333.3333333333333 20 with stake 166.66666666666666 40 with stake 0.0 - // 10 has load 0.0005 and supported - // 10 with stake 1000.0 - // 20 has load 0.00075 and supported - // 20 with stake 1000.0 - // 30 has load 0 and supported - // 30 with stake 0 - // 40 has load 0 and supported - // 40 with stake 0 - - // Sequential Phragmén with post processing gives - // 10 is elected with stake 1500.0 and score 0.0005 - // 20 is elected with stake 1500.0 and score 0.00075 - // - // 10 has load 0.0005 and supported - // 10 with stake 1000.0 - // 20 has load 0.00075 and supported - // 20 with stake 1000.0 - // 30 has load 0 and supported - // 30 with stake 0 - // 40 has load 0 and supported - // 40 with stake 0 - // 2 has load 0.00075 and supported - // 10 with stake 166.66666666666674 20 with stake 333.33333333333326 30 with stake 0 - // 4 has load 0.00075 and supported - // 10 with stake 333.3333333333333 20 with stake 166.66666666666666 40 with stake 0.0 - - with_externalities(&mut ExtBuilder::default() - .nominate(false) - .validator_pool(true) - .build(), - || { - // We don't really care about this. At this point everything is even.
 #[test]
 fn phragmen_poc_works() {
-    // Tests the POC test of the phragmen, mentioned in the paper and reference implementation.
-    // Initial votes:
-    // Votes [
-    // ('2', 500, ['10', '20', '30']),
-    // ('4', 500, ['10', '20', '40']),
-    // ('10', 1000, ['10']),
-    // ('20', 1000, ['20']),
-    // ('30', 1000, ['30']),
-    // ('40', 1000, ['40'])]
-    //
-    // Sequential Phragmén gives
-    // 10 is elected with stake 1666.6666666666665 and score 0.0005
-    // 20 is elected with stake 1333.3333333333333 and score 0.00075
-
-    // 2 has load 0.00075 and supported
-    // 10 with stake 333.3333333333333 20 with stake 166.66666666666666 30 with stake 0.0
-    // 4 has load 0.00075 and supported
-    // 10 with stake 333.3333333333333 20 with stake 166.66666666666666 40 with stake 0.0
-    // 10 has load 0.0005 and supported
-    // 10 with stake 1000.0
-    // 20 has load 0.00075 and supported
-    // 20 with stake 1000.0
-    // 30 has load 0 and supported
-    // 30 with stake 0
-    // 40 has load 0 and supported
-    // 40 with stake 0
-
-    // Sequential Phragmén with post processing gives
-    // 10 is elected with stake 1500.0 and score 0.0005
-    // 20 is elected with stake 1500.0 and score 0.00075
-    //
-    // 10 has load 0.0005 and supported
-    // 10 with stake 1000.0
-    // 20 has load 0.00075 and supported
-    // 20 with stake 1000.0
-    // 30 has load 0 and supported
-    // 30 with stake 0
-    // 40 has load 0 and supported
-    // 40 with stake 0
-    // 2 has load 0.00075 and supported
-    // 10 with stake 166.66666666666674 20 with stake 333.33333333333326 30 with stake 0
-    // 4 has load 0.00075 and supported
-    // 10 with stake 333.3333333333333 20 with stake 166.66666666666666 40 with stake 0.0
-
-
-    with_externalities(&mut ExtBuilder::default()
-        .nominate(false)
-        .validator_pool(true)
-        .build(),
-    || {
-        // We don't really care about this. At this point everything is even.
-        assert_eq_uvec!(Session::validators(), vec![40, 30]);
-
-        // Set payees to Controller
-        assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Controller));
-        assert_ok!(Staking::set_payee(Origin::signed(20), RewardDestination::Controller));
-        assert_ok!(Staking::set_payee(Origin::signed(30), RewardDestination::Controller));
-        assert_ok!(Staking::set_payee(Origin::signed(40), RewardDestination::Controller));
-
-        // no one is a nominator
-        assert_eq!(<Nominators<Test>>::enumerate().count(), 0 as usize);
-
-        // bond [2,1] / [4,3] a nominator
-        let _ = Balances::deposit_creating(&1, 1000);
-        let _ = Balances::deposit_creating(&3, 1000);
-
-        assert_ok!(Staking::bond(Origin::signed(1), 2, 500, RewardDestination::default()));
-        assert_ok!(Staking::nominate(Origin::signed(2), vec![11, 21, 31]));
-
-        assert_ok!(Staking::bond(Origin::signed(3), 4, 500, RewardDestination::default()));
-        assert_ok!(Staking::nominate(Origin::signed(4), vec![11, 21, 41]));
-
-        // New era => election algorithm will trigger
-        System::set_block_number(1);
-        Session::check_rotate_session(System::block_number());
-
-        assert_eq_uvec!(Session::validators(), vec![20, 10]);
-
-        // with stake 1666 and 1333 respectively
-        assert_eq!(Staking::stakers(11).own, 1000);
-        assert_eq!(Staking::stakers(11).total, 1000 + 332);
-        assert_eq!(Staking::stakers(21).own, 1000);
-        assert_eq!(Staking::stakers(21).total, 1000 + 666);
-
-        // Nominator's stake distribution.
-        assert_eq!(Staking::stakers(11).others.iter().map(|e| e.value).collect::<Vec<BalanceOf<Test>>>(), vec![166, 166]);
-        assert_eq!(Staking::stakers(11).others.iter().map(|e| e.value).sum::<BalanceOf<Test>>(), 332);
-        assert_eq!(Staking::stakers(11).others.iter().map(|e| e.who).collect::<Vec<BalanceOf<Test>>>(), vec![3, 1]);
-
-        assert_eq!(Staking::stakers(21).others.iter().map(|e| e.value).collect::<Vec<BalanceOf<Test>>>(), vec![333, 333]);
-        assert_eq!(Staking::stakers(21).others.iter().map(|e| e.value).sum::<BalanceOf<Test>>(), 666);
-        assert_eq!(Staking::stakers(21).others.iter().map(|e| e.who).collect::<Vec<BalanceOf<Test>>>(), vec![3, 1]);
-    });
+    // Tests the POC test of the phragmen, mentioned in the paper and reference implementation.
+    // Initial votes:
+    // Votes [
+    // ('2', 500, ['10', '20', '30']),
+    // ('4', 500, ['10', '20', '40']),
+    // ('10', 1000, ['10']),
+    // ('20', 1000, ['20']),
+    // ('30', 1000, ['30']),
+    // ('40', 1000, ['40'])]
+    //
+    // Sequential Phragmén gives
+    // 10 is elected with stake 1666.6666666666665 and score 0.0005
+    // 20 is elected with stake 1333.3333333333333 and score 0.00075
+
+    // 2 has load 0.00075 and supported
+    // 10 with stake 333.3333333333333 20 with stake 166.66666666666666 30 with stake 0.0
+    // 4 has load 0.00075 and supported
+    // 10 with stake 333.3333333333333 20 with stake 166.66666666666666 40 with stake 0.0
+    // 10 has load 0.0005 and supported
+    // 10 with stake 1000.0
+    // 20 has load 0.00075 and supported
+    // 20 with stake 1000.0
+    // 30 has load 0 and supported
+    // 30 with stake 0
+    // 40 has load 0 and supported
+    // 40 with stake 0
+
+    // Sequential Phragmén with post processing gives
+    // 10 is elected with stake 1500.0 and score 0.0005
+    // 20 is elected with stake 1500.0 and score 0.00075
+    //
+    // 10 has load 0.0005 and supported
+    // 10 with stake 1000.0
+    // 20 has load 0.00075 and supported
+    // 20 with stake 1000.0
+    // 30 has load 0 and supported
+    // 30 with stake 0
+    // 40 has load 0 and supported
+    // 40 with stake 0
+    // 2 has load 0.00075 and supported
+    // 10 with stake 166.66666666666674 20 with stake 333.33333333333326 30 with stake 0
+    // 4 has load 0.00075 and supported
+    // 10 with stake 333.3333333333333 20 with stake 166.66666666666666 40 with stake 0.0
+
+    with_externalities(
+        &mut ExtBuilder::default()
+            .nominate(false)
+            .validator_pool(true)
+            .build(),
+        || {
+            // We don't really care about this. At this point everything is even.
+            assert_eq_uvec!(Session::validators(), vec![40, 30]);
+
+            // Set payees to Controller
+            assert_ok!(Staking::set_payee(
+                Origin::signed(10),
+                RewardDestination::Controller
+            ));
+            assert_ok!(Staking::set_payee(
+                Origin::signed(20),
+                RewardDestination::Controller
+            ));
+            assert_ok!(Staking::set_payee(
+                Origin::signed(30),
+                RewardDestination::Controller
+            ));
+            assert_ok!(Staking::set_payee(
+                Origin::signed(40),
+                RewardDestination::Controller
+            ));
+
+            // no one is a nominator
+            assert_eq!(<Nominators<Test>>::enumerate().count(), 0 as usize);
+
+            // bond [2,1] / [4,3] a nominator
+            let _ = Balances::deposit_creating(&1, 1000);
+            let _ = Balances::deposit_creating(&3, 1000);
+
+            assert_ok!(Staking::bond(
+                Origin::signed(1),
+                2,
+                500,
+                RewardDestination::default()
+            ));
+            assert_ok!(Staking::nominate(Origin::signed(2), vec![11, 21, 31]));
+
+            assert_ok!(Staking::bond(
+                Origin::signed(3),
+                4,
+                500,
+                RewardDestination::default()
+            ));
+            assert_ok!(Staking::nominate(Origin::signed(4), vec![11, 21, 41]));
+
+            // New era => election algorithm will trigger
+            System::set_block_number(1);
+            Session::check_rotate_session(System::block_number());
+
+            assert_eq_uvec!(Session::validators(), vec![20, 10]);
+
+            // with stake 1666 and 1333 respectively
+            assert_eq!(Staking::stakers(11).own, 1000);
+            assert_eq!(Staking::stakers(11).total, 1000 + 332);
+            assert_eq!(Staking::stakers(21).own, 1000);
+            assert_eq!(Staking::stakers(21).total, 1000 + 666);
+
+            // Nominator's stake distribution.
+            assert_eq!(
+                Staking::stakers(11)
+                    .others
+                    .iter()
+                    .map(|e| e.value)
+                    .collect::<Vec<BalanceOf<Test>>>(),
+                vec![166, 166]
+            );
+            assert_eq!(
+                Staking::stakers(11)
+                    .others
+                    .iter()
+                    .map(|e| e.value)
+                    .sum::<BalanceOf<Test>>(),
+                332
+            );
+            assert_eq!(
+                Staking::stakers(11)
+                    .others
+                    .iter()
+                    .map(|e| e.who)
+                    .collect::<Vec<BalanceOf<Test>>>(),
+                vec![3, 1]
+            );
+
+            assert_eq!(
+                Staking::stakers(21)
+                    .others
+                    .iter()
+                    .map(|e| e.value)
+                    .collect::<Vec<BalanceOf<Test>>>(),
+                vec![333, 333]
+            );
+            assert_eq!(
+                Staking::stakers(21)
+                    .others
+                    .iter()
+                    .map(|e| e.value)
+                    .sum::<BalanceOf<Test>>(),
+                666
+            );
+            assert_eq!(
+                Staking::stakers(21)
+                    .others
+                    .iter()
+                    .map(|e| e.who)
+                    .collect::<Vec<BalanceOf<Test>>>(),
+                vec![3, 1]
+            );
+        },
+    );
 }
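The scores quoted in the comment block fall out of the sequential Phragmén rule: in each round a candidate's score is (1 + Σ budget_v · load_v over its approvers) / approval_stake, and every voter backing the round's winner takes the winner's score as its new load. A standalone f64 sketch (plain Rust, independent of the runtime's fixed-point phragmen module) that reproduces the 0.0005 and 0.00075 above:

fn main() {
    // (voter budget, approved candidates), mirroring the vote list above.
    let votes: &[(f64, &[u64])] = &[
        (500.0, &[10, 20, 30]),
        (500.0, &[10, 20, 40]),
        (1000.0, &[10]),
        (1000.0, &[20]),
        (1000.0, &[30]),
        (1000.0, &[40]),
    ];
    let mut loads = vec![0.0_f64; votes.len()];
    let mut elected: Vec<u64> = Vec::new();

    for _round in 0..2 {
        let (mut best, mut best_score) = (0_u64, f64::INFINITY);
        for &candidate in &[10_u64, 20, 30, 40] {
            if elected.contains(&candidate) {
                continue;
            }
            // score(c) = (1 + sum of budget * load over approvers) / approval stake
            let (mut approval, mut load_sum) = (0.0, 0.0);
            for (i, (budget, approves)) in votes.iter().enumerate() {
                if approves.contains(&candidate) {
                    approval += budget;
                    load_sum += budget * loads[i];
                }
            }
            let score = (1.0 + load_sum) / approval;
            if score < best_score {
                best = candidate;
                best_score = score;
            }
        }
        // Every voter approving the winner takes the winner's score as load.
        for (i, (_, approves)) in votes.iter().enumerate() {
            if approves.contains(&best) {
                loads[i] = best_score;
            }
        }
        elected.push(best);
        println!("{} elected with score {}", best, best_score);
        // Round 1: 10 elected with score 0.0005  (1 / 2000)
        // Round 2: 20 elected with score 0.00075 ((1 + 0.25 + 0.25) / 2000)
    }
}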
 #[test]
 fn phragmen_election_works_with_post_processing() {
-    // tests the encapsulated phragmen::elect function.
-    // Votes [
-    // ('10', 1000, ['10']),
-    // ('20', 1000, ['20']),
-    // ('30', 1000, ['30']),
-    // ('2', 50, ['10', '20']),
-    // ('4', 1000, ['10', '30'])
-    // ]
-    // Sequential Phragmén gives
-    // 10 is elected with stake 1705.7377049180327 and score 0.0004878048780487805
-    // 30 is elected with stake 1344.2622950819673 and score 0.0007439024390243903
-
-    // 10 has load 0.0004878048780487805 and supported
-    // 10 with stake 1000.0
-    // 20 has load 0 and supported
-    // 20 with stake 0
-    // 30 has load 0.0007439024390243903 and supported
-    // 30 with stake 1000.0
-    // 2 has load 0.0004878048780487805 and supported
-    // 10 with stake 50.0 20 with stake 0.0
-    // 4 has load 0.0007439024390243903 and supported
-    // 10 with stake 655.7377049180328 30 with stake 344.26229508196724
-
-    // Sequential Phragmén with post processing gives
-    // 10 is elected with stake 1525.0 and score 0.0004878048780487805
-    // 30 is elected with stake 1525.0 and score 0.0007439024390243903
-
-    // 10 has load 0.0004878048780487805 and supported
-    // 10 with stake 1000.0
-    // 20 has load 0 and supported
-    // 20 with stake 0
-    // 30 has load 0.0007439024390243903 and supported
-    // 30 with stake 1000.0
-    // 2 has load 0.0004878048780487805 and supported
-    // 10 with stake 50.0 20 with stake 0.0
-    // 4 has load 0.0007439024390243903 and supported
-    // 10 with stake 475.0 30 with stake 525.0
-    with_externalities(&mut ExtBuilder::default().nominate(false).build(), || {
-        // initial setup of 10 and 20, both validators
-        assert_eq_uvec!(Session::validators(), vec![20, 10]);
-
-        // Bond [30, 31] as the third validator
-        assert_ok!(Staking::bond(Origin::signed(31), 30, 1000, RewardDestination::default()));
-        assert_ok!(Staking::validate(Origin::signed(30), ValidatorPrefs::default()));
-
-        // bond [2,1](A), [4,3](B), as 2 nominators
-        for i in &[1, 3] { let _ = Balances::deposit_creating(i, 2000); }
-
-        assert_ok!(Staking::bond(Origin::signed(1), 2, 50, RewardDestination::default()));
-        assert_ok!(Staking::nominate(Origin::signed(2), vec![11, 21]));
-
-        assert_ok!(Staking::bond(Origin::signed(3), 4, 1000, RewardDestination::default()));
-        assert_ok!(Staking::nominate(Origin::signed(4), vec![11, 31]));
-
-        let rounds = || 2 as usize;
-        let validators = || <Validators<Test>>::enumerate();
-        let nominators = || <Nominators<Test>>::enumerate();
-        let min_validator_count = Staking::minimum_validator_count() as usize;
-
-        let winners = phragmen::elect::<Test, _, _, _>(
-            rounds,
-            validators,
-            nominators,
-            Staking::slashable_balance_of,
-            min_validator_count,
-            ElectionConfig::<BalanceOf<Test>> {
-                equalize: true,
-                tolerance: <BalanceOf<Test>>::sa(10 as u64),
-                iterations: 10,
-            }
-        );
-
-        let winners = winners.unwrap();
-
-        // 10 and 30 must be the winners
-        assert_eq!(winners.iter().map(|w| w.who).collect::<Vec<BalanceOf<Test>>>(), vec![11, 31]);
-
-        let winner_10 = winners.iter().filter(|w| w.who == 11).nth(0).unwrap();
-        let winner_30 = winners.iter().filter(|w| w.who == 31).nth(0).unwrap();
-
-        // Check exposures
-        assert_eq!(winner_10.exposure.total, 1000 + 525);
-        assert_eq!(winner_10.score, PerU128::from_max_value(165991398498018762665060784113057664));
-        assert_eq!(winner_10.exposure.others[0].value, 475);
-        assert_eq!(winner_10.exposure.others[1].value, 50);
-
-        assert_eq!(winner_30.exposure.total, 1000 + 525);
-        assert_eq!(winner_30.score, PerU128::from_max_value(253136882709478613064217695772412937));
-        assert_eq!(winner_30.exposure.others[0].value, 525);
-    })
+    // tests the encapsulated phragmen::elect function.
+    // Votes [
+    // ('10', 1000, ['10']),
+    // ('20', 1000, ['20']),
+    // ('30', 1000, ['30']),
+    // ('2', 50, ['10', '20']),
+    // ('4', 1000, ['10', '30'])
+    // ]
+    // Sequential Phragmén gives
+    // 10 is elected with stake 1705.7377049180327 and score 0.0004878048780487805
+    // 30 is elected with stake 1344.2622950819673 and score 0.0007439024390243903
+
+    // 10 has load 0.0004878048780487805 and supported
+    // 10 with stake 1000.0
+    // 20 has load 0 and supported
+    // 20 with stake 0
+    // 30 has load 0.0007439024390243903 and supported
+    // 30 with stake 1000.0
+    // 2 has load 0.0004878048780487805 and supported
+    // 10 with stake 50.0 20 with stake 0.0
+    // 4 has load 0.0007439024390243903 and supported
+    // 10 with stake 655.7377049180328 30 with stake 344.26229508196724
+
+    // Sequential Phragmén with post processing gives
+    // 10 is elected with stake 1525.0 and score 0.0004878048780487805
+    // 30 is elected with stake 1525.0 and score 0.0007439024390243903
+
+    // 10 has load 0.0004878048780487805 and supported
+    // 10 with stake 1000.0
+    // 20 has load 0 and supported
+    // 20 with stake 0
+    // 30 has load 0.0007439024390243903 and supported
+    // 30 with stake 1000.0
+    // 2 has load 0.0004878048780487805 and supported
+    // 10 with stake 50.0 20 with stake 0.0
+    // 4 has load 0.0007439024390243903 and supported
+    // 10 with stake 475.0 30 with stake 525.0
+    with_externalities(&mut ExtBuilder::default().nominate(false).build(), || {
+        // initial setup of 10 and 20, both validators
+        assert_eq_uvec!(Session::validators(), vec![20, 10]);
+
+        // Bond [30, 31] as the third validator
+        assert_ok!(Staking::bond(
+            Origin::signed(31),
+            30,
+            1000,
+            RewardDestination::default()
+        ));
+        assert_ok!(Staking::validate(
+            Origin::signed(30),
+            ValidatorPrefs::default()
+        ));
+
+        // bond [2,1](A), [4,3](B), as 2 nominators
+        for i in &[1, 3] {
+            let _ = Balances::deposit_creating(i, 2000);
+        }
+
+        assert_ok!(Staking::bond(
+            Origin::signed(1),
+            2,
+            50,
+            RewardDestination::default()
+        ));
+        assert_ok!(Staking::nominate(Origin::signed(2), vec![11, 21]));
+
+        assert_ok!(Staking::bond(
+            Origin::signed(3),
+            4,
+            1000,
+            RewardDestination::default()
+        ));
+        assert_ok!(Staking::nominate(Origin::signed(4), vec![11, 31]));
+
+        let rounds = || 2 as usize;
+        let validators = || <Validators<Test>>::enumerate();
+        let nominators = || <Nominators<Test>>::enumerate();
+        let min_validator_count = Staking::minimum_validator_count() as usize;
+
+        let winners = phragmen::elect::<Test, _, _, _>(
+            rounds,
+            validators,
+            nominators,
+            Staking::slashable_balance_of,
+            min_validator_count,
+            ElectionConfig::<BalanceOf<Test>> {
+                equalize: true,
+                tolerance: <BalanceOf<Test>>::sa(10 as u64),
+                iterations: 10,
+            },
+        );
+
+        let winners = winners.unwrap();
+
+        // 10 and 30 must be the winners
+        assert_eq!(
+            winners
+                .iter()
+                .map(|w| w.who)
+                .collect::<Vec<BalanceOf<Test>>>(),
+            vec![11, 31]
+        );
+
+        let winner_10 = winners.iter().filter(|w| w.who == 11).nth(0).unwrap();
+        let winner_30 = winners.iter().filter(|w| w.who == 31).nth(0).unwrap();
+
+        // Check exposures
+        assert_eq!(winner_10.exposure.total, 1000 + 525);
+        assert_eq!(
+            winner_10.score,
+            PerU128::from_max_value(165991398498018762665060784113057664)
+        );
+        assert_eq!(winner_10.exposure.others[0].value, 475);
+        assert_eq!(winner_10.exposure.others[1].value, 50);
+
+        assert_eq!(winner_30.exposure.total, 1000 + 525);
+        assert_eq!(
+            winner_30.score,
+            PerU128::from_max_value(253136882709478613064217695772412937)
+        );
+        assert_eq!(winner_30.exposure.others[0].value, 525);
+    })
 }
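The long integer constants in the two score asserts are just the comment block's floating-point scores lifted into `PerU128`'s parts-of-2^128 fixed point, i.e. score × 2^128 (assuming `from_max_value` takes raw parts out of `u128::max_value()`, as the name suggests). A quick way to sanity-check them:

fn main() {
    let two_128 = 2_f64.powi(128);
    // Scores quoted for winners 10 and 30 in the comment above.
    println!("{:e}", 0.0004878048780487805 * two_128); // ~1.6599139849e35
    println!("{:e}", 0.0007439024390243903 * two_128); // ~2.5313688270e35
    // ...matching 165991398498018762665060784113057664 and
    // 253136882709478613064217695772412937 up to f64 precision.
}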
 #[test]
 fn switching_roles() {
-    // Test that it should be possible to switch between roles (nominator, validator, idle) with minimal overhead.
-    with_externalities(&mut ExtBuilder::default()
-        .nominate(false)
-        .sessions_per_era(3)
-        .build(),
-    || {
-        // Reset reward destination
-        for i in &[10, 20] { assert_ok!(Staking::set_payee(Origin::signed(*i), RewardDestination::Controller)); }
-
-        assert_eq_uvec!(Session::validators(), vec![20, 10]);
-
-        // put some money in account that we'll use.
-        for i in 1..7 { let _ = Balances::deposit_creating(&i, 5000); }
-
-        // add 2 nominators
-        assert_ok!(Staking::bond(Origin::signed(1), 2, 2000, RewardDestination::Controller));
-        assert_ok!(Staking::nominate(Origin::signed(2), vec![11, 5]));
-
-        assert_ok!(Staking::bond(Origin::signed(3), 4, 500, RewardDestination::Controller));
-        assert_ok!(Staking::nominate(Origin::signed(4), vec![21, 1]));
-
-        // add a new validator candidate
-        assert_ok!(Staking::bond(Origin::signed(5), 6, 1000, RewardDestination::Controller));
-        assert_ok!(Staking::validate(Origin::signed(6), ValidatorPrefs::default()));
-
-        // new block
-        System::set_block_number(1);
-        Session::check_rotate_session(System::block_number());
-
-        // no change
-        assert_eq_uvec!(Session::validators(), vec![20, 10]);
-
-        // new block
-        System::set_block_number(2);
-        Session::check_rotate_session(System::block_number());
-
-        // no change
-        assert_eq_uvec!(Session::validators(), vec![20, 10]);
-
-        // new block --> ne era --> new validators
-        System::set_block_number(3);
-        Session::check_rotate_session(System::block_number());
-
-        // with current nominators 10 and 5 have the most stake
-        assert_eq_uvec!(Session::validators(), vec![6, 10]);
-
-        // 2 decides to be a validator. Consequences:
-        assert_ok!(Staking::validate(Origin::signed(2), ValidatorPrefs::default()));
-        // new stakes:
-        // 10: 1000 self vote
-        // 20: 1000 self vote + 500 vote
-        // 6 : 1000 self vote
-        // 2 : 2000 self vote + 500 vote.
-        // Winners: 20 and 2
-
-        System::set_block_number(4);
-        Session::check_rotate_session(System::block_number());
-        assert_eq_uvec!(Session::validators(), vec![6, 10]);
-
-        System::set_block_number(5);
-        Session::check_rotate_session(System::block_number());
-        assert_eq_uvec!(Session::validators(), vec![6, 10]);
-
-        // ne era
-        System::set_block_number(6);
-        Session::check_rotate_session(System::block_number());
-        assert_eq_uvec!(Session::validators(), vec![2, 20]);
-    });
+    // Test that it should be possible to switch between roles (nominator, validator, idle) with minimal overhead.
+    with_externalities(
+        &mut ExtBuilder::default()
+            .nominate(false)
+            .sessions_per_era(3)
+            .build(),
+        || {
+            // Reset reward destination
+            for i in &[10, 20] {
+                assert_ok!(Staking::set_payee(
+                    Origin::signed(*i),
+                    RewardDestination::Controller
+                ));
+            }
+
+            assert_eq_uvec!(Session::validators(), vec![20, 10]);
+
+            // put some money in account that we'll use.
+            for i in 1..7 {
+                let _ = Balances::deposit_creating(&i, 5000);
+            }
+
+            // add 2 nominators
+            assert_ok!(Staking::bond(
+                Origin::signed(1),
+                2,
+                2000,
+                RewardDestination::Controller
+            ));
+            assert_ok!(Staking::nominate(Origin::signed(2), vec![11, 5]));
+
+            assert_ok!(Staking::bond(
+                Origin::signed(3),
+                4,
+                500,
+                RewardDestination::Controller
+            ));
+            assert_ok!(Staking::nominate(Origin::signed(4), vec![21, 1]));
+
+            // add a new validator candidate
+            assert_ok!(Staking::bond(
+                Origin::signed(5),
+                6,
+                1000,
+                RewardDestination::Controller
+            ));
+            assert_ok!(Staking::validate(
+                Origin::signed(6),
+                ValidatorPrefs::default()
+            ));
+
+            // new block
+            System::set_block_number(1);
+            Session::check_rotate_session(System::block_number());
+
+            // no change
+            assert_eq_uvec!(Session::validators(), vec![20, 10]);
+
+            // new block
+            System::set_block_number(2);
+            Session::check_rotate_session(System::block_number());
+
+            // no change
+            assert_eq_uvec!(Session::validators(), vec![20, 10]);
+
+            // new block --> new era --> new validators
+            System::set_block_number(3);
+            Session::check_rotate_session(System::block_number());
+
+            // with current nominators 10 and 5 have the most stake
+            assert_eq_uvec!(Session::validators(), vec![6, 10]);
+
+            // 2 decides to be a validator. Consequences:
+            assert_ok!(Staking::validate(
+                Origin::signed(2),
+                ValidatorPrefs::default()
+            ));
+            // new stakes:
+            // 10: 1000 self vote
+            // 20: 1000 self vote + 500 vote
+            // 6 : 1000 self vote
+            // 2 : 2000 self vote + 500 vote.
+            // Winners: 20 and 2
+
+            System::set_block_number(4);
+            Session::check_rotate_session(System::block_number());
+            assert_eq_uvec!(Session::validators(), vec![6, 10]);
+
+            System::set_block_number(5);
+            Session::check_rotate_session(System::block_number());
+            assert_eq_uvec!(Session::validators(), vec![6, 10]);
+
+            // new era
+            System::set_block_number(6);
+            Session::check_rotate_session(System::block_number());
+            assert_eq_uvec!(Session::validators(), vec![2, 20]);
+        },
+    );
 }

 #[test]
 fn wrong_vote_is_null() {
-    with_externalities(&mut ExtBuilder::default()
-        .nominate(false)
-        .validator_pool(true)
-        .build(),
-    || {
-        assert_eq_uvec!(Session::validators(), vec![40, 30]);
-
-        // put some money in account that we'll use.
-        for i in 1..3 { let _ = Balances::deposit_creating(&i, 5000); }
-
-        // add 1 nominators
-        assert_ok!(Staking::bond(Origin::signed(1), 2, 2000, RewardDestination::default()));
-        assert_ok!(Staking::nominate(Origin::signed(2), vec![
-            11, 21,            // good votes
-            1, 2, 15, 1000, 25 // crap votes. No effect.
-        ]));
-
-        // new block
-        System::set_block_number(1);
-        Session::check_rotate_session(System::block_number());
-
-        assert_eq_uvec!(Session::validators(), vec![20, 10]);
-    });
+    with_externalities(
+        &mut ExtBuilder::default()
+            .nominate(false)
+            .validator_pool(true)
+            .build(),
+        || {
+            assert_eq_uvec!(Session::validators(), vec![40, 30]);
+
+            // put some money in account that we'll use.
+            for i in 1..3 {
+                let _ = Balances::deposit_creating(&i, 5000);
+            }
+
+            // add 1 nominator
+            assert_ok!(Staking::bond(
+                Origin::signed(1),
+                2,
+                2000,
+                RewardDestination::default()
+            ));
+            assert_ok!(Staking::nominate(
+                Origin::signed(2),
+                vec![
+                    11, 21, // good votes
+                    1, 2, 15, 1000, 25 // crap votes. No effect.
+                ]
+            ));
+
+            // new block
+            System::set_block_number(1);
+            Session::check_rotate_session(System::block_number());
+
+            assert_eq_uvec!(Session::validators(), vec![20, 10]);
+        },
+    );
 }

 #[test]
 fn bond_with_no_staked_value() {
-    // Behavior when someone bonds with no staked value.
-    // Particularly when she votes and the candidate is elected.
-    with_externalities(&mut ExtBuilder::default()
-        .validator_count(3)
-        .nominate(false)
-        .minimum_validator_count(1)
-        .build(), || {
-        // setup
-        assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Controller));
-        let _ = Balances::deposit_creating(&3, 1000);
-        let initial_balance_2 = Balances::free_balance(&2);
-        let initial_balance_4 = Balances::free_balance(&4);
-
-        // initial validators
-        assert_eq_uvec!(Session::validators(), vec![20, 10]);
-
-        // Stingy validator.
-        assert_ok!(Staking::bond(Origin::signed(1), 2, 0, RewardDestination::Controller));
-        assert_ok!(Staking::validate(Origin::signed(2), ValidatorPrefs::default()));
-
-        System::set_block_number(1);
-        Session::check_rotate_session(System::block_number());
-
-        // Not elected even though we want 3.
-        assert_eq_uvec!(Session::validators(), vec![20, 10]);
-
-        // min of 10 and 20.
-        assert_eq!(Staking::slot_stake(), 1000);
-
-        // let's make the stingy one elected.
-        assert_ok!(Staking::bond(Origin::signed(3), 4, 500, RewardDestination::Controller));
-        assert_ok!(Staking::nominate(Origin::signed(4), vec![1]));
-
-        // no rewards paid to 2 and 4 yet
-        assert_eq!(Balances::free_balance(&2), initial_balance_2);
-        assert_eq!(Balances::free_balance(&4), initial_balance_4);
-
-        System::set_block_number(2);
-        Session::check_rotate_session(System::block_number());
-
-        // Stingy one is selected
-        assert_eq_uvec!(Session::validators(), vec![20, 10, 2]);
-        assert_eq!(Staking::stakers(1), Exposure { own: 0, total: 500, others: vec![IndividualExposure { who: 3, value: 500}]});
-        // New slot stake.
-        assert_eq!(Staking::slot_stake(), 500);
-
-        // no rewards paid to 2 and 4 yet
-        assert_eq!(Balances::free_balance(&2), initial_balance_2);
-        assert_eq!(Balances::free_balance(&4), initial_balance_4);
-
-        System::set_block_number(3);
-        Session::check_rotate_session(System::block_number());
-
-        let reward = Staking::current_session_reward();
-        // 2 will not get any reward
-        // 4 will get all the reward share
-        assert_eq!(Balances::free_balance(&2), initial_balance_2);
-        assert_eq!(Balances::free_balance(&4), initial_balance_4 + reward);
-    });
+    // Behavior when someone bonds with no staked value.
+    // Particularly when she votes and the candidate is elected.
+    with_externalities(
+        &mut ExtBuilder::default()
+            .validator_count(3)
+            .nominate(false)
+            .minimum_validator_count(1)
+            .build(),
+        || {
+            // setup
+            assert_ok!(Staking::set_payee(
+                Origin::signed(10),
+                RewardDestination::Controller
+            ));
+            let _ = Balances::deposit_creating(&3, 1000);
+            let initial_balance_2 = Balances::free_balance(&2);
+            let initial_balance_4 = Balances::free_balance(&4);
+
+            // initial validators
+            assert_eq_uvec!(Session::validators(), vec![20, 10]);
+
+            // Stingy validator.
+            assert_ok!(Staking::bond(
+                Origin::signed(1),
+                2,
+                0,
+                RewardDestination::Controller
+            ));
+            assert_ok!(Staking::validate(
+                Origin::signed(2),
+                ValidatorPrefs::default()
+            ));
+
+            System::set_block_number(1);
+            Session::check_rotate_session(System::block_number());
+
+            // Not elected even though we want 3.
+            assert_eq_uvec!(Session::validators(), vec![20, 10]);
+
+            // min of 10 and 20.
+            assert_eq!(Staking::slot_stake(), 1000);
+
+            // let's make the stingy one elected.
+            assert_ok!(Staking::bond(
+                Origin::signed(3),
+                4,
+                500,
+                RewardDestination::Controller
+            ));
+            assert_ok!(Staking::nominate(Origin::signed(4), vec![1]));
+
+            // no rewards paid to 2 and 4 yet
+            assert_eq!(Balances::free_balance(&2), initial_balance_2);
+            assert_eq!(Balances::free_balance(&4), initial_balance_4);
+
+            System::set_block_number(2);
+            Session::check_rotate_session(System::block_number());
+
+            // Stingy one is selected
+            assert_eq_uvec!(Session::validators(), vec![20, 10, 2]);
+            assert_eq!(
+                Staking::stakers(1),
+                Exposure {
+                    own: 0,
+                    total: 500,
+                    others: vec![IndividualExposure { who: 3, value: 500 }]
+                }
+            );
+            // New slot stake.
+            assert_eq!(Staking::slot_stake(), 500);
+
+            // no rewards paid to 2 and 4 yet
+            assert_eq!(Balances::free_balance(&2), initial_balance_2);
+            assert_eq!(Balances::free_balance(&4), initial_balance_4);
+
+            System::set_block_number(3);
+            Session::check_rotate_session(System::block_number());
+
+            let reward = Staking::current_session_reward();
+            // 2 will not get any reward
+            // 4 will get all the reward share
+            assert_eq!(Balances::free_balance(&2), initial_balance_2);
+            assert_eq!(Balances::free_balance(&4), initial_balance_4 + reward);
+        },
+    );
 }

 #[test]
 fn bond_with_little_staked_value_bounded_by_slot_stake() {
-    // Behavior when someone bonds with little staked value.
-    // Particularly when she votes and the candidate is elected.
-    with_externalities(&mut ExtBuilder::default()
-        .validator_count(3)
-        .nominate(false)
-        .minimum_validator_count(1)
-        .build(),
-    || {
-        // setup
-        assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Controller));
-        let initial_balance_2 = Balances::free_balance(&2);
-        let initial_balance_10 = Balances::free_balance(&10);
-
-        // initial validators
-        assert_eq_uvec!(Session::validators(), vec![20, 10]);
-
-        // Stingy validator.
-        assert_ok!(Staking::bond(Origin::signed(1), 2, 1, RewardDestination::Controller));
-        assert_ok!(Staking::validate(Origin::signed(2), ValidatorPrefs::default()));
-
-        System::set_block_number(1);
-        Session::check_rotate_session(System::block_number());
-
-        // 2 is elected.
-        // and fucks up the slot stake.
-        assert_eq_uvec!(Session::validators(), vec![20, 10, 2]);
-        assert_eq!(Staking::slot_stake(), 1);
-
-        // Old ones are rewarded.
-        assert_eq!(Balances::free_balance(&10), initial_balance_10 + 10);
-        // no rewards paid to 2. This was initial election.
-        assert_eq!(Balances::free_balance(&2), initial_balance_2);
-
-        System::set_block_number(2);
-        Session::check_rotate_session(System::block_number());
-
-        assert_eq_uvec!(Session::validators(), vec![20, 10, 2]);
-        assert_eq!(Staking::slot_stake(), 1);
-
-        let reward = Staking::current_session_reward();
-        // 2 will not get the full reward, practically 1
-        assert_eq!(Balances::free_balance(&2), initial_balance_2 + reward.max(1));
-        // same for 10
-        assert_eq!(Balances::free_balance(&10), initial_balance_10 + 10 + reward.max(1));
-    });
+    // Behavior when someone bonds with little staked value.
+    // Particularly when she votes and the candidate is elected.
+    with_externalities(
+        &mut ExtBuilder::default()
+            .validator_count(3)
+            .nominate(false)
+            .minimum_validator_count(1)
+            .build(),
+        || {
+            // setup
+            assert_ok!(Staking::set_payee(
+                Origin::signed(10),
+                RewardDestination::Controller
+            ));
+            let initial_balance_2 = Balances::free_balance(&2);
+            let initial_balance_10 = Balances::free_balance(&10);
+
+            // initial validators
+            assert_eq_uvec!(Session::validators(), vec![20, 10]);
+
+            // Stingy validator.
+            assert_ok!(Staking::bond(
+                Origin::signed(1),
+                2,
+                1,
+                RewardDestination::Controller
+            ));
+            assert_ok!(Staking::validate(
+                Origin::signed(2),
+                ValidatorPrefs::default()
+            ));
+
+            System::set_block_number(1);
+            Session::check_rotate_session(System::block_number());
+
+            // 2 is elected.
+            // and fucks up the slot stake.
+            assert_eq_uvec!(Session::validators(), vec![20, 10, 2]);
+            assert_eq!(Staking::slot_stake(), 1);
+
+            // Old ones are rewarded.
+            assert_eq!(Balances::free_balance(&10), initial_balance_10 + 10);
+            // no rewards paid to 2. This was initial election.
+            assert_eq!(Balances::free_balance(&2), initial_balance_2);
+
+            System::set_block_number(2);
+            Session::check_rotate_session(System::block_number());
+
+            assert_eq_uvec!(Session::validators(), vec![20, 10, 2]);
+            assert_eq!(Staking::slot_stake(), 1);
+
+            let reward = Staking::current_session_reward();
+            // 2 will not get the full reward, practically 1
+            assert_eq!(
+                Balances::free_balance(&2),
+                initial_balance_2 + reward.max(1)
+            );
+            // same for 10
+            assert_eq!(
+                Balances::free_balance(&10),
+                initial_balance_10 + 10 + reward.max(1)
+            );
+        },
+    );
 }

-
 #[test]
 #[ignore] // Enable this once post-processing is on.
 fn phragmen_linear_worse_case_equalize() {
-    with_externalities(&mut ExtBuilder::default()
-        .nominate(false)
-        .validator_pool(true)
-        .fare(true)
-        .build(),
-    || {
-        let bond_validator = |a, b| {
-            let _ = Balances::deposit_creating(&(a-1), b);
-            assert_ok!(Staking::bond(Origin::signed(a-1), a, b, RewardDestination::Controller));
-            assert_ok!(Staking::validate(Origin::signed(a), ValidatorPrefs::default()));
-        };
-        let bond_nominator = |a, b, v| {
-            let _ = Balances::deposit_creating(&(a-1), b);
-            assert_ok!(Staking::bond(Origin::signed(a-1), a, b, RewardDestination::Controller));
-            assert_ok!(Staking::nominate(Origin::signed(a), v));
-        };
-
-        for i in &[10, 20, 30, 40] { assert_ok!(Staking::set_payee(Origin::signed(*i), RewardDestination::Controller)); }
-
-        bond_validator(50, 1000);
-        bond_validator(60, 1000);
-        bond_validator(70, 1000);
-
-        bond_nominator(2, 2000, vec![11]);
-        bond_nominator(4, 1000, vec![11, 21]);
-        bond_nominator(6, 1000, vec![21, 31]);
-        bond_nominator(8, 1000, vec![31, 41]);
-        bond_nominator(110, 1000, vec![41, 51]);
-        bond_nominator(112, 1000, vec![51, 61]);
-        bond_nominator(114, 1000, vec![61, 71]);
-
-        assert_eq_uvec!(Session::validators(), vec![40, 30]);
-        assert_ok!(Staking::set_validator_count(7));
-
-        System::set_block_number(1);
-        Session::check_rotate_session(System::block_number());
-
-        assert_eq_uvec!(Session::validators(), vec![10, 60, 40, 20, 50, 30, 70]);
-
-        // Sequential Phragmén with post processing gives
-        // 10 is elected with stake 3000.0 and score 0.00025
-        // 30 is elected with stake 2008.8712884829595 and score 0.0003333333333333333
-        // 50 is elected with stake 2000.0001049958742 and score 0.0003333333333333333
-        // 60 is elected with stake 1991.128921508789 and score 0.0004444444444444444
-        // 20 is elected with stake 2017.7421569824219 and score 0.0005277777777777777
-        // 40 is elected with stake 2000.0001049958742 and score 0.0005555555555555556
-        // 70 is elected with stake 1982.2574230340813 and score 0.0007222222222222222
-
-        assert_eq!(Staking::stakers(11).total, 3000);
-        assert_eq!(Staking::stakers(31).total, 2035);
-        assert_eq!(Staking::stakers(51).total, 2000);
-        assert_eq!(Staking::stakers(61).total, 1968);
-        assert_eq!(Staking::stakers(21).total, 2035);
-        assert_eq!(Staking::stakers(41).total, 2024);
-        assert_eq!(Staking::stakers(71).total, 1936);
-    })
+    with_externalities(
+        &mut ExtBuilder::default()
+            .nominate(false)
+            .validator_pool(true)
+            .fare(true)
+            .build(),
+        || {
+            let bond_validator = |a, b| {
+                let _ = Balances::deposit_creating(&(a - 1), b);
+                assert_ok!(Staking::bond(
+                    Origin::signed(a - 1),
+                    a,
+                    b,
+                    RewardDestination::Controller
+                ));
+                assert_ok!(Staking::validate(
+                    Origin::signed(a),
+                    ValidatorPrefs::default()
+                ));
+            };
+            let bond_nominator = |a, b, v| {
+                let _ = Balances::deposit_creating(&(a - 1), b);
+                assert_ok!(Staking::bond(
+                    Origin::signed(a - 1),
+                    a,
+                    b,
+                    RewardDestination::Controller
+                ));
+                assert_ok!(Staking::nominate(Origin::signed(a), v));
+            };
+
+            for i in &[10, 20, 30, 40] {
+                assert_ok!(Staking::set_payee(
+                    Origin::signed(*i),
+                    RewardDestination::Controller
+                ));
+            }
+
+            bond_validator(50, 1000);
+            bond_validator(60, 1000);
+            bond_validator(70, 1000);
+
+            bond_nominator(2, 2000, vec![11]);
+            bond_nominator(4, 1000, vec![11, 21]);
+            bond_nominator(6, 1000, vec![21, 31]);
+            bond_nominator(8, 1000, vec![31, 41]);
+            bond_nominator(110, 1000, vec![41, 51]);
+            bond_nominator(112, 1000, vec![51, 61]);
+            bond_nominator(114, 1000, vec![61, 71]);
+
+            assert_eq_uvec!(Session::validators(), vec![40, 30]);
+            assert_ok!(Staking::set_validator_count(7));
+
+            System::set_block_number(1);
+            Session::check_rotate_session(System::block_number());
+
+            assert_eq_uvec!(Session::validators(), vec![10, 60, 40, 20, 50, 30, 70]);
+
+            // Sequential Phragmén with post processing gives
+            // 10 is elected with stake 3000.0 and score 0.00025
+            // 30 is elected with stake 2008.8712884829595 and score 0.0003333333333333333
+            // 50 is elected with stake 2000.0001049958742 and score 0.0003333333333333333
+            // 60 is elected with stake 1991.128921508789 and score 0.0004444444444444444
+            // 20 is elected with stake 2017.7421569824219 and score 0.0005277777777777777
+            // 40 is elected with stake 2000.0001049958742 and score 0.0005555555555555556
+            // 70 is elected with stake 1982.2574230340813 and score 0.0007222222222222222
+
+            assert_eq!(Staking::stakers(11).total, 3000);
+            assert_eq!(Staking::stakers(31).total, 2035);
+            assert_eq!(Staking::stakers(51).total, 2000);
+            assert_eq!(Staking::stakers(61).total, 1968);
+            assert_eq!(Staking::stakers(21).total, 2035);
+            assert_eq!(Staking::stakers(41).total, 2024);
+            assert_eq!(Staking::stakers(71).total, 1936);
+        },
+    )
 }

 #[test]
 fn phragmen_chooses_correct_number_of_validators() {
-    with_externalities(&mut ExtBuilder::default()
-        .nominate(true)
-        .validator_pool(true)
-        .fare(true)
-        .validator_count(1)
-        .build(),
-    || {
-        assert_eq!(Staking::validator_count(), 1);
-        assert_eq!(Session::validators().len(), 1);
-
-        System::set_block_number(1);
-        Session::check_rotate_session(System::block_number());
-
-        assert_eq!(Session::validators().len(), 1);
-    })
+    with_externalities(
+        &mut ExtBuilder::default()
+            .nominate(true)
+            .validator_pool(true)
+            .fare(true)
+            .validator_count(1)
+            .build(),
+        || {
+            assert_eq!(Staking::validator_count(), 1);
+            assert_eq!(Session::validators().len(), 1);
+
+            System::set_block_number(1);
+            Session::check_rotate_session(System::block_number());
+
+            assert_eq!(Session::validators().len(), 1);
+        },
+    )
 }

-
 #[test]
 fn phragmen_score_should_be_accurate_on_large_stakes() {
-    with_externalities(&mut ExtBuilder::default()
-        .nominate(false)
-        .build()
-    , || {
-        let bond_validator = |a, b| {
-            assert_ok!(Staking::bond(Origin::signed(a-1), a, b, RewardDestination::Controller));
-            assert_ok!(Staking::validate(Origin::signed(a), ValidatorPrefs::default()));
-        };
-
-        for i in 1..=8 {
-            let _ = Balances::make_free_balance_be(&i, u64::max_value());
-        }
-
-        bond_validator(2, u64::max_value());
-        bond_validator(4, u64::max_value());
-        bond_validator(6, u64::max_value()-1);
-        bond_validator(8, u64::max_value()-2);
-
-        System::set_block_number(2);
-        Session::check_rotate_session(System::block_number());
-
-        assert_eq!(Session::validators(), vec![4, 2]);
-    })
+    with_externalities(&mut ExtBuilder::default().nominate(false).build(), || {
+        let bond_validator = |a, b| {
+            assert_ok!(Staking::bond(
+                Origin::signed(a - 1),
+                a,
+                b,
+                RewardDestination::Controller
+            ));
+            assert_ok!(Staking::validate(
+                Origin::signed(a),
+                ValidatorPrefs::default()
+            ));
+        };
+
+        for i in 1..=8 {
+            let _ = Balances::make_free_balance_be(&i, u64::max_value());
+        }
+
+        bond_validator(2, u64::max_value());
+        bond_validator(4, u64::max_value());
+        bond_validator(6, u64::max_value() - 1);
+        bond_validator(8, u64::max_value() - 2);
+
+        System::set_block_number(2);
+        Session::check_rotate_session(System::block_number());
+
+        assert_eq!(Session::validators(), vec![4, 2]);
+    })
 }

 #[test]
 fn phragmen_should_not_overflow_validators() {
-    with_externalities(&mut ExtBuilder::default()
-        .nominate(false)
-        .build()
-    , || {
-        let bond_validator = |a, b| {
-            assert_ok!(Staking::bond(Origin::signed(a-1), a, b, RewardDestination::Controller));
-            assert_ok!(Staking::validate(Origin::signed(a), ValidatorPrefs::default()));
-        };
-        let bond_nominator = |a, b, v| {
-            assert_ok!(Staking::bond(Origin::signed(a-1), a, b, RewardDestination::Controller));
-            assert_ok!(Staking::nominate(Origin::signed(a), v));
-        };
-        let check_exposure = |a| {
-            let expo = Staking::stakers(&a);
-            assert_eq!(expo.total, expo.own + expo.others.iter().map(|e| e.value).sum::<u64>());
-        };
-
-        for i in 1..=8 {
-            let _ = Balances::make_free_balance_be(&i, u64::max_value());
-        }
-
-        let _ = Staking::chill(Origin::signed(10));
-        let _ = Staking::chill(Origin::signed(20));
-
-        bond_validator(2, u64::max_value());
-        bond_validator(4, u64::max_value());
-
-        bond_nominator(6, u64::max_value()/2, vec![1, 3]);
-        bond_nominator(8, u64::max_value()/2, vec![1, 3]);
-
-        System::set_block_number(2);
-        Session::check_rotate_session(System::block_number());
-
-        assert_eq_uvec!(Session::validators(), vec![4, 2]);
-        check_exposure(4);
-        check_exposure(2);
-    })
+    with_externalities(&mut ExtBuilder::default().nominate(false).build(), || {
+        let bond_validator = |a, b| {
+            assert_ok!(Staking::bond(
+                Origin::signed(a - 1),
+                a,
+                b,
+                RewardDestination::Controller
+            ));
+            assert_ok!(Staking::validate(
+                Origin::signed(a),
+                ValidatorPrefs::default()
+            ));
+        };
+        let bond_nominator = |a, b, v| {
+            assert_ok!(Staking::bond(
+                Origin::signed(a - 1),
+                a,
+                b,
+                RewardDestination::Controller
+            ));
+            assert_ok!(Staking::nominate(Origin::signed(a), v));
+        };
+        let check_exposure = |a| {
+            let expo = Staking::stakers(&a);
+            assert_eq!(
+                expo.total,
+                expo.own + expo.others.iter().map(|e| e.value).sum::<u64>()
+            );
+        };
+
+        for i in 1..=8 {
+            let _ = Balances::make_free_balance_be(&i, u64::max_value());
+        }
+
+        let _ = Staking::chill(Origin::signed(10));
+        let _ = Staking::chill(Origin::signed(20));
+
+        bond_validator(2, u64::max_value());
+        bond_validator(4, u64::max_value());
+
+        bond_nominator(6, u64::max_value() / 2, vec![1, 3]);
+        bond_nominator(8, u64::max_value() / 2, vec![1, 3]);
+
+        System::set_block_number(2);
+        Session::check_rotate_session(System::block_number());
+
+        assert_eq_uvec!(Session::validators(), vec![4, 2]);
+        check_exposure(4);
+        check_exposure(2);
+    })
 }

 #[test]
 fn phragmen_should_not_overflow_nominators() {
-    with_externalities(&mut ExtBuilder::default()
-        .nominate(false)
-        .build()
-    , || {
-        let bond_validator = |a, b| {
-            assert_ok!(Staking::bond(Origin::signed(a-1), a, b, RewardDestination::Controller));
-            assert_ok!(Staking::validate(Origin::signed(a), ValidatorPrefs::default()));
-        };
-        let bond_nominator = |a, b, v| {
-            assert_ok!(Staking::bond(Origin::signed(a-1), a, b, RewardDestination::Controller));
-            assert_ok!(Staking::nominate(Origin::signed(a), v));
-        };
-        let check_exposure = |a| {
-            let expo = Staking::stakers(&a);
-            assert_eq!(expo.total, expo.own + expo.others.iter().map(|e| e.value).sum::<u64>());
-        };
-
-        let _ = Staking::chill(Origin::signed(10));
-        let _ = Staking::chill(Origin::signed(20));
-
-        for i in 1..=8 {
-            let _ = Balances::make_free_balance_be(&i, u64::max_value());
-        }
-
-        bond_validator(2, u64::max_value()/2);
-        bond_validator(4, u64::max_value()/2);
-
-        bond_nominator(6, u64::max_value(), vec![1, 3]);
-        bond_nominator(8, u64::max_value(), vec![1, 3]);
-
-        System::set_block_number(2);
-        Session::check_rotate_session(System::block_number());
-
-        assert_eq_uvec!(Session::validators(), vec![4, 2]);
-        check_exposure(4);
-        check_exposure(2);
-    })
+    with_externalities(&mut ExtBuilder::default().nominate(false).build(), || {
+        let bond_validator = |a, b| {
+            assert_ok!(Staking::bond(
+                Origin::signed(a - 1),
+                a,
+                b,
+                RewardDestination::Controller
+            ));
+            assert_ok!(Staking::validate(
+                Origin::signed(a),
+                ValidatorPrefs::default()
+            ));
+        };
+        let bond_nominator = |a, b, v| {
+            assert_ok!(Staking::bond(
+                Origin::signed(a - 1),
+                a,
+                b,
+                RewardDestination::Controller
+            ));
+            assert_ok!(Staking::nominate(Origin::signed(a), v));
+        };
+        let check_exposure = |a| {
+            let expo = Staking::stakers(&a);
+            assert_eq!(
+                expo.total,
+                expo.own + expo.others.iter().map(|e| e.value).sum::<u64>()
+            );
+        };
+
+        let _ = Staking::chill(Origin::signed(10));
+        let _ = Staking::chill(Origin::signed(20));
+
+        for i in 1..=8 {
+            let _ = Balances::make_free_balance_be(&i, u64::max_value());
+        }
+
+        bond_validator(2, u64::max_value() / 2);
+        bond_validator(4, u64::max_value() / 2);
+
+        bond_nominator(6, u64::max_value(), vec![1, 3]);
+        bond_nominator(8, u64::max_value(), vec![1, 3]);
+
+        System::set_block_number(2);
+        Session::check_rotate_session(System::block_number());
+
+        assert_eq_uvec!(Session::validators(), vec![4, 2]);
+        check_exposure(4);
+        check_exposure(2);
+    })
 }

 #[test]
 fn phragmen_should_not_overflow_ultimate() {
-    with_externalities(&mut ExtBuilder::default()
-        .nominate(false)
-        .build()
-    , || {
-        let bond_validator = |a, b| {
-            assert_ok!(Staking::bond(Origin::signed(a-1), a, b, RewardDestination::Controller));
-            assert_ok!(Staking::validate(Origin::signed(a), ValidatorPrefs::default()));
-        };
-        let bond_nominator = |a, b, v| {
-            assert_ok!(Staking::bond(Origin::signed(a-1), a, b, RewardDestination::Controller));
-            assert_ok!(Staking::nominate(Origin::signed(a), v));
-        };
-        let check_exposure = |a| {
-            let expo = Staking::stakers(&a);
-            assert_eq!(expo.total, expo.own + expo.others.iter().map(|e| e.value).sum::<u64>());
-        };
-
-        for i in 1..=8 {
-            let _ = Balances::make_free_balance_be(&i, u64::max_value());
-        }
-
-        bond_validator(2, u64::max_value());
-        bond_validator(4, u64::max_value());
-
-        bond_nominator(6, u64::max_value(), vec![1, 3]);
-        bond_nominator(8, u64::max_value(), vec![1, 3]);
-
-        System::set_block_number(2);
-        Session::check_rotate_session(System::block_number());
-
-        assert_eq_uvec!(Session::validators(), vec![4, 2]);
-        check_exposure(4);
-        check_exposure(2);
-    })
+    with_externalities(&mut ExtBuilder::default().nominate(false).build(), || {
+        let bond_validator = |a, b| {
+            assert_ok!(Staking::bond(
+                Origin::signed(a - 1),
+                a,
+                b,
+                RewardDestination::Controller
+            ));
+            assert_ok!(Staking::validate(
+                Origin::signed(a),
+                ValidatorPrefs::default()
+            ));
+        };
+        let bond_nominator = |a, b, v| {
+            assert_ok!(Staking::bond(
+                Origin::signed(a - 1),
+                a,
+                b,
+                RewardDestination::Controller
+            ));
+            assert_ok!(Staking::nominate(Origin::signed(a), v));
+        };
+        let check_exposure = |a| {
+            let expo = Staking::stakers(&a);
+            assert_eq!(
+                expo.total,
+                expo.own + expo.others.iter().map(|e| e.value).sum::<u64>()
+            );
+        };
+
+        for i in 1..=8 {
+            let _ = Balances::make_free_balance_be(&i, u64::max_value());
+        }
+
+        bond_validator(2, u64::max_value());
+        bond_validator(4, u64::max_value());
+
+        bond_nominator(6, u64::max_value(), vec![1, 3]);
+        bond_nominator(8, u64::max_value(), vec![1, 3]);
+
+        System::set_block_number(2);
+        Session::check_rotate_session(System::block_number());
+
+        assert_eq_uvec!(Session::validators(), vec![4, 2]);
+        check_exposure(4);
+        check_exposure(2);
+    })
 }
diff --git a/srml/sudo/src/lib.rs b/srml/sudo/src/lib.rs
index 88e3a9c965..810547fcb4 100644
--- a/srml/sudo/src/lib.rs
+++ b/srml/sudo/src/lib.rs
@@ -124,63 +124,68 @@
 #![cfg_attr(not(feature = "std"), no_std)]

-use sr_std::prelude::*;
 use sr_primitives::traits::StaticLookup;
-use srml_support::{StorageValue, Parameter, Dispatchable, decl_module, decl_event, decl_storage, ensure};
+use sr_std::prelude::*;
+use srml_support::{
+    decl_event, decl_module, decl_storage, ensure, Dispatchable, Parameter, StorageValue,
+};
 use system::ensure_signed;

 pub trait Trait: system::Trait {
-    /// The overarching event type.
-    type Event: From<Event<Self>> + Into<<Self as system::Trait>::Event>;
+    /// The overarching event type.
+    type Event: From<Event<Self>> + Into<<Self as system::Trait>::Event>;

-    /// A sudo-able call.
-    type Proposal: Parameter + Dispatchable<Origin = Self::Origin>;
+    /// A sudo-able call.
+    type Proposal: Parameter + Dispatchable<Origin = Self::Origin>;
 }

 decl_module! {
-    // Simple declaration of the `Module` type. Lets the macro know what it's working on.
-    pub struct Module<T: Trait> for enum Call where origin: T::Origin {
-        fn deposit_event<T>() = default;
+    // Simple declaration of the `Module` type. Lets the macro know what it's working on.
+    pub struct Module<T: Trait> for enum Call where origin: T::Origin {
+        fn deposit_event<T>() = default;

-        /// Authenticates the sudo key and dispatches a function call with `Root` origin.
-        ///
-        /// The dispatch origin for this call must be _Signed_.
-        fn sudo(origin, proposal: Box<T::Proposal>) {
-            // This is a public call, so we ensure that the origin is some signed account.
-            let sender = ensure_signed(origin)?;
-            ensure!(sender == Self::key(), "only the current sudo key can sudo");
+        /// Authenticates the sudo key and dispatches a function call with `Root` origin.
+        ///
+        /// The dispatch origin for this call must be _Signed_.
+        fn sudo(origin, proposal: Box<T::Proposal>) {
+            // This is a public call, so we ensure that the origin is some signed account.
+            let sender = ensure_signed(origin)?;
+            ensure!(sender == Self::key(), "only the current sudo key can sudo");

-            let ok = proposal.dispatch(system::RawOrigin::Root.into()).is_ok();
-            Self::deposit_event(RawEvent::Sudid(ok));
-        }
+            let ok = proposal.dispatch(system::RawOrigin::Root.into()).is_ok();
+            Self::deposit_event(RawEvent::Sudid(ok));
+        }

-        /// Authenticates the current sudo key and sets the given AccountId (`new`) as the new sudo key.
-        ///
-        /// The dispatch origin for this call must be _Signed_.
-        fn set_key(origin, new: <T::Lookup as StaticLookup>::Source) {
-            // This is a public call, so we ensure that the origin is some signed account.
-            let sender = ensure_signed(origin)?;
-            ensure!(sender == Self::key(), "only the current sudo key can change the sudo key");
-            let new = T::Lookup::lookup(new)?;
+        /// Authenticates the current sudo key and sets the given AccountId (`new`) as the new sudo key.
+        ///
+        /// The dispatch origin for this call must be _Signed_.
+        fn set_key(origin, new: <T::Lookup as StaticLookup>::Source) {
+            // This is a public call, so we ensure that the origin is some signed account.
+            let sender = ensure_signed(origin)?;
+            ensure!(sender == Self::key(), "only the current sudo key can change the sudo key");
+            let new = T::Lookup::lookup(new)?;

-            Self::deposit_event(RawEvent::KeyChanged(Self::key()));
-            <Key<T>>::put(new);
-        }
-    }
+            Self::deposit_event(RawEvent::KeyChanged(Self::key()));
+            <Key<T>>::put(new);
+        }
+    }
 }

 decl_event!(
-    pub enum Event<T> where AccountId = <T as system::Trait>::AccountId {
-        /// A sudo just took place.
-        Sudid(bool),
-        /// The sudoer just switched identity; the old key is supplied.
-        KeyChanged(AccountId),
-    }
+    pub enum Event<T>
+    where
+        AccountId = <T as system::Trait>::AccountId,
+    {
+        /// A sudo just took place.
+        Sudid(bool),
+        /// The sudoer just switched identity; the old key is supplied.
+        KeyChanged(AccountId),
+    }
 );

 decl_storage! {
-    trait Store for Module<T: Trait> as Sudo {
-        /// The `AccountId` of the sudo key.
-        Key get(key) config(): T::AccountId;
-    }
+    trait Store for Module<T: Trait> as Sudo {
+        /// The `AccountId` of the sudo key.
+        Key get(key) config(): T::AccountId;
+    }
 }
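For reference, this is how the two dispatchables above are typically driven from a runtime test. Everything here is sketched against a hypothetical mock runtime: `new_test_ext`, the `Test`/`Origin`/`Sudo` aliases and the `Call::privileged_op` proposal are assumptions, not something this patch adds:

#[test]
fn sudo_requires_the_configured_key() {
    with_externalities(&mut new_test_ext(1), || {
        // Account 1 is the configured sudo key, so its call dispatches as Root.
        // `privileged_op` is a stand-in proposal from the hypothetical mock runtime.
        assert_ok!(Sudo::sudo(Origin::signed(1), Box::new(Call::privileged_op())));

        // Rotating the key locks out the old one.
        assert_ok!(Sudo::set_key(Origin::signed(1), 2));
        assert_eq!(Sudo::key(), 2);
        assert!(Sudo::sudo(Origin::signed(1), Box::new(Call::privileged_op())).is_err());
    });
}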
diff --git a/srml/support/procedural/src/lib.rs b/srml/support/procedural/src/lib.rs
index 342745efde..a5bacd7151 100644
--- a/srml/support/procedural/src/lib.rs
+++ b/srml/support/procedural/src/lib.rs
@@ -18,7 +18,7 @@
 //! Proc macro of Support code for the runtime.
 // end::description[]

-#![recursion_limit="256"]
+#![recursion_limit = "256"]

 extern crate proc_macro;

@@ -102,5 +102,5 @@ use proc_macro::TokenStream;
 /// `<Dummy<T>>::get()` or `Dummy::<T>::get()`
 #[proc_macro]
 pub fn decl_storage(input: TokenStream) -> TokenStream {
-    storage::transformation::decl_storage_impl(input)
+    storage::transformation::decl_storage_impl(input)
 }
diff --git a/srml/support/procedural/src/storage/impls.rs b/srml/support/procedural/src/storage/impls.rs
index 5a8f7f65d5..a784d80dbc 100644
--- a/srml/support/procedural/src/storage/impls.rs
+++ b/srml/support/procedural/src/storage/impls.rs
@@ -14,620 +14,662 @@
 // You should have received a copy of the GNU General Public License
 // along with Substrate. If not, see <http://www.gnu.org/licenses/>.

+use crate::storage::transformation::{DeclStorageTypeInfos, InstanceOpts};
 use proc_macro2::TokenStream as TokenStream2;
-use syn;
 use quote::quote;
-use crate::storage::transformation::{DeclStorageTypeInfos, InstanceOpts};
+use syn;

 pub fn option_unwrap(is_option: bool) -> TokenStream2 {
-    if !is_option {
-        // raw type case
-        quote!( unwrap_or_else )
-    } else {
-        // Option<> type case
-        quote!( or_else )
-    }
+    if !is_option {
+        // raw type case
+        quote!(unwrap_or_else)
+    } else {
+        // Option<> type case
+        quote!(or_else)
+    }
 }

 // prefix for consts in trait Instance
 pub(crate) const PREFIX_FOR: &str = "PREFIX_FOR_";
 pub(crate) const HEAD_KEY_FOR: &str = "HEAD_KEY_FOR_";

-pub(crate) struct Impls<'a, I: Iterator<Item = syn::Meta>> {
-    pub scrate: &'a TokenStream2,
-    pub visibility: &'a syn::Visibility,
-    pub traitinstance: &'a syn::Ident,
-    pub traittype: &'a syn::TypeParamBound,
-    pub instance_opts: &'a InstanceOpts,
-    pub type_infos: DeclStorageTypeInfos<'a>,
-    pub fielddefault: TokenStream2,
-    pub prefix: String,
-    pub cratename: &'a syn::Ident,
-    pub name: &'a syn::Ident,
-    pub attrs: I,
+pub(crate) struct Impls<'a, I: Iterator<Item = syn::Meta>> {
+    pub scrate: &'a TokenStream2,
+    pub visibility: &'a syn::Visibility,
+    pub traitinstance: &'a syn::Ident,
+    pub traittype: &'a syn::TypeParamBound,
+    pub instance_opts: &'a InstanceOpts,
+    pub type_infos: DeclStorageTypeInfos<'a>,
+    pub fielddefault: TokenStream2,
+    pub prefix: String,
+    pub cratename: &'a syn::Ident,
+    pub name: &'a syn::Ident,
+    pub attrs: I,
 }

-impl<'a, I: Iterator<Item = syn::Meta>> Impls<'a, I> {
-    pub fn simple_value(self) -> TokenStream2 {
-        let Self {
-            scrate,
-            visibility,
-            traitinstance,
-            traittype,
-            instance_opts,
-            type_infos,
-            fielddefault,
-            prefix,
-            name,
-            attrs,
-            ..
-        } = self;
-        let DeclStorageTypeInfos { typ, value_type, is_option, .. } = type_infos;
-        let option_simple_1 = option_unwrap(is_option);
-
-        let mutate_impl = if !is_option {
-            quote!{
-                <Self as #scrate::storage::generator::StorageValue<#typ>>::put(&val, storage)
-            }
-        } else {
-            quote!{
-                match val {
-                    Some(ref val) => <Self as #scrate::storage::generator::StorageValue<#typ>>::put(&val, storage),
-                    None => <Self as #scrate::storage::generator::StorageValue<#typ>>::kill(storage),
-                }
-            }
-        };
-
-        let InstanceOpts {
-            comma_instance,
-            equal_default_instance,
-            bound_instantiable,
-            instance,
-            ..
-        } = instance_opts;
-
-        let final_prefix = if let Some(instance) = instance {
-            let const_name = syn::Ident::new(&format!("{}{}", PREFIX_FOR, name.to_string()), proc_macro2::Span::call_site());
-            quote!{ #instance::#const_name.as_bytes() }
-        } else {
-            quote!{ #prefix.as_bytes() }
-        };
-
-        // generator for value
-        quote!{
-            #( #[ #attrs ] )*
-            #visibility struct #name<#traitinstance: #traittype, #instance #bound_instantiable #equal_default_instance>(#scrate::storage::generator::PhantomData<(#traitinstance #comma_instance)>);
-
-            impl<#traitinstance: #traittype, #instance #bound_instantiable> #scrate::storage::generator::StorageValue<#typ> for #name<#traitinstance, #instance> {
-                type Query = #value_type;
-
-                /// Get the storage key.
-                fn key() -> &'static [u8] {
-                    #final_prefix
-                }
-
-                /// Load the value from the provided storage instance.
-                fn get<S: #scrate::GenericStorage>(storage: &S) -> Self::Query {
-                    storage.get(<Self as #scrate::storage::generator::StorageValue<#typ>>::key())
-                        .#option_simple_1(|| #fielddefault)
-                }
-
-                /// Take a value from storage, removing it afterwards.
-                fn take<S: #scrate::GenericStorage>(storage: &S) -> Self::Query {
-                    storage.take(<Self as #scrate::storage::generator::StorageValue<#typ>>::key())
-                        .#option_simple_1(|| #fielddefault)
-                }
-
-                /// Mutate the value under a key.
-                fn mutate<R, F: FnOnce(&mut Self::Query) -> R, S: #scrate::GenericStorage>(f: F, storage: &S) -> R {
-                    let mut val = <Self as #scrate::storage::generator::StorageValue<#typ>>::get(storage);
-
-                    let ret = f(&mut val);
-                    #mutate_impl ;
-                    ret
-                }
-            }
-        }
-    }
-
-    pub fn map(self, kty: &syn::Type) -> TokenStream2 {
-        let Self {
-            scrate,
-            visibility,
-            traitinstance,
-            traittype,
-            instance_opts,
-            type_infos,
-            fielddefault,
-            prefix,
-            name,
-            attrs,
-            ..
-        } = self;
-        let DeclStorageTypeInfos { typ, value_type, is_option, .. } = type_infos;
-        let option_simple_1 = option_unwrap(is_option);
-
-        let mutate_impl = if !is_option {
-            quote!{
-                <Self as #scrate::storage::generator::StorageMap<#kty, #typ>>::insert(key, &val, storage)
-            }
-        } else {
-            quote!{
-                match val {
-                    Some(ref val) => <Self as #scrate::storage::generator::StorageMap<#kty, #typ>>::insert(key, &val, storage),
-                    None => <Self as #scrate::storage::generator::StorageMap<#kty, #typ>>::remove(key, storage),
-                }
-            }
-        };
-
-        let InstanceOpts {
-            comma_instance,
-            equal_default_instance,
-            bound_instantiable,
-            instance,
-            ..
-        } = instance_opts;
-
-        let final_prefix = if let Some(instance) = instance {
-            let const_name = syn::Ident::new(&format!("{}{}", PREFIX_FOR, name.to_string()), proc_macro2::Span::call_site());
-            quote!{ #instance::#const_name.as_bytes() }
-        } else {
-            quote!{ #prefix.as_bytes() }
-        };
-
-        // generator for map
-        quote!{
-            #( #[ #attrs ] )*
-            #visibility struct #name<#traitinstance: #traittype, #instance #bound_instantiable #equal_default_instance>(#scrate::storage::generator::PhantomData<(#traitinstance #comma_instance)>);
-
-            impl<#traitinstance: #traittype, #instance #bound_instantiable> #scrate::storage::generator::StorageMap<#kty, #typ> for #name<#traitinstance, #instance> {
-                type Query = #value_type;
-
-                /// Get the prefix key in storage.
-                fn prefix() -> &'static [u8] {
-                    #final_prefix
-                }
-
-                /// Get the storage key used to fetch a value corresponding to a specific key.
-                fn key_for(x: &#kty) -> #scrate::rstd::vec::Vec<u8> {
-                    let mut key = <Self as #scrate::storage::generator::StorageMap<#kty, #typ>>::prefix().to_vec();
-                    #scrate::codec::Encode::encode_to(x, &mut key);
-                    key
-                }
-
-                /// Load the value associated with the given key from the map.
-                fn get<S: #scrate::GenericStorage>(key: &#kty, storage: &S) -> Self::Query {
-                    let key = <Self as #scrate::storage::generator::StorageMap<#kty, #typ>>::key_for(key);
-                    storage.get(&key[..]).#option_simple_1(|| #fielddefault)
-                }
-
-                /// Take the value, reading and removing it.
-                fn take<S: #scrate::GenericStorage>(key: &#kty, storage: &S) -> Self::Query {
-                    let key = <Self as #scrate::storage::generator::StorageMap<#kty, #typ>>::key_for(key);
-                    storage.take(&key[..]).#option_simple_1(|| #fielddefault)
-                }
-
-                /// Mutate the value under a key
-                fn mutate<R, F: FnOnce(&mut Self::Query) -> R, S: #scrate::GenericStorage>(key: &#kty, f: F, storage: &S) -> R {
-                    let mut val = <Self as #scrate::storage::generator::StorageMap<#kty, #typ>>::get(key, storage);
-
-                    let ret = f(&mut val);
-                    #mutate_impl ;
-                    ret
-                }
-
-            }
-        }
-    }
-
-    pub fn linked_map(self, kty: &syn::Type) -> TokenStream2 {
-        let Self {
-            scrate,
-            visibility,
-            traitinstance,
-            traittype,
-            instance_opts,
-            type_infos,
-            fielddefault,
-            prefix,
-            name,
-            attrs,
-            ..
-        } = self;
-
-        let InstanceOpts {
-            comma_instance,
-            equal_default_instance,
-            bound_instantiable,
-            instance,
-            ..
-        } = instance_opts;
-
-        let final_prefix = if let Some(instance) = instance {
-            let const_name = syn::Ident::new(&format!("{}{}", PREFIX_FOR, name.to_string()), proc_macro2::Span::call_site());
-            quote!{ #instance::#const_name.as_bytes() }
-        } else {
-            quote!{ #prefix.as_bytes() }
-        };
-
-        // make sure to use different prefix for head and elements.
-        let final_head_key = if let Some(instance) = instance {
-            let const_name = syn::Ident::new(&format!("{}{}", HEAD_KEY_FOR, name.to_string()), proc_macro2::Span::call_site());
-            quote!{ #instance::#const_name.as_bytes() }
-        } else {
-            let final_head_key = format!("head of {}", prefix);
-            quote!{ #final_head_key.as_bytes() }
-        };
-
-        let DeclStorageTypeInfos { typ, value_type, is_option, .. } = type_infos;
-        let option_simple_1 = option_unwrap(is_option);
-        let name_lowercase = name.to_string().to_lowercase();
-        let inner_module = syn::Ident::new(&format!("__linked_map_details_for_{}_do_not_use", name_lowercase), name.span());
-        let linkage = syn::Ident::new(&format!("__LinkageFor{}DoNotUse", name), name.span());
-        let phantom_data = quote! { #scrate::storage::generator::PhantomData };
-        let as_map = quote!{ <Self as #scrate::storage::generator::StorageMap<#kty, #typ>> };
-        let put_or_insert = quote! {
-            match linkage {
-                Some(linkage) => storage.put(key_for, &(val, linkage)),
-                None => #as_map::insert(key, &val, storage),
-            }
-        };
-        let mutate_impl = if !type_infos.is_option {
-            put_or_insert
-        } else {
-            quote! {
-                match val {
-                    Some(ref val) => #put_or_insert,
-                    None => #as_map::remove(key, storage),
-                }
-            }
-        };
-
-        // generator for linked map
-        let helpers = quote! {
-            /// Linkage data of an element (it's successor and predecessor)
-            #[derive(#scrate::codec::Encode, #scrate::codec::Decode)]
-            pub(crate) struct #linkage<Key> {
-                /// Previous element key in storage (None for the first element)
-                pub previous: Option<Key>,
-                /// Next element key in storage (None for the last element)
-                pub next: Option<Key>,
-            }
-
-            mod #inner_module {
-                use super::*;
-
-                /// Re-exported version of linkage to overcome proc-macro derivation issue.
-                pub(crate) use super::#linkage as Linkage;
-
-                impl<Key> Default for Linkage<Key> {
-                    fn default() -> Self {
-                        Self {
-                            previous: None,
-                            next: None,
-                        }
-                    }
-                }
-
-                /// A key-value pair iterator for enumerable map.
-                pub(crate) struct Enumerator<'a, S, K, V> {
-                    pub storage: &'a S,
-                    pub next: Option<K>,
-                    pub _data: #phantom_data<V>,
-                }
-
-                impl<'a, S: #scrate::GenericStorage, #traitinstance: #traittype, #instance #bound_instantiable> Iterator for Enumerator<'a, S, #kty, (#typ, #traitinstance, #instance)>
-                    where #traitinstance: 'a
-                {
-                    type Item = (#kty, #typ);
-
-                    fn next(&mut self) -> Option<Self::Item> {
-                        let next = self.next.take()?;
-                        let key_for = <#name<#traitinstance, #instance> as #scrate::storage::generator::StorageMap<#kty, #typ>>::key_for(&next);
-                        let (val, linkage): (#typ, Linkage<#kty>) = self.storage.get(&*key_for)
-                            .expect("previous/next only contain existing entires; we enumerate using next; entry exists; qed");
-                        self.next = linkage.next;
-                        Some((next, val))
-                    }
-                }
-
-                pub(crate) trait Utils<#traitinstance: #traittype, #instance #bound_instantiable> {
-                    /// Update linkage when this element is removed.
-                    ///
-                    /// Takes care of updating previous and next elements points
-                    /// as well as updates head if the element is first or last.
-                    fn remove_linkage<S: #scrate::GenericStorage>(linkage: Linkage<#kty>, storage: &S);
-
-                    /// Read the contained data and it's linkage.
-                    fn read_with_linkage<S: #scrate::GenericStorage>(storage: &S, key: &[u8]) -> Option<(#value_type, Linkage<#kty>)>;
-
-                    /// Generate linkage for newly inserted element.
-                    ///
-                    /// Takes care of updating head and previous head's pointer.
-                    fn new_head_linkage<S: #scrate::GenericStorage>(
-                        storage: &S,
-                        key: &#kty,
-                    ) -> Linkage<#kty>;
-
-                    /// Read current head pointer.
-                    fn read_head<S: #scrate::GenericStorage>(storage: &S) -> Option<#kty>;
-
-                    /// Overwrite current head pointer.
-                    ///
-                    /// If `None` is given head is removed from storage.
- fn write_head(storage: &S, head: Option<&#kty>); - } - } - }; - - let structure = quote! { - #( #[ #attrs ] )* - #visibility struct #name<#traitinstance: #traittype, #instance #bound_instantiable #equal_default_instance>(#phantom_data<(#traitinstance #comma_instance)>); - - impl<#traitinstance: #traittype, #instance #bound_instantiable> self::#inner_module::Utils<#traitinstance, #instance> for #name<#traitinstance, #instance> { - fn remove_linkage( - linkage: self::#inner_module::Linkage<#kty>, - storage: &S, - ) { - use self::#inner_module::Utils; - - let next_key = linkage.next.as_ref().map(|x| #as_map::key_for(x)); - let prev_key = linkage.previous.as_ref().map(|x| #as_map::key_for(x)); - - if let Some(prev_key) = prev_key { - // Retrieve previous element and update `next` - let mut res = Self::read_with_linkage(storage, &*prev_key) - .expect("Linkage is updated in case entry is removed; it always points to existing keys; qed"); - res.1.next = linkage.next; - storage.put(&*prev_key, &res); - } else { - // we were first so let's update the head - Self::write_head(storage, linkage.next.as_ref()); - } - - if let Some(next_key) = next_key { - // Update previous of next element - let mut res = Self::read_with_linkage(storage, &*next_key) - .expect("Linkage is updated in case entry is removed; it always points to existing keys; qed"); - res.1.previous = linkage.previous; - storage.put(&*next_key, &res); - } - } - - fn read_with_linkage( - storage: &S, - key: &[u8], - ) -> Option<(#value_type, self::#inner_module::Linkage<#kty>)> { - storage.get(key) - } - - fn new_head_linkage( - storage: &S, - key: &#kty, - ) -> self::#inner_module::Linkage<#kty> { - use self::#inner_module::Utils; - - if let Some(head) = Self::read_head(storage) { - // update previous head predecessor - { - let head_key = #as_map::key_for(&head); +impl<'a, I: Iterator> Impls<'a, I> { + pub fn simple_value(self) -> TokenStream2 { + let Self { + scrate, + visibility, + traitinstance, + traittype, + instance_opts, + type_infos, + fielddefault, + prefix, + name, + attrs, + .. + } = self; + let DeclStorageTypeInfos { + typ, + value_type, + is_option, + .. + } = type_infos; + let option_simple_1 = option_unwrap(is_option); + + let mutate_impl = if !is_option { + quote! { + >::put(&val, storage) + } + } else { + quote! { + match val { + Some(ref val) => >::put(&val, storage), + None => >::kill(storage), + } + } + }; + + let InstanceOpts { + comma_instance, + equal_default_instance, + bound_instantiable, + instance, + .. + } = instance_opts; + + let final_prefix = if let Some(instance) = instance { + let const_name = syn::Ident::new( + &format!("{}{}", PREFIX_FOR, name.to_string()), + proc_macro2::Span::call_site(), + ); + quote! { #instance::#const_name.as_bytes() } + } else { + quote! { #prefix.as_bytes() } + }; + + // generator for value + quote! { + #( #[ #attrs ] )* + #visibility struct #name<#traitinstance: #traittype, #instance #bound_instantiable #equal_default_instance>(#scrate::storage::generator::PhantomData<(#traitinstance #comma_instance)>); + + impl<#traitinstance: #traittype, #instance #bound_instantiable> #scrate::storage::generator::StorageValue<#typ> for #name<#traitinstance, #instance> { + type Query = #value_type; + + /// Get the storage key. + fn key() -> &'static [u8] { + #final_prefix + } + + /// Load the value from the provided storage instance. + fn get(storage: &S) -> Self::Query { + storage.get(>::key()) + .#option_simple_1(|| #fielddefault) + } + + /// Take a value from storage, removing it afterwards. 
+    pub fn map(self, kty: &syn::Type) -> TokenStream2 {
+        let Self {
+            scrate,
+            visibility,
+            traitinstance,
+            traittype,
+            instance_opts,
+            type_infos,
+            fielddefault,
+            prefix,
+            name,
+            attrs,
+            ..
+        } = self;
+        let DeclStorageTypeInfos {
+            typ,
+            value_type,
+            is_option,
+            ..
+        } = type_infos;
+        let option_simple_1 = option_unwrap(is_option);
+
+        let mutate_impl = if !is_option {
+            quote! {
+                <Self as #scrate::storage::generator::StorageMap<#kty, #typ>>::insert(key, &val, storage)
+            }
+        } else {
+            quote! {
+                match val {
+                    Some(ref val) => <Self as #scrate::storage::generator::StorageMap<#kty, #typ>>::insert(key, &val, storage),
+                    None => <Self as #scrate::storage::generator::StorageMap<#kty, #typ>>::remove(key, storage),
+                }
+            }
+        };
+
+        let InstanceOpts {
+            comma_instance,
+            equal_default_instance,
+            bound_instantiable,
+            instance,
+            ..
+        } = instance_opts;
+
+        let final_prefix = if let Some(instance) = instance {
+            let const_name = syn::Ident::new(
+                &format!("{}{}", PREFIX_FOR, name.to_string()),
+                proc_macro2::Span::call_site(),
+            );
+            quote! { #instance::#const_name.as_bytes() }
+        } else {
+            quote! { #prefix.as_bytes() }
+        };
+
+        // generator for map
+        quote! {
+            #( #[ #attrs ] )*
+            #visibility struct #name<#traitinstance: #traittype, #instance #bound_instantiable #equal_default_instance>(#scrate::storage::generator::PhantomData<(#traitinstance #comma_instance)>);
+
+            impl<#traitinstance: #traittype, #instance #bound_instantiable> #scrate::storage::generator::StorageMap<#kty, #typ> for #name<#traitinstance, #instance> {
+                type Query = #value_type;
+
+                /// Get the prefix key in storage.
+                fn prefix() -> &'static [u8] {
+                    #final_prefix
+                }
+
+                /// Get the storage key used to fetch a value corresponding to a specific key.
+                fn key_for(x: &#kty) -> #scrate::rstd::vec::Vec<u8> {
+                    let mut key = <Self as #scrate::storage::generator::StorageMap<#kty, #typ>>::prefix().to_vec();
+                    #scrate::codec::Encode::encode_to(x, &mut key);
+                    key
+                }
+
+                /// Load the value associated with the given key from the map.
+                fn get<S: #scrate::GenericStorage>(key: &#kty, storage: &S) -> Self::Query {
+                    let key = <Self as #scrate::storage::generator::StorageMap<#kty, #typ>>::key_for(key);
+                    storage.get(&key[..]).#option_simple_1(|| #fielddefault)
+                }
+
+                /// Take the value, reading and removing it.
+                fn take<S: #scrate::GenericStorage>(key: &#kty, storage: &S) -> Self::Query {
+                    let key = <Self as #scrate::storage::generator::StorageMap<#kty, #typ>>::key_for(key);
+                    storage.take(&key[..]).#option_simple_1(|| #fielddefault)
+                }
+
+                /// Mutate the value under a key.
+                fn mutate<F: FnOnce(&mut Self::Query) -> R, S: #scrate::GenericStorage>(key: &#kty, f: F, storage: &S) -> R {
+                    let mut val = <Self as #scrate::storage::generator::StorageMap<#kty, #typ>>::get(key, storage);
+
+                    let ret = f(&mut val);
+                    #mutate_impl ;
+                    ret
+                }
+
+            }
+        }
+    }
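`key_for` above is the core of the map generator: an entry's storage key is the map prefix followed by the SCALE encoding of its key, which for a `u32` is exactly its four little-endian bytes. A self-contained sketch of that layout (the prefix string and key type are invented, and `to_le_bytes` stands in for the codec):

    fn key_for(prefix: &[u8], key: u32) -> Vec<u8> {
        let mut storage_key = prefix.to_vec();             // `prefix().to_vec()`
        storage_key.extend_from_slice(&key.to_le_bytes()); // `Encode::encode_to(x, &mut key)`
        storage_key
    }

    fn main() {
        let a = key_for(b"Example Balances", 1);
        let b = key_for(b"Example Balances", 2);
        assert_eq!(a[..16], *b"Example Balances"); // entries share the map prefix
        assert_ne!(a, b);                          // and differ in the key suffix
    }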
+    pub fn linked_map(self, kty: &syn::Type) -> TokenStream2 {
+        let Self {
+            scrate,
+            visibility,
+            traitinstance,
+            traittype,
+            instance_opts,
+            type_infos,
+            fielddefault,
+            prefix,
+            name,
+            attrs,
+            ..
+        } = self;
+
+        let InstanceOpts {
+            comma_instance,
+            equal_default_instance,
+            bound_instantiable,
+            instance,
+            ..
+        } = instance_opts;
+
+        let final_prefix = if let Some(instance) = instance {
+            let const_name = syn::Ident::new(
+                &format!("{}{}", PREFIX_FOR, name.to_string()),
+                proc_macro2::Span::call_site(),
+            );
+            quote! { #instance::#const_name.as_bytes() }
+        } else {
+            quote! { #prefix.as_bytes() }
+        };
+
+        // make sure to use a different prefix for the head and the elements.
+        let final_head_key = if let Some(instance) = instance {
+            let const_name = syn::Ident::new(
+                &format!("{}{}", HEAD_KEY_FOR, name.to_string()),
+                proc_macro2::Span::call_site(),
+            );
+            quote! { #instance::#const_name.as_bytes() }
+        } else {
+            let final_head_key = format!("head of {}", prefix);
+            quote! { #final_head_key.as_bytes() }
+        };
+
+        let DeclStorageTypeInfos {
+            typ,
+            value_type,
+            is_option,
+            ..
+        } = type_infos;
+        let option_simple_1 = option_unwrap(is_option);
+        let name_lowercase = name.to_string().to_lowercase();
+        let inner_module = syn::Ident::new(
+            &format!("__linked_map_details_for_{}_do_not_use", name_lowercase),
+            name.span(),
+        );
+        let linkage = syn::Ident::new(&format!("__LinkageFor{}DoNotUse", name), name.span());
+        let phantom_data = quote! { #scrate::storage::generator::PhantomData };
+        let as_map = quote! { <Self as #scrate::storage::generator::StorageMap<#kty, #typ>> };
+        let put_or_insert = quote! {
+            match linkage {
+                Some(linkage) => storage.put(key_for, &(val, linkage)),
+                None => #as_map::insert(key, &val, storage),
+            }
+        };
+        let mutate_impl = if !type_infos.is_option {
+            put_or_insert
+        } else {
+            quote! {
+                match val {
+                    Some(ref val) => #put_or_insert,
+                    None => #as_map::remove(key, storage),
+                }
+            }
+        };
+
+        // generator for linked map
+        let helpers = quote! {
+            /// Linkage data of an element (its successor and predecessor)
+            #[derive(#scrate::codec::Encode, #scrate::codec::Decode)]
+            pub(crate) struct #linkage<Key> {
+                /// Previous element key in storage (None for the first element)
+                pub previous: Option<Key>,
+                /// Next element key in storage (None for the last element)
+                pub next: Option<Key>,
+            }
+
+            mod #inner_module {
+                use super::*;
+
+                /// Re-exported version of linkage to overcome proc-macro derivation issue.
+                pub(crate) use super::#linkage as Linkage;
+
+                impl<Key> Default for Linkage<Key> {
+                    fn default() -> Self {
+                        Self {
+                            previous: None,
+                            next: None,
+                        }
+                    }
+                }
+
+                /// A key-value pair iterator for enumerable map.
+                pub(crate) struct Enumerator<'a, S, K, V> {
+                    pub storage: &'a S,
+                    pub next: Option<K>,
+                    pub _data: #phantom_data<V>,
+                }
+
+                impl<'a, S: #scrate::GenericStorage, #traitinstance: #traittype, #instance #bound_instantiable> Iterator for Enumerator<'a, S, #kty, (#typ, #traitinstance, #instance)>
+                    where #traitinstance: 'a
+                {
+                    type Item = (#kty, #typ);
+
+                    fn next(&mut self) -> Option<Self::Item> {
+                        let next = self.next.take()?;
+                        let key_for = <super::#name<#traitinstance, #instance> as #scrate::storage::generator::StorageMap<#kty, #typ>>::key_for(&next);
+                        let (val, linkage): (#typ, Linkage<#kty>) = self.storage.get(&*key_for)
+                            .expect("previous/next only contain existing entries; we enumerate using next; entry exists; qed");
+                        self.next = linkage.next;
+                        Some((next, val))
+                    }
+                }
+
+                pub(crate) trait Utils<#traitinstance: #traittype, #instance #bound_instantiable> {
+                    /// Update linkage when this element is removed.
+                    ///
+                    /// Takes care of updating the previous and next element pointers,
+                    /// as well as updating the head if the element is first or last.
+                    fn remove_linkage<S: #scrate::GenericStorage>(linkage: Linkage<#kty>, storage: &S);
+
+                    /// Read the contained data and its linkage.
+                    fn read_with_linkage<S: #scrate::GenericStorage>(storage: &S, key: &[u8]) -> Option<(#value_type, Linkage<#kty>)>;
+
+                    /// Generate linkage for a newly inserted element.
+                    ///
+                    /// Takes care of updating the head and the previous head's pointer.
+                    fn new_head_linkage<S: #scrate::GenericStorage>(
+                        storage: &S,
+                        key: &#kty,
+                    ) -> Linkage<#kty>;
+
+                    /// Read the current head pointer.
+                    fn read_head<S: #scrate::GenericStorage>(storage: &S) -> Option<#kty>;
+
+                    /// Overwrite the current head pointer.
+                    ///
+                    /// If `None` is given, the head is removed from storage.
+                    fn write_head<S: #scrate::GenericStorage>(storage: &S, head: Option<&#kty>);
+                }
+            }
+        };
+
+        let structure = quote!
{ + #( #[ #attrs ] )* + #visibility struct #name<#traitinstance: #traittype, #instance #bound_instantiable #equal_default_instance>(#phantom_data<(#traitinstance #comma_instance)>); + + impl<#traitinstance: #traittype, #instance #bound_instantiable> self::#inner_module::Utils<#traitinstance, #instance> for #name<#traitinstance, #instance> { + fn remove_linkage( + linkage: self::#inner_module::Linkage<#kty>, + storage: &S, + ) { + use self::#inner_module::Utils; + + let next_key = linkage.next.as_ref().map(|x| #as_map::key_for(x)); + let prev_key = linkage.previous.as_ref().map(|x| #as_map::key_for(x)); + + if let Some(prev_key) = prev_key { + // Retrieve previous element and update `next` + let mut res = Self::read_with_linkage(storage, &*prev_key) + .expect("Linkage is updated in case entry is removed; it always points to existing keys; qed"); + res.1.next = linkage.next; + storage.put(&*prev_key, &res); + } else { + // we were first so let's update the head + Self::write_head(storage, linkage.next.as_ref()); + } + + if let Some(next_key) = next_key { + // Update previous of next element + let mut res = Self::read_with_linkage(storage, &*next_key) + .expect("Linkage is updated in case entry is removed; it always points to existing keys; qed"); + res.1.previous = linkage.previous; + storage.put(&*next_key, &res); + } + } + + fn read_with_linkage( + storage: &S, + key: &[u8], + ) -> Option<(#value_type, self::#inner_module::Linkage<#kty>)> { + storage.get(key) + } + + fn new_head_linkage( + storage: &S, + key: &#kty, + ) -> self::#inner_module::Linkage<#kty> { + use self::#inner_module::Utils; + + if let Some(head) = Self::read_head(storage) { + // update previous head predecessor + { + let head_key = #as_map::key_for(&head); let (data, linkage) = Self::read_with_linkage(storage, &*head_key).expect(r#" head is set when first element is inserted and unset when last element is removed; if head is Some then it points to existing key; qed "#); - storage.put(&*head_key, &(data, self::#inner_module::Linkage { - next: linkage.next.as_ref(), - previous: Some(key), - })); - } - // update to current head - Self::write_head(storage, Some(key)); - // return linkage with pointer to previous head - let mut linkage = self::#inner_module::Linkage::default(); - linkage.next = Some(head); - linkage - } else { - // we are first - update the head and produce empty linkage - Self::write_head(storage, Some(key)); - self::#inner_module::Linkage::default() - } - } - - fn read_head(storage: &S) -> Option<#kty> { - storage.get(#final_head_key) - } - - fn write_head(storage: &S, head: Option<&#kty>) { - match head { - Some(head) => storage.put(#final_head_key, head), - None => storage.kill(#final_head_key), - } - } - } - }; - - quote! { - #helpers - - #structure - - impl<#traitinstance: #traittype, #instance #bound_instantiable> #scrate::storage::generator::StorageMap<#kty, #typ> for #name<#traitinstance, #instance> { - type Query = #value_type; - - /// Get the prefix key in storage. - fn prefix() -> &'static [u8] { - #final_prefix - } - - /// Get the storage key used to fetch a value corresponding to a specific key. - fn key_for(key: &#kty) -> #scrate::rstd::vec::Vec { - let mut key_for = #as_map::prefix().to_vec(); - #scrate::codec::Encode::encode_to(&key, &mut key_for); - key_for - } - - /// Load the value associated with the given key from the map. 
- fn get(key: &#kty, storage: &S) -> Self::Query { - storage.get(&*#as_map::key_for(key)).#option_simple_1(|| #fielddefault) - } - - /// Take the value, reading and removing it. - fn take(key: &#kty, storage: &S) -> Self::Query { - use self::#inner_module::Utils; - - let res: Option<(#value_type, self::#inner_module::Linkage<#kty>)> = storage.take(&*#as_map::key_for(key)); - match res { - Some((data, linkage)) => { - Self::remove_linkage(linkage, storage); - data - }, - None => #fielddefault, - } - } - - /// Remove the value under a key. - fn remove(key: &#kty, storage: &S) { - #as_map::take(key, storage); - } - - /// Store a value to be associated with the given key from the map. - fn insert(key: &#kty, val: &#typ, storage: &S) { - use self::#inner_module::Utils; - - let key_for = &*#as_map::key_for(key); - let linkage = match Self::read_with_linkage(storage, key_for) { - // overwrite but reuse existing linkage - Some((_data, linkage)) => linkage, - // create new linkage - None => Self::new_head_linkage(storage, key), - }; - storage.put(key_for, &(val, linkage)) - } - - /// Mutate the value under a key - fn mutate R, S: #scrate::GenericStorage>(key: &#kty, f: F, storage: &S) -> R { - use self::#inner_module::Utils; - - let key_for = &*#as_map::key_for(key); - let (mut val, linkage) = Self::read_with_linkage(storage, key_for) - .map(|(data, linkage)| (data, Some(linkage))) - .unwrap_or_else(|| (#fielddefault, None)); - - let ret = f(&mut val); - #mutate_impl ; - ret - } - } - - impl<#traitinstance: 'static + #traittype, #instance #bound_instantiable> #scrate::storage::generator::EnumerableStorageMap<#kty, #typ> for #name<#traitinstance, #instance> { - fn head(storage: &S) -> Option<#kty> { - use self::#inner_module::Utils; - - Self::read_head(storage) - } - - fn enumerate<'a, S: #scrate::GenericStorage>(storage: &'a S) -> #scrate::storage::generator::Box + 'a> where - #kty: 'a, - #typ: 'a, - { - use self::#inner_module::{Utils, Enumerator}; - - #scrate::storage::generator::Box::new(Enumerator { - next: Self::read_head(storage), - storage, - _data: #phantom_data::<(#typ, #traitinstance, #instance)>::default(), - }) - } - } - } - } - - pub fn double_map(self, k1ty: &syn::Type, k2ty: &syn::Type, k2_hasher: TokenStream2) -> TokenStream2 { - let Self { - scrate, - visibility, - traitinstance, - traittype, - type_infos, - fielddefault, - prefix, - name, - attrs, - instance_opts, - .. - } = self; - - let DeclStorageTypeInfos { typ, value_type, is_option, .. } = type_infos; - let option_simple_1 = option_unwrap(is_option); - - let as_double_map = quote!{ > }; - - let mutate_impl = if !is_option { - quote!{ - #as_double_map::insert(key1, key2, &val, storage) - } - } else { - quote!{ - match val { - Some(ref val) => #as_double_map::insert(key1, key2, &val, storage), - None => #as_double_map::remove(key1, key2, storage), - } - } - }; - - let InstanceOpts { - comma_instance, - equal_default_instance, - bound_instantiable, - instance, - .. 
- } = instance_opts; - - let final_prefix = if let Some(instance) = instance { - let const_name = syn::Ident::new(&format!("{}{}", PREFIX_FOR, name.to_string()), proc_macro2::Span::call_site()); - quote!{ #instance::#const_name.as_bytes() } - } else { - quote!{ #prefix.as_bytes() } - }; - - // generator for double map - quote!{ - #( #[ #attrs ] )* - #visibility struct #name<#traitinstance: #traittype, #instance #bound_instantiable #equal_default_instance>(#scrate::storage::generator::PhantomData<(#traitinstance #comma_instance)>); - - impl<#traitinstance: #traittype, #instance #bound_instantiable> #scrate::storage::unhashed::generator::StorageDoubleMap<#k1ty, #k2ty, #typ> for #name<#traitinstance, #instance> { - type Query = #value_type; - - fn prefix() -> &'static [u8] { - #final_prefix - } - - fn key_for(k1: &#k1ty, k2: &#k2ty) -> Vec { - let mut key = #as_double_map::prefix_for(k1); - key.extend(&#scrate::Hashable::#k2_hasher(k2)); - key - } - - fn get(key1: &#k1ty, key2: &#k2ty, storage: &S) -> Self::Query { - let key = #as_double_map::key_for(key1, key2); - storage.get(&key).#option_simple_1(|| #fielddefault) - } - - fn take(key1: &#k1ty, key2: &#k2ty, storage: &S) -> Self::Query { - let key = #as_double_map::key_for(key1, key2); - storage.take(&key).#option_simple_1(|| #fielddefault) - } - - fn mutate R, S: #scrate::GenericUnhashedStorage>(key1: &#k1ty, key2: &#k2ty, f: F, storage: &S) -> R { - let mut val = #as_double_map::get(key1, key2, storage); - - let ret = f(&mut val); - #mutate_impl ; - ret - } - - } - } - - } + storage.put(&*head_key, &(data, self::#inner_module::Linkage { + next: linkage.next.as_ref(), + previous: Some(key), + })); + } + // update to current head + Self::write_head(storage, Some(key)); + // return linkage with pointer to previous head + let mut linkage = self::#inner_module::Linkage::default(); + linkage.next = Some(head); + linkage + } else { + // we are first - update the head and produce empty linkage + Self::write_head(storage, Some(key)); + self::#inner_module::Linkage::default() + } + } + + fn read_head(storage: &S) -> Option<#kty> { + storage.get(#final_head_key) + } + + fn write_head(storage: &S, head: Option<&#kty>) { + match head { + Some(head) => storage.put(#final_head_key, head), + None => storage.kill(#final_head_key), + } + } + } + }; + + quote! { + #helpers + + #structure + + impl<#traitinstance: #traittype, #instance #bound_instantiable> #scrate::storage::generator::StorageMap<#kty, #typ> for #name<#traitinstance, #instance> { + type Query = #value_type; + + /// Get the prefix key in storage. + fn prefix() -> &'static [u8] { + #final_prefix + } + + /// Get the storage key used to fetch a value corresponding to a specific key. + fn key_for(key: &#kty) -> #scrate::rstd::vec::Vec { + let mut key_for = #as_map::prefix().to_vec(); + #scrate::codec::Encode::encode_to(&key, &mut key_for); + key_for + } + + /// Load the value associated with the given key from the map. + fn get(key: &#kty, storage: &S) -> Self::Query { + storage.get(&*#as_map::key_for(key)).#option_simple_1(|| #fielddefault) + } + + /// Take the value, reading and removing it. + fn take(key: &#kty, storage: &S) -> Self::Query { + use self::#inner_module::Utils; + + let res: Option<(#value_type, self::#inner_module::Linkage<#kty>)> = storage.take(&*#as_map::key_for(key)); + match res { + Some((data, linkage)) => { + Self::remove_linkage(linkage, storage); + data + }, + None => #fielddefault, + } + } + + /// Remove the value under a key. 
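At this point the generated map operations stop being simple key lookups: `take` calls `remove_linkage` to re-point the neighbours, and `insert` (just below) either reuses an entry's existing linkage or links the new entry in front of the old head. The same bookkeeping, reduced to a standalone toy with a `HashMap` in place of storage (all names and types here are invented):

    use std::collections::HashMap;

    // Stand-ins for the generated `Linkage` and the map itself: each value
    // carries pointers to its neighbours, and the head key is stored separately.
    struct Linkage {
        previous: Option<u32>,
        next: Option<u32>,
    }

    struct LinkedMap {
        head: Option<u32>,
        entries: HashMap<u32, (String, Linkage)>,
    }

    impl LinkedMap {
        // Mirrors `new_head_linkage` + `insert`: a fresh key becomes the new head.
        fn insert(&mut self, key: u32, val: String) {
            if let Some(old_head) = self.head {
                self.entries
                    .get_mut(&old_head)
                    .expect("head points to an existing key")
                    .1
                    .previous = Some(key);
            }
            let linkage = Linkage { previous: None, next: self.head };
            self.head = Some(key);
            self.entries.insert(key, (val, linkage));
        }

        // Mirrors `remove_linkage`: re-point both neighbours (or the head)
        // before the entry itself disappears.
        fn remove(&mut self, key: u32) -> Option<String> {
            let (val, linkage) = self.entries.remove(&key)?;
            match linkage.previous {
                Some(prev) => {
                    self.entries
                        .get_mut(&prev)
                        .expect("linkage points to existing keys")
                        .1
                        .next = linkage.next
                }
                None => self.head = linkage.next, // we were first: update the head
            }
            if let Some(next) = linkage.next {
                self.entries
                    .get_mut(&next)
                    .expect("linkage points to existing keys")
                    .1
                    .previous = linkage.previous;
            }
            Some(val)
        }

        // Mirrors `Enumerator`: walk the `next` pointers starting from the head.
        fn keys_in_order(&self) -> Vec<u32> {
            let mut keys = Vec::new();
            let mut cursor = self.head;
            while let Some(key) = cursor {
                keys.push(key);
                cursor = self.entries[&key].1.next;
            }
            keys
        }
    }

    fn main() {
        let mut map = LinkedMap { head: None, entries: HashMap::new() };
        for k in 1..=3 {
            map.insert(k, format!("v{}", k));
        }
        assert_eq!(map.keys_in_order(), vec![3, 2, 1]); // newest insert is the head
        map.remove(2);
        assert_eq!(map.keys_in_order(), vec![3, 1]); // neighbours were re-linked
    }

The generated `insert` additionally reuses the stored linkage when overwriting a present key, so overwrites do not reorder enumeration; the toy above skips that case for brevity.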
+ fn remove(key: &#kty, storage: &S) { + #as_map::take(key, storage); + } + + /// Store a value to be associated with the given key from the map. + fn insert(key: &#kty, val: &#typ, storage: &S) { + use self::#inner_module::Utils; + + let key_for = &*#as_map::key_for(key); + let linkage = match Self::read_with_linkage(storage, key_for) { + // overwrite but reuse existing linkage + Some((_data, linkage)) => linkage, + // create new linkage + None => Self::new_head_linkage(storage, key), + }; + storage.put(key_for, &(val, linkage)) + } + + /// Mutate the value under a key + fn mutate R, S: #scrate::GenericStorage>(key: &#kty, f: F, storage: &S) -> R { + use self::#inner_module::Utils; + + let key_for = &*#as_map::key_for(key); + let (mut val, linkage) = Self::read_with_linkage(storage, key_for) + .map(|(data, linkage)| (data, Some(linkage))) + .unwrap_or_else(|| (#fielddefault, None)); + + let ret = f(&mut val); + #mutate_impl ; + ret + } + } + + impl<#traitinstance: 'static + #traittype, #instance #bound_instantiable> #scrate::storage::generator::EnumerableStorageMap<#kty, #typ> for #name<#traitinstance, #instance> { + fn head(storage: &S) -> Option<#kty> { + use self::#inner_module::Utils; + + Self::read_head(storage) + } + + fn enumerate<'a, S: #scrate::GenericStorage>(storage: &'a S) -> #scrate::storage::generator::Box + 'a> where + #kty: 'a, + #typ: 'a, + { + use self::#inner_module::{Utils, Enumerator}; + + #scrate::storage::generator::Box::new(Enumerator { + next: Self::read_head(storage), + storage, + _data: #phantom_data::<(#typ, #traitinstance, #instance)>::default(), + }) + } + } + } + } + + pub fn double_map( + self, + k1ty: &syn::Type, + k2ty: &syn::Type, + k2_hasher: TokenStream2, + ) -> TokenStream2 { + let Self { + scrate, + visibility, + traitinstance, + traittype, + type_infos, + fielddefault, + prefix, + name, + attrs, + instance_opts, + .. + } = self; + + let DeclStorageTypeInfos { + typ, + value_type, + is_option, + .. + } = type_infos; + let option_simple_1 = option_unwrap(is_option); + + let as_double_map = quote! { > }; + + let mutate_impl = if !is_option { + quote! { + #as_double_map::insert(key1, key2, &val, storage) + } + } else { + quote! { + match val { + Some(ref val) => #as_double_map::insert(key1, key2, &val, storage), + None => #as_double_map::remove(key1, key2, storage), + } + } + }; + + let InstanceOpts { + comma_instance, + equal_default_instance, + bound_instantiable, + instance, + .. + } = instance_opts; + + let final_prefix = if let Some(instance) = instance { + let const_name = syn::Ident::new( + &format!("{}{}", PREFIX_FOR, name.to_string()), + proc_macro2::Span::call_site(), + ); + quote! { #instance::#const_name.as_bytes() } + } else { + quote! { #prefix.as_bytes() } + }; + + // generator for double map + quote! 
{ + #( #[ #attrs ] )* + #visibility struct #name<#traitinstance: #traittype, #instance #bound_instantiable #equal_default_instance>(#scrate::storage::generator::PhantomData<(#traitinstance #comma_instance)>); + + impl<#traitinstance: #traittype, #instance #bound_instantiable> #scrate::storage::unhashed::generator::StorageDoubleMap<#k1ty, #k2ty, #typ> for #name<#traitinstance, #instance> { + type Query = #value_type; + + fn prefix() -> &'static [u8] { + #final_prefix + } + + fn key_for(k1: &#k1ty, k2: &#k2ty) -> Vec { + let mut key = #as_double_map::prefix_for(k1); + key.extend(&#scrate::Hashable::#k2_hasher(k2)); + key + } + + fn get(key1: &#k1ty, key2: &#k2ty, storage: &S) -> Self::Query { + let key = #as_double_map::key_for(key1, key2); + storage.get(&key).#option_simple_1(|| #fielddefault) + } + + fn take(key1: &#k1ty, key2: &#k2ty, storage: &S) -> Self::Query { + let key = #as_double_map::key_for(key1, key2); + storage.take(&key).#option_simple_1(|| #fielddefault) + } + + fn mutate R, S: #scrate::GenericUnhashedStorage>(key1: &#k1ty, key2: &#k2ty, f: F, storage: &S) -> R { + let mut val = #as_double_map::get(key1, key2, storage); + + let ret = f(&mut val); + #mutate_impl ; + ret + } + + } + } + } } diff --git a/srml/support/procedural/src/storage/mod.rs b/srml/support/procedural/src/storage/mod.rs index 82290e0de4..af1982c262 100644 --- a/srml/support/procedural/src/storage/mod.rs +++ b/srml/support/procedural/src/storage/mod.rs @@ -19,10 +19,10 @@ // end::description[] use srml_support_procedural_tools::syn_ext as ext; -use srml_support_procedural_tools::{ToTokens, Parse, custom_keyword, custom_keyword_impl}; +use srml_support_procedural_tools::{custom_keyword, custom_keyword_impl, Parse, ToTokens}; -use syn::{Ident, Token}; use syn::token::CustomKeyword; +use syn::{Ident, Token}; mod impls; @@ -31,148 +31,147 @@ pub mod transformation; /// Parsing usage only #[derive(Parse, ToTokens, Debug)] struct StorageDefinition { - pub hidden_crate: Option, - pub visibility: syn::Visibility, - pub trait_token: Token![trait], - pub ident: Ident, - pub for_token: Token![for], - pub module_ident: Ident, - pub mod_lt_token: Token![<], - pub mod_param: syn::GenericParam, - pub mod_instance_param_token: Option, - pub mod_instance: Option, - pub mod_instantiable_token: Option, - pub mod_instantiable: Option, - pub mod_default_instance_token: Option, - pub mod_default_instance: Option, - pub mod_gt_token: Token![>], - pub as_token: Token![as], - pub crate_ident: Ident, - pub content: ext::Braces>, - pub extra_genesis: Option, - pub extra_genesis_skip_phantom_data_field: Option, + pub hidden_crate: Option, + pub visibility: syn::Visibility, + pub trait_token: Token![trait], + pub ident: Ident, + pub for_token: Token![for], + pub module_ident: Ident, + pub mod_lt_token: Token![<], + pub mod_param: syn::GenericParam, + pub mod_instance_param_token: Option, + pub mod_instance: Option, + pub mod_instantiable_token: Option, + pub mod_instantiable: Option, + pub mod_default_instance_token: Option, + pub mod_default_instance: Option, + pub mod_gt_token: Token![>], + pub as_token: Token![as], + pub crate_ident: Ident, + pub content: ext::Braces>, + pub extra_genesis: Option, + pub extra_genesis_skip_phantom_data_field: Option, } #[derive(Parse, ToTokens, Debug)] struct SpecificHiddenCrate { - pub keyword: ext::CustomToken, - pub ident: ext::Parens, + pub keyword: ext::CustomToken, + pub ident: ext::Parens, } #[derive(Parse, ToTokens, Debug)] struct AddExtraGenesis { - pub extragenesis_keyword: ext::CustomToken, 
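The `StorageDoubleMap` impl generated above derives keys in two stages: `prefix_for(k1)` (the map prefix plus the encoded first key), then the hash of the second key appended, so every entry under one `k1` lives in a contiguous key range that can be scanned or removed wholesale. In miniature, with `DefaultHasher` as a stand-in for the real `blake2_256`/`twox_128` hashers and invented prefix and key types:

    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    // Stand-in for `blake2_256` / `twox_128`: any deterministic hash of the
    // second key will do for illustration.
    fn hash_k2(k2: u32) -> [u8; 8] {
        let mut hasher = DefaultHasher::new();
        k2.hash(&mut hasher);
        hasher.finish().to_le_bytes()
    }

    // The full entry key is `prefix_for(k1)` plus the *hashed* second key.
    fn key_for(prefix: &[u8], k1: u32, k2: u32) -> Vec<u8> {
        let mut key = prefix.to_vec();
        key.extend_from_slice(&k1.to_le_bytes()); // `prefix_for(k1)`
        key.extend_from_slice(&hash_k2(k2));      // `Hashable::#k2_hasher(k2)`
        key
    }

    fn main() {
        let a = key_for(b"Example Votes", 7, 1);
        let b = key_for(b"Example Votes", 7, 2);
        // Same `k1` prefix, different hashed-`k2` suffix: a scan over the
        // `prefix_for(7)` range would visit both entries.
        assert_eq!(a[..17], b[..17]);
        assert_ne!(a, b);
    }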
- pub content: ext::Braces, + pub extragenesis_keyword: ext::CustomToken, + pub content: ext::Braces, } #[derive(Parse, ToTokens, Debug)] struct ExtraGenesisSkipPhantomDataField { - pub genesis_phantom_keyword: ext::CustomToken, - pub token: Token![;], + pub genesis_phantom_keyword: ext::CustomToken, + pub token: Token![;], } #[derive(Parse, ToTokens, Debug)] struct AddExtraGenesisContent { - pub lines: ext::Punctuated, + pub lines: ext::Punctuated, } #[derive(Parse, ToTokens, Debug)] enum AddExtraGenesisLineEnum { - AddExtraGenesisLine(AddExtraGenesisLine), - AddExtraGenesisBuild(DeclStorageBuild), + AddExtraGenesisLine(AddExtraGenesisLine), + AddExtraGenesisBuild(DeclStorageBuild), } #[derive(Parse, ToTokens, Debug)] struct AddExtraGenesisLine { - pub attrs: ext::OuterAttributes, - pub config_keyword: ext::CustomToken, - pub extra_field: ext::Parens, - pub coldot_token: Token![:], - pub extra_type: syn::Type, - pub default_value: ext::Opt, + pub attrs: ext::OuterAttributes, + pub config_keyword: ext::CustomToken, + pub extra_field: ext::Parens, + pub coldot_token: Token![:], + pub extra_type: syn::Type, + pub default_value: ext::Opt, } #[derive(Parse, ToTokens, Debug)] struct DeclStorageLine { - // attrs (main use case is doc) - pub attrs: ext::OuterAttributes, - // visibility (no need to make optional - pub visibility: syn::Visibility, - // name - pub name: Ident, - pub getter: Option, - pub config: Option, - pub build: Option, - pub coldot_token: Token![:], - pub storage_type: DeclStorageType, - pub default_value: ext::Opt, + // attrs (main use case is doc) + pub attrs: ext::OuterAttributes, + // visibility (no need to make optional + pub visibility: syn::Visibility, + // name + pub name: Ident, + pub getter: Option, + pub config: Option, + pub build: Option, + pub coldot_token: Token![:], + pub storage_type: DeclStorageType, + pub default_value: ext::Opt, } - #[derive(Parse, ToTokens, Debug)] struct DeclStorageGetter { - pub getter_keyword: ext::CustomToken, - pub getfn: ext::Parens, + pub getter_keyword: ext::CustomToken, + pub getfn: ext::Parens, } #[derive(Parse, ToTokens, Debug)] struct DeclStorageConfig { - pub config_keyword: ext::CustomToken, - pub expr: ext::Parens>, + pub config_keyword: ext::CustomToken, + pub expr: ext::Parens>, } #[derive(Parse, ToTokens, Debug)] struct DeclStorageBuild { - pub build_keyword: ext::CustomToken, - pub expr: ext::Parens, + pub build_keyword: ext::CustomToken, + pub expr: ext::Parens, } #[derive(Parse, ToTokens, Debug)] enum DeclStorageType { - Map(DeclStorageMap), - LinkedMap(DeclStorageLinkedMap), - DoubleMap(DeclStorageDoubleMap), - Simple(syn::Type), + Map(DeclStorageMap), + LinkedMap(DeclStorageLinkedMap), + DoubleMap(DeclStorageDoubleMap), + Simple(syn::Type), } #[derive(Parse, ToTokens, Debug)] struct DeclStorageMap { - pub map_keyword: ext::CustomToken, - pub key: syn::Type, - pub ass_keyword: Token![=>], - pub value: syn::Type, + pub map_keyword: ext::CustomToken, + pub key: syn::Type, + pub ass_keyword: Token![=>], + pub value: syn::Type, } #[derive(Parse, ToTokens, Debug)] struct DeclStorageLinkedMap { - pub map_keyword: ext::CustomToken, - pub key: syn::Type, - pub ass_keyword: Token![=>], - pub value: syn::Type, + pub map_keyword: ext::CustomToken, + pub key: syn::Type, + pub ass_keyword: Token![=>], + pub value: syn::Type, } #[derive(Parse, ToTokens, Debug)] struct DeclStorageDoubleMap { - pub map_keyword: ext::CustomToken, - pub key1: syn::Type, - pub comma_keyword: Token![,], - pub key2_hasher: DeclStorageDoubleMapHasher, - pub 
key2: ext::Parens, - pub ass_keyword: Token![=>], - pub value: syn::Type, + pub map_keyword: ext::CustomToken, + pub key1: syn::Type, + pub comma_keyword: Token![,], + pub key2_hasher: DeclStorageDoubleMapHasher, + pub key2: ext::Parens, + pub ass_keyword: Token![=>], + pub value: syn::Type, } #[derive(Parse, ToTokens, Debug)] enum DeclStorageDoubleMapHasher { - Blake2_256(ext::CustomToken), - Twox256(ext::CustomToken), - Twox128(ext::CustomToken), + Blake2_256(ext::CustomToken), + Twox256(ext::CustomToken), + Twox128(ext::CustomToken), } #[derive(Parse, ToTokens, Debug)] struct DeclStorageDefault { - pub equal_token: Token![=], - pub expr: syn::Expr, + pub equal_token: Token![=], + pub expr: syn::Expr, } custom_keyword_impl!(SpecificHiddenCrate, "hiddencrate", "hiddencrate as keyword"); @@ -180,7 +179,11 @@ custom_keyword_impl!(DeclStorageConfig, "config", "build as keyword"); custom_keyword!(ConfigKeyword, "config", "config as keyword"); custom_keyword!(BuildKeyword, "build", "build as keyword"); custom_keyword_impl!(DeclStorageBuild, "build", "storage build config"); -custom_keyword_impl!(AddExtraGenesis, "add_extra_genesis", "storage extra genesis"); +custom_keyword_impl!( + AddExtraGenesis, + "add_extra_genesis", + "storage extra genesis" +); custom_keyword_impl!(DeclStorageGetter, "get", "storage getter"); custom_keyword!(MapKeyword, "map", "map as keyword"); custom_keyword!(LinkedMapKeyword, "linked_map", "linked_map as keyword"); @@ -188,4 +191,8 @@ custom_keyword!(DoubleMapKeyword, "double_map", "double_map as keyword"); custom_keyword!(Blake2_256Keyword, "blake2_256", "Blake2_256 as keyword"); custom_keyword!(Twox256Keyword, "twox_256", "Twox_256 as keyword"); custom_keyword!(Twox128Keyword, "twox_128", "Twox_128 as keyword"); -custom_keyword_impl!(ExtraGenesisSkipPhantomDataField, "extra_genesis_skip_phantom_data_field", "extra_genesis_skip_phantom_data_field as keyword"); +custom_keyword_impl!( + ExtraGenesisSkipPhantomDataField, + "extra_genesis_skip_phantom_data_field", + "extra_genesis_skip_phantom_data_field as keyword" +); diff --git a/srml/support/procedural/src/storage/transformation.rs b/srml/support/procedural/src/storage/transformation.rs index f00b5e8309..7d9516182b 100644 --- a/srml/support/procedural/src/storage/transformation.rs +++ b/srml/support/procedural/src/storage/transformation.rs @@ -19,22 +19,20 @@ // end::description[] use srml_support_procedural_tools::syn_ext as ext; -use srml_support_procedural_tools::{generate_crate_access, generate_hidden_includes, clean_type_string}; +use srml_support_procedural_tools::{ + clean_type_string, generate_crate_access, generate_hidden_includes, +}; use proc_macro::TokenStream; use proc_macro2::TokenStream as TokenStream2; +use quote::quote; use syn::{ - Ident, - GenericParam, - spanned::Spanned, - parse::{ - Error, - Result, - }, - parse_macro_input, + parse::{Error, Result}, + parse_macro_input, + spanned::Spanned, + GenericParam, Ident, }; -use quote::quote; use super::*; @@ -51,204 +49,207 @@ macro_rules! try_tok(( $expre : expr ) => { }); pub fn decl_storage_impl(input: TokenStream) -> TokenStream { - let def = parse_macro_input!(input as StorageDefinition); - - let StorageDefinition { - hidden_crate, - visibility, - ident: storetype, - module_ident, - mod_param: strait, - mod_instance, - mod_instantiable, - mod_default_instance, - crate_ident: cratename, - content: ext::Braces { content: storage_lines, ..}, - extra_genesis, - extra_genesis_skip_phantom_data_field, - .. 
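For orientation, the surface syntax those parse structs accept looks roughly like the invocation below. It is illustrative only and not compilable on its own: it needs the `srml-support` macros and a module `Trait` in scope, and `Example` and every storage name here are invented.

    decl_storage! {
        trait Store for Module<T: Trait> as Example {
            // DeclStorageType::Simple, with getter, config() and a default value
            Dummy get(dummy) config(): u32 = 3;
            // DeclStorageType::Map: `map key => value`
            Accounts get(accounts): map u32 => T::AccountId;
            // DeclStorageType::LinkedMap: the enumerable variant
            Sessions get(sessions): linked_map u32 => u64;
            // DeclStorageType::DoubleMap with a DeclStorageDoubleMapHasher
            Votes get(votes): double_map u32, blake2_256(u32) => u64;
        }
        add_extra_genesis {
            config(initial_dummy): u32;
            build(|_, _, config| { let _ = config.initial_dummy; });
        }
    }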
- } = def; - - let instance_opts = match get_instance_opts(mod_instance, mod_instantiable, mod_default_instance) { - Ok(opts) => opts, - Err(err) => return err.to_compile_error().into(), - }; - - let hidden_crate_name = hidden_crate.map(|rc| rc.ident.content).map(|i| i.to_string()) - .unwrap_or_else(|| "decl_storage".to_string()); - let scrate = generate_crate_access(&hidden_crate_name, "srml-support"); - let scrate_decl = generate_hidden_includes( - &hidden_crate_name, - "srml-support", - ); - - let ( - traitinstance, - traittypes, - ) = if let GenericParam::Type(syn::TypeParam {ident, bounds, ..}) = strait { - (ident, bounds) - } else { - return try_tok!(Err(Error::new(strait.span(), "Missing declare store generic params"))); - }; - - let traittype = if let Some(traittype) = traittypes.first() { - traittype.into_value() - } else { - return try_tok!(Err(Error::new(traittypes.span(), "Trait bound expected"))); - }; - - let extra_genesis = try_tok!(decl_store_extra_genesis( - &scrate, - &traitinstance, - &traittype, - &instance_opts, - &storage_lines, - &extra_genesis, - extra_genesis_skip_phantom_data_field.is_some(), - )); - let decl_storage_items = decl_storage_items( - &scrate, - &traitinstance, - &traittype, - &instance_opts, - &cratename, - &storage_lines, - ); - let decl_store_items = decl_store_items( - &storage_lines, - ); - let impl_store_items = impl_store_items( - &traitinstance, - &instance_opts.instance, - &storage_lines, - ); - let impl_store_fns = impl_store_fns( - &scrate, - &traitinstance, - &instance_opts.instance, - &storage_lines, - ); - let (store_default_struct, store_functions_to_metadata) = store_functions_to_metadata( - &scrate, - &traitinstance, - &traittype, - &instance_opts, - &storage_lines, - ); - - let InstanceOpts { - instance, - bound_instantiable, - .. - } = instance_opts; - - let cratename_string = cratename.to_string(); - let expanded = quote! { - #scrate_decl - #decl_storage_items - #visibility trait #storetype { - #decl_store_items - } - #store_default_struct - impl<#traitinstance: #traittype, #instance #bound_instantiable> #storetype for #module_ident<#traitinstance, #instance> { - #impl_store_items - } - impl<#traitinstance: 'static + #traittype, #instance #bound_instantiable> #module_ident<#traitinstance, #instance> { - #impl_store_fns - #[doc(hidden)] - pub fn store_metadata() -> #scrate::storage::generator::StorageMetadata { - #scrate::storage::generator::StorageMetadata { - functions: #scrate::storage::generator::DecodeDifferent::Encode(#store_functions_to_metadata) , - } - } - #[doc(hidden)] - pub fn store_metadata_functions() -> &'static [#scrate::storage::generator::StorageFunctionMetadata] { - #store_functions_to_metadata - } - #[doc(hidden)] - pub fn store_metadata_name() -> &'static str { - #cratename_string - } - } - - #extra_genesis - - }; - - expanded.into() + let def = parse_macro_input!(input as StorageDefinition); + + let StorageDefinition { + hidden_crate, + visibility, + ident: storetype, + module_ident, + mod_param: strait, + mod_instance, + mod_instantiable, + mod_default_instance, + crate_ident: cratename, + content: ext::Braces { + content: storage_lines, + .. + }, + extra_genesis, + extra_genesis_skip_phantom_data_field, + .. 
+ } = def; + + let instance_opts = + match get_instance_opts(mod_instance, mod_instantiable, mod_default_instance) { + Ok(opts) => opts, + Err(err) => return err.to_compile_error().into(), + }; + + let hidden_crate_name = hidden_crate + .map(|rc| rc.ident.content) + .map(|i| i.to_string()) + .unwrap_or_else(|| "decl_storage".to_string()); + let scrate = generate_crate_access(&hidden_crate_name, "srml-support"); + let scrate_decl = generate_hidden_includes(&hidden_crate_name, "srml-support"); + + let (traitinstance, traittypes) = + if let GenericParam::Type(syn::TypeParam { ident, bounds, .. }) = strait { + (ident, bounds) + } else { + return try_tok!(Err(Error::new( + strait.span(), + "Missing declare store generic params" + ))); + }; + + let traittype = if let Some(traittype) = traittypes.first() { + traittype.into_value() + } else { + return try_tok!(Err(Error::new(traittypes.span(), "Trait bound expected"))); + }; + + let extra_genesis = try_tok!(decl_store_extra_genesis( + &scrate, + &traitinstance, + &traittype, + &instance_opts, + &storage_lines, + &extra_genesis, + extra_genesis_skip_phantom_data_field.is_some(), + )); + let decl_storage_items = decl_storage_items( + &scrate, + &traitinstance, + &traittype, + &instance_opts, + &cratename, + &storage_lines, + ); + let decl_store_items = decl_store_items(&storage_lines); + let impl_store_items = + impl_store_items(&traitinstance, &instance_opts.instance, &storage_lines); + let impl_store_fns = impl_store_fns( + &scrate, + &traitinstance, + &instance_opts.instance, + &storage_lines, + ); + let (store_default_struct, store_functions_to_metadata) = store_functions_to_metadata( + &scrate, + &traitinstance, + &traittype, + &instance_opts, + &storage_lines, + ); + + let InstanceOpts { + instance, + bound_instantiable, + .. + } = instance_opts; + + let cratename_string = cratename.to_string(); + let expanded = quote! { + #scrate_decl + #decl_storage_items + #visibility trait #storetype { + #decl_store_items + } + #store_default_struct + impl<#traitinstance: #traittype, #instance #bound_instantiable> #storetype for #module_ident<#traitinstance, #instance> { + #impl_store_items + } + impl<#traitinstance: 'static + #traittype, #instance #bound_instantiable> #module_ident<#traitinstance, #instance> { + #impl_store_fns + #[doc(hidden)] + pub fn store_metadata() -> #scrate::storage::generator::StorageMetadata { + #scrate::storage::generator::StorageMetadata { + functions: #scrate::storage::generator::DecodeDifferent::Encode(#store_functions_to_metadata) , + } + } + #[doc(hidden)] + pub fn store_metadata_functions() -> &'static [#scrate::storage::generator::StorageFunctionMetadata] { + #store_functions_to_metadata + } + #[doc(hidden)] + pub fn store_metadata_name() -> &'static str { + #cratename_string + } + } + + #extra_genesis + + }; + + expanded.into() } fn decl_store_extra_genesis( - scrate: &TokenStream2, - traitinstance: &Ident, - traittype: &syn::TypeParamBound, - instance_opts: &InstanceOpts, - storage_lines: &ext::Punctuated, - extra_genesis: &Option, - extra_genesis_skip_phantom_data_field: bool, + scrate: &TokenStream2, + traitinstance: &Ident, + traittype: &syn::TypeParamBound, + instance_opts: &InstanceOpts, + storage_lines: &ext::Punctuated, + extra_genesis: &Option, + extra_genesis_skip_phantom_data_field: bool, ) -> Result { - - let InstanceOpts { - comma_instance, - equal_default_instance, - bound_instantiable, - instance, - .. 
- } = instance_opts; - - let mut is_trait_needed = false; - let mut has_trait_field = false; - let mut serde_complete_bound = std::collections::HashSet::new(); - let mut config_field = TokenStream2::new(); - let mut config_field_default = TokenStream2::new(); - let mut builders = TokenStream2::new(); - for sline in storage_lines.inner.iter() { - - let DeclStorageLine { - attrs, - name, - getter, - config, - build, - storage_type, - default_value, - .. - } = sline; - - let type_infos = get_type_infos(storage_type); - - let mut opt_build; - // need build line - if let Some(ref config) = config { - let ident = if let Some(ident) = config.expr.content.as_ref() { - quote!( #ident ) - } else if let Some(ref getter) = getter { - let ident = &getter.getfn.content; - quote!( #ident ) - } else { - return Err( + let InstanceOpts { + comma_instance, + equal_default_instance, + bound_instantiable, + instance, + .. + } = instance_opts; + + let mut is_trait_needed = false; + let mut has_trait_field = false; + let mut serde_complete_bound = std::collections::HashSet::new(); + let mut config_field = TokenStream2::new(); + let mut config_field_default = TokenStream2::new(); + let mut builders = TokenStream2::new(); + for sline in storage_lines.inner.iter() { + let DeclStorageLine { + attrs, + name, + getter, + config, + build, + storage_type, + default_value, + .. + } = sline; + + let type_infos = get_type_infos(storage_type); + + let mut opt_build; + // need build line + if let Some(ref config) = config { + let ident = if let Some(ident) = config.expr.content.as_ref() { + quote!( #ident ) + } else if let Some(ref getter) = getter { + let ident = &getter.getfn.content; + quote!( #ident ) + } else { + return Err( Error::new_spanned( name, "Invalid storage definiton, couldn't find config identifier: storage must either have a get identifier \ `get(ident)` or a defined config identifier `config(ident)`" ) ); - }; - if type_infos.kind.is_simple() && ext::has_parametric_type(type_infos.value_type, traitinstance) { - is_trait_needed = true; - has_trait_field = true; - } - - serde_complete_bound.insert(type_infos.value_type); - if let DeclStorageTypeInfosKind::Map { key_type, .. } = type_infos.kind { - serde_complete_bound.insert(key_type); - } - - // Propagate doc attributes. - let attrs = attrs.inner.iter().filter_map(|a| a.parse_meta().ok()).filter(|m| m.name() == "doc"); - - let storage_type = type_infos.typ.clone(); - config_field.extend(match type_infos.kind { + }; + if type_infos.kind.is_simple() + && ext::has_parametric_type(type_infos.value_type, traitinstance) + { + is_trait_needed = true; + has_trait_field = true; + } + + serde_complete_bound.insert(type_infos.value_type); + if let DeclStorageTypeInfosKind::Map { key_type, .. } = type_infos.kind { + serde_complete_bound.insert(key_type); + } + + // Propagate doc attributes. 
+ let attrs = attrs + .inner + .iter() + .filter_map(|a| a.parse_meta().ok()) + .filter(|m| m.name() == "doc"); + + let storage_type = type_infos.typ.clone(); + config_field.extend(match type_infos.kind { DeclStorageTypeInfosKind::Simple => { quote!( #( #[ #attrs ] )* pub #ident: #storage_type, ) }, @@ -259,25 +260,34 @@ fn decl_store_extra_genesis( quote!( #( #[ #attrs ] )* pub #ident: Vec<(#key1_type, #key2_type, #storage_type)>, ) }, }); - opt_build = Some(build.as_ref().map(|b| &b.expr.content).map(|b|quote!( #b )) + opt_build = Some(build.as_ref().map(|b| &b.expr.content).map(|b|quote!( #b )) .unwrap_or_else(|| quote!( (|config: &GenesisConfig<#traitinstance, #instance>| config.#ident.clone()) ))); - let fielddefault = default_value.inner.as_ref().map(|d| &d.expr).map(|d| - if type_infos.is_option { - quote!( #d.unwrap_or_default() ) - } else { - quote!( #d ) - }).unwrap_or_else(|| quote!( Default::default() )); - - config_field_default.extend(quote!( #ident: #fielddefault, )); - } else { - opt_build = build.as_ref().map(|b| &b.expr.content).map(|b| quote!( #b )); - } - - let typ = type_infos.typ; - if let Some(builder) = opt_build { - is_trait_needed = true; - builders.extend(match type_infos.kind { + let fielddefault = default_value + .inner + .as_ref() + .map(|d| &d.expr) + .map(|d| { + if type_infos.is_option { + quote!( #d.unwrap_or_default() ) + } else { + quote!( #d ) + } + }) + .unwrap_or_else(|| quote!(Default::default())); + + config_field_default.extend(quote!( #ident: #fielddefault, )); + } else { + opt_build = build + .as_ref() + .map(|b| &b.expr.content) + .map(|b| quote!( #b )); + } + + let typ = type_infos.typ; + if let Some(builder) = opt_build { + is_trait_needed = true; + builders.extend(match type_infos.kind { DeclStorageTypeInfosKind::Simple => { quote!{{ use #scrate::rstd::{cell::RefCell, marker::PhantomData}; @@ -310,631 +320,710 @@ fn decl_store_extra_genesis( }} }, }); - } - - } - - let mut has_scall = false; - let mut scall = quote!{ ( |_, _, _| {} ) }; - let mut genesis_extrafields = TokenStream2::new(); - let mut genesis_extrafields_default = TokenStream2::new(); - - // extra genesis - if let Some(eg) = extra_genesis { - for ex_content in eg.content.content.lines.inner.iter() { - match ex_content { - AddExtraGenesisLineEnum::AddExtraGenesisLine(AddExtraGenesisLine { - attrs, - extra_field, - extra_type, - default_value, - .. - }) => { - if ext::has_parametric_type(&extra_type, traitinstance) { - is_trait_needed = true; - has_trait_field = true; - } - - serde_complete_bound.insert(extra_type); - - let extrafield = &extra_field.content; - genesis_extrafields.extend(quote!{ - #attrs pub #extrafield: #extra_type, - }); - let extra_default = default_value.inner.as_ref().map(|d| &d.expr).map(|e| quote!{ #e }) - .unwrap_or_else(|| quote!( Default::default() )); - genesis_extrafields_default.extend(quote!{ - #extrafield: #extra_default, - }); - }, - AddExtraGenesisLineEnum::AddExtraGenesisBuild(DeclStorageBuild{ expr, .. 
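One subtlety in the `fielddefault` expression built above: for an `Option`-typed storage item declared with `= Some(v)`, the genesis config field default becomes `Some(v).unwrap_or_default()`, i.e. the unwrapped inner value (and the value type's default for `= None`); non-`Option` items take the declared expression verbatim. The behaviour in isolation:

    fn main() {
        // A `decl_storage` default `= Some(3)` on an `Option<u32>` item...
        let declared: Option<u32> = Some(3);
        // ...yields this genesis field default: the unwrapped inner value.
        let config_default: u32 = declared.unwrap_or_default();
        assert_eq!(config_default, 3);
        // `= None` falls back to `u32::default()`.
        assert_eq!(None::<u32>.unwrap_or_default(), 0u32);
    }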
}) => { - if has_scall { - return Err(Error::new(expr.span(), "Only one build expression allowed for extra genesis")); - } - let content = &expr.content; - scall = quote!( ( #content ) ); - has_scall = true; - }, - } - } - } - - - let serde_bug_bound = if !serde_complete_bound.is_empty() { - let mut b_ser = String::new(); - let mut b_dser = String::new(); - // panic!("{:#?}", serde_complete_bound); - serde_complete_bound.into_iter().for_each(|bound| { - let stype = quote!(#bound); - b_ser.push_str(&format!("{} : {}::serde::Serialize, ", stype, scrate)); - b_dser.push_str(&format!("{} : {}::serde::de::DeserializeOwned, ", stype, scrate)); - }); - - quote! { - #[serde(bound(serialize = #b_ser))] - #[serde(bound(deserialize = #b_dser))] - } - } else { - quote!() - }; - - let is_extra_genesis_needed = has_scall - || !config_field.is_empty() - || !genesis_extrafields.is_empty() - || !builders.is_empty(); - Ok(if is_extra_genesis_needed { - let (fparam_struct, fparam_impl, sparam, ph_field, ph_default) = if is_trait_needed { - if (has_trait_field && instance.is_none()) || extra_genesis_skip_phantom_data_field { - // no phantom data required - ( - quote!(<#traitinstance: #traittype, #instance #bound_instantiable #equal_default_instance>), - quote!(<#traitinstance: #traittype, #instance #bound_instantiable>), - quote!(<#traitinstance, #instance>), - quote!(), - quote!(), - ) - } else { - // need phantom data - ( - quote!(<#traitinstance: #traittype, #instance #bound_instantiable #equal_default_instance>), - quote!(<#traitinstance: #traittype, #instance #bound_instantiable>), - quote!(<#traitinstance, #instance>), - - quote!{ - #[serde(skip)] - pub _genesis_phantom_data: #scrate::storage::generator::PhantomData<(#traitinstance #comma_instance)>, - }, - quote!{ - _genesis_phantom_data: Default::default(), - }, - ) - } - } else { - // do not even need type parameter - (quote!(), quote!(), quote!(), quote!(), quote!()) - }; - quote!{ - - #[derive(#scrate::Serialize, #scrate::Deserialize)] - #[cfg(feature = "std")] - #[serde(rename_all = "camelCase")] - #[serde(deny_unknown_fields)] - #serde_bug_bound - pub struct GenesisConfig#fparam_struct { - #ph_field - #config_field - #genesis_extrafields - } - - #[cfg(feature = "std")] - impl#fparam_impl Default for GenesisConfig#sparam { - fn default() -> Self { - GenesisConfig { - #ph_default - #config_field_default - #genesis_extrafields_default - } - } - } - - #[cfg(feature = "std")] - impl#fparam_impl #scrate::runtime_primitives::BuildStorage for GenesisConfig#sparam { - fn assimilate_storage(self, r: &mut #scrate::runtime_primitives::StorageOverlay, c: &mut #scrate::runtime_primitives::ChildrenStorageOverlay) -> ::std::result::Result<(), String> { - use #scrate::rstd::{cell::RefCell, marker::PhantomData}; - let storage = (RefCell::new(r), PhantomData::::default()); - - #builders - - let r = storage.0.into_inner(); - - #scall(r, c, &self); - - Ok(()) - } - } - } - } else { - quote!() - }) + } + } + + let mut has_scall = false; + let mut scall = quote! { ( |_, _, _| {} ) }; + let mut genesis_extrafields = TokenStream2::new(); + let mut genesis_extrafields_default = TokenStream2::new(); + + // extra genesis + if let Some(eg) = extra_genesis { + for ex_content in eg.content.content.lines.inner.iter() { + match ex_content { + AddExtraGenesisLineEnum::AddExtraGenesisLine(AddExtraGenesisLine { + attrs, + extra_field, + extra_type, + default_value, + .. 
+ }) => { + if ext::has_parametric_type(&extra_type, traitinstance) { + is_trait_needed = true; + has_trait_field = true; + } + + serde_complete_bound.insert(extra_type); + + let extrafield = &extra_field.content; + genesis_extrafields.extend(quote! { + #attrs pub #extrafield: #extra_type, + }); + let extra_default = default_value + .inner + .as_ref() + .map(|d| &d.expr) + .map(|e| quote! { #e }) + .unwrap_or_else(|| quote!(Default::default())); + genesis_extrafields_default.extend(quote! { + #extrafield: #extra_default, + }); + } + AddExtraGenesisLineEnum::AddExtraGenesisBuild(DeclStorageBuild { + expr, .. + }) => { + if has_scall { + return Err(Error::new( + expr.span(), + "Only one build expression allowed for extra genesis", + )); + } + let content = &expr.content; + scall = quote!( ( #content ) ); + has_scall = true; + } + } + } + } + + let serde_bug_bound = if !serde_complete_bound.is_empty() { + let mut b_ser = String::new(); + let mut b_dser = String::new(); + // panic!("{:#?}", serde_complete_bound); + serde_complete_bound.into_iter().for_each(|bound| { + let stype = quote!(#bound); + b_ser.push_str(&format!("{} : {}::serde::Serialize, ", stype, scrate)); + b_dser.push_str(&format!( + "{} : {}::serde::de::DeserializeOwned, ", + stype, scrate + )); + }); + + quote! { + #[serde(bound(serialize = #b_ser))] + #[serde(bound(deserialize = #b_dser))] + } + } else { + quote!() + }; + + let is_extra_genesis_needed = has_scall + || !config_field.is_empty() + || !genesis_extrafields.is_empty() + || !builders.is_empty(); + Ok(if is_extra_genesis_needed { + let (fparam_struct, fparam_impl, sparam, ph_field, ph_default) = if is_trait_needed { + if (has_trait_field && instance.is_none()) || extra_genesis_skip_phantom_data_field { + // no phantom data required + ( + quote!(<#traitinstance: #traittype, #instance #bound_instantiable #equal_default_instance>), + quote!(<#traitinstance: #traittype, #instance #bound_instantiable>), + quote!(<#traitinstance, #instance>), + quote!(), + quote!(), + ) + } else { + // need phantom data + ( + quote!(<#traitinstance: #traittype, #instance #bound_instantiable #equal_default_instance>), + quote!(<#traitinstance: #traittype, #instance #bound_instantiable>), + quote!(<#traitinstance, #instance>), + quote! { + #[serde(skip)] + pub _genesis_phantom_data: #scrate::storage::generator::PhantomData<(#traitinstance #comma_instance)>, + }, + quote! { + _genesis_phantom_data: Default::default(), + }, + ) + } + } else { + // do not even need type parameter + (quote!(), quote!(), quote!(), quote!(), quote!()) + }; + quote! 
{ + + #[derive(#scrate::Serialize, #scrate::Deserialize)] + #[cfg(feature = "std")] + #[serde(rename_all = "camelCase")] + #[serde(deny_unknown_fields)] + #serde_bug_bound + pub struct GenesisConfig#fparam_struct { + #ph_field + #config_field + #genesis_extrafields + } + + #[cfg(feature = "std")] + impl#fparam_impl Default for GenesisConfig#sparam { + fn default() -> Self { + GenesisConfig { + #ph_default + #config_field_default + #genesis_extrafields_default + } + } + } + + #[cfg(feature = "std")] + impl#fparam_impl #scrate::runtime_primitives::BuildStorage for GenesisConfig#sparam { + fn assimilate_storage(self, r: &mut #scrate::runtime_primitives::StorageOverlay, c: &mut #scrate::runtime_primitives::ChildrenStorageOverlay) -> ::std::result::Result<(), String> { + use #scrate::rstd::{cell::RefCell, marker::PhantomData}; + let storage = (RefCell::new(r), PhantomData::::default()); + + #builders + + let r = storage.0.into_inner(); + + #scall(r, c, &self); + + Ok(()) + } + } + } + } else { + quote!() + }) } fn decl_storage_items( - scrate: &TokenStream2, - traitinstance: &Ident, - traittype: &syn::TypeParamBound, - instance_opts: &InstanceOpts, - cratename: &Ident, - storage_lines: &ext::Punctuated, + scrate: &TokenStream2, + traitinstance: &Ident, + traittype: &syn::TypeParamBound, + instance_opts: &InstanceOpts, + cratename: &Ident, + storage_lines: &ext::Punctuated, ) -> TokenStream2 { - - let mut impls = TokenStream2::new(); - - let InstanceOpts { - instance, - default_instance, - instantiable, - .. - } = instance_opts; - - let build_prefix = |cratename, name| format!("{} {}", cratename, name); - - // Build Instantiable trait - if instance.is_some() { - let mut const_names = vec![]; - - for sline in storage_lines.inner.iter() { - let DeclStorageLine { - storage_type, - name, - .. - } = sline; - - let prefix = build_prefix(cratename, name); - - let type_infos = get_type_infos(storage_type); - - let const_name = syn::Ident::new(&format!("{}{}", impls::PREFIX_FOR, name.to_string()), proc_macro2::Span::call_site()); - let partial_const_value = prefix.clone(); - const_names.push((const_name, partial_const_value)); - - if let DeclStorageTypeInfosKind::Map { is_linked: true, .. } = type_infos.kind { - let const_name = syn::Ident::new(&format!("{}{}", impls::HEAD_KEY_FOR, name.to_string()), proc_macro2::Span::call_site()); - let partial_const_value = format!("head of {}", prefix); - const_names.push((const_name, partial_const_value)); - } - } - - // Declare Instance trait - { - let mut const_impls = TokenStream2::new(); - for (const_name, _) in &const_names { - const_impls.extend(quote! { - const #const_name: &'static str; - }); - } - - impls.extend(quote! { - /// Tag a type as an instance of a module. - /// - /// Defines storage prefixes, they must be unique. - pub trait #instantiable: 'static { - #const_impls - } - }); - } - - let instances = (0..NUMBER_OF_INSTANCE) - .map(|i| { - let name = format!("Instance{}", i); - let ident = syn::Ident::new(&name, proc_macro2::Span::call_site()); - (name, ident, quote! {#[doc=r"Module instance"]}) - }) - .chain(default_instance.clone().map(|ident| (String::new(), ident, quote! {#[doc=r"Default module instance"]}))); - - // Impl Instance trait for instances - for (prefix, ident, doc) in instances { - let mut const_impls = TokenStream2::new(); - - for (const_name, partial_const_value) in &const_names { - let const_value = format!("{}{}", partial_const_value, prefix); - const_impls.extend(quote! 
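Stripped of serde and the real `BuildStorage` trait, the `GenesisConfig` emitted above reduces to an ordinary struct: one field per `config()` line (plus any `add_extra_genesis` fields), a `Default` built from the declared field defaults, and an `assimilate_storage` that runs each storage item's builder against the overlay. A plain-Rust sketch of that shape (key and field names invented):

    use std::collections::HashMap;

    // Stand-in for `runtime_primitives::StorageOverlay`: raw keys to raw values.
    type StorageOverlay = HashMap<Vec<u8>, Vec<u8>>;

    pub struct GenesisConfig {
        // One field per `config()` storage line.
        pub dummy: u32,
    }

    impl Default for GenesisConfig {
        fn default() -> Self {
            // Counterpart of `#config_field_default`: the declared `= ...` values.
            GenesisConfig { dummy: 3 }
        }
    }

    impl GenesisConfig {
        // Counterpart of `BuildStorage::assimilate_storage`: each `#builders`
        // entry computes one storage item's initial value and writes it.
        pub fn assimilate_storage(self, r: &mut StorageOverlay) -> Result<(), String> {
            r.insert(b"Example Dummy".to_vec(), self.dummy.to_le_bytes().to_vec());
            Ok(())
        }
    }

    fn main() {
        let mut storage = StorageOverlay::new();
        GenesisConfig::default().assimilate_storage(&mut storage).unwrap();
        assert!(storage.contains_key(&b"Example Dummy".to_vec()));
    }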
{ - const #const_name: &'static str = #const_value; - }); - } - - impls.extend(quote! { - // Those trait are derived because of wrong bounds for generics - #[cfg_attr(feature = "std", derive(Debug))] - #[derive(Clone, Eq, PartialEq, #scrate::codec::Encode, #scrate::codec::Decode)] - #doc - pub struct #ident; - impl #instantiable for #ident { - #const_impls - } - }); - } - } - - for sline in storage_lines.inner.iter() { - let DeclStorageLine { - attrs, - name, - storage_type, - default_value, - visibility, - .. - } = sline; - - let type_infos = get_type_infos(storage_type); - let kind = type_infos.kind.clone(); - // Propagate doc attributes. - let attrs = attrs.inner.iter().filter_map(|a| a.parse_meta().ok()).filter(|m| m.name() == "doc"); - - let i = impls::Impls { - scrate, - visibility, - cratename, - traitinstance, - traittype, - instance_opts, - type_infos, - fielddefault: default_value.inner.as_ref().map(|d| &d.expr).map(|d| quote!( #d )) - .unwrap_or_else(|| quote!{ Default::default() }), - prefix: build_prefix(cratename, name), - name, - attrs, - }; - - let implementation = match kind { - DeclStorageTypeInfosKind::Simple => { - i.simple_value() - }, - DeclStorageTypeInfosKind::Map { key_type, is_linked: false } => { - i.map(key_type) - }, - DeclStorageTypeInfosKind::Map { key_type, is_linked: true } => { - i.linked_map(key_type) - }, - DeclStorageTypeInfosKind::DoubleMap { key1_type, key2_type, key2_hasher } => { - i.double_map(key1_type, key2_type, key2_hasher) - }, - }; - impls.extend(implementation) - } - impls + let mut impls = TokenStream2::new(); + + let InstanceOpts { + instance, + default_instance, + instantiable, + .. + } = instance_opts; + + let build_prefix = |cratename, name| format!("{} {}", cratename, name); + + // Build Instantiable trait + if instance.is_some() { + let mut const_names = vec![]; + + for sline in storage_lines.inner.iter() { + let DeclStorageLine { + storage_type, name, .. + } = sline; + + let prefix = build_prefix(cratename, name); + + let type_infos = get_type_infos(storage_type); + + let const_name = syn::Ident::new( + &format!("{}{}", impls::PREFIX_FOR, name.to_string()), + proc_macro2::Span::call_site(), + ); + let partial_const_value = prefix.clone(); + const_names.push((const_name, partial_const_value)); + + if let DeclStorageTypeInfosKind::Map { + is_linked: true, .. + } = type_infos.kind + { + let const_name = syn::Ident::new( + &format!("{}{}", impls::HEAD_KEY_FOR, name.to_string()), + proc_macro2::Span::call_site(), + ); + let partial_const_value = format!("head of {}", prefix); + const_names.push((const_name, partial_const_value)); + } + } + + // Declare Instance trait + { + let mut const_impls = TokenStream2::new(); + for (const_name, _) in &const_names { + const_impls.extend(quote! { + const #const_name: &'static str; + }); + } + + impls.extend(quote! { + /// Tag a type as an instance of a module. + /// + /// Defines storage prefixes, they must be unique. + pub trait #instantiable: 'static { + #const_impls + } + }); + } + + let instances = (0..NUMBER_OF_INSTANCE) + .map(|i| { + let name = format!("Instance{}", i); + let ident = syn::Ident::new(&name, proc_macro2::Span::call_site()); + (name, ident, quote! {#[doc=r"Module instance"]}) + }) + .chain(default_instance.clone().map(|ident| { + ( + String::new(), + ident, + quote! 
{#[doc=r"Default module instance"]}, + ) + })); + + // Impl Instance trait for instances + for (prefix, ident, doc) in instances { + let mut const_impls = TokenStream2::new(); + + for (const_name, partial_const_value) in &const_names { + let const_value = format!("{}{}", partial_const_value, prefix); + const_impls.extend(quote! { + const #const_name: &'static str = #const_value; + }); + } + + impls.extend(quote! { + // Those trait are derived because of wrong bounds for generics + #[cfg_attr(feature = "std", derive(Debug))] + #[derive(Clone, Eq, PartialEq, #scrate::codec::Encode, #scrate::codec::Decode)] + #doc + pub struct #ident; + impl #instantiable for #ident { + #const_impls + } + }); + } + } + + for sline in storage_lines.inner.iter() { + let DeclStorageLine { + attrs, + name, + storage_type, + default_value, + visibility, + .. + } = sline; + + let type_infos = get_type_infos(storage_type); + let kind = type_infos.kind.clone(); + // Propagate doc attributes. + let attrs = attrs + .inner + .iter() + .filter_map(|a| a.parse_meta().ok()) + .filter(|m| m.name() == "doc"); + + let i = impls::Impls { + scrate, + visibility, + cratename, + traitinstance, + traittype, + instance_opts, + type_infos, + fielddefault: default_value + .inner + .as_ref() + .map(|d| &d.expr) + .map(|d| quote!( #d )) + .unwrap_or_else(|| quote! { Default::default() }), + prefix: build_prefix(cratename, name), + name, + attrs, + }; + + let implementation = match kind { + DeclStorageTypeInfosKind::Simple => i.simple_value(), + DeclStorageTypeInfosKind::Map { + key_type, + is_linked: false, + } => i.map(key_type), + DeclStorageTypeInfosKind::Map { + key_type, + is_linked: true, + } => i.linked_map(key_type), + DeclStorageTypeInfosKind::DoubleMap { + key1_type, + key2_type, + key2_hasher, + } => i.double_map(key1_type, key2_type, key2_hasher), + }; + impls.extend(implementation) + } + impls } - -fn decl_store_items( - storage_lines: &ext::Punctuated, -) -> TokenStream2 { - storage_lines.inner.iter().map(|sline| &sline.name) - .fold(TokenStream2::new(), |mut items, name| { - items.extend(quote!(type #name;)); - items - }) +fn decl_store_items(storage_lines: &ext::Punctuated) -> TokenStream2 { + storage_lines.inner.iter().map(|sline| &sline.name).fold( + TokenStream2::new(), + |mut items, name| { + items.extend(quote!(type #name;)); + items + }, + ) } fn impl_store_items( - traitinstance: &Ident, - instance: &Option, - storage_lines: &ext::Punctuated, + traitinstance: &Ident, + instance: &Option, + storage_lines: &ext::Punctuated, ) -> TokenStream2 { - storage_lines.inner.iter().map(|sline| &sline.name) - .fold(TokenStream2::new(), |mut items, name| { - items.extend( - quote!( - type #name = #name<#traitinstance, #instance>; - ) - ); - items - }) + storage_lines.inner.iter().map(|sline| &sline.name).fold( + TokenStream2::new(), + |mut items, name| { + items.extend(quote!( + type #name = #name<#traitinstance, #instance>; + )); + items + }, + ) } fn impl_store_fns( - scrate: &TokenStream2, - traitinstance: &Ident, - instance: &Option, - storage_lines: &ext::Punctuated, + scrate: &TokenStream2, + traitinstance: &Ident, + instance: &Option, + storage_lines: &ext::Punctuated, ) -> TokenStream2 { - let mut items = TokenStream2::new(); - for sline in storage_lines.inner.iter() { - let DeclStorageLine { - attrs, - name, - getter, - storage_type, - .. 
- } = sline; - - if let Some(getter) = getter { - let get_fn = &getter.getfn.content; - - let type_infos = get_type_infos(storage_type); - let value_type = type_infos.value_type; - - // Propagate doc attributes. - let attrs = attrs.inner.iter().filter_map(|a| a.parse_meta().ok()).filter(|m| m.name() == "doc"); - - let typ = type_infos.typ; - let item = match type_infos.kind { - DeclStorageTypeInfosKind::Simple => { - quote!{ - #( #[ #attrs ] )* - pub fn #get_fn() -> #value_type { - <#name<#traitinstance, #instance> as #scrate::storage::generator::StorageValue<#typ>> :: get(&#scrate::storage::RuntimeStorage) - } - } - }, - DeclStorageTypeInfosKind::Map { key_type, .. } => { - quote!{ - #( #[ #attrs ] )* - pub fn #get_fn>(key: K) -> #value_type { - <#name<#traitinstance, #instance> as #scrate::storage::generator::StorageMap<#key_type, #typ>> :: get(key.borrow(), &#scrate::storage::RuntimeStorage) - } - } - } - DeclStorageTypeInfosKind::DoubleMap { key1_type, key2_type, .. } => { - quote!{ - pub fn #get_fn(k1: KArg1, k2: KArg2) -> #value_type - where - KArg1: #scrate::storage::generator::Borrow<#key1_type>, - KArg2: #scrate::storage::generator::Borrow<#key2_type>, - { - <#name<#traitinstance> as #scrate::storage::unhashed::generator::StorageDoubleMap<#key1_type, #key2_type, #typ>> :: get(k1.borrow(), k2.borrow(), &#scrate::storage::RuntimeStorage) - } - } - } - }; - items.extend(item); - } - } - items + let mut items = TokenStream2::new(); + for sline in storage_lines.inner.iter() { + let DeclStorageLine { + attrs, + name, + getter, + storage_type, + .. + } = sline; + + if let Some(getter) = getter { + let get_fn = &getter.getfn.content; + + let type_infos = get_type_infos(storage_type); + let value_type = type_infos.value_type; + + // Propagate doc attributes. + let attrs = attrs + .inner + .iter() + .filter_map(|a| a.parse_meta().ok()) + .filter(|m| m.name() == "doc"); + + let typ = type_infos.typ; + let item = match type_infos.kind { + DeclStorageTypeInfosKind::Simple => { + quote! { + #( #[ #attrs ] )* + pub fn #get_fn() -> #value_type { + <#name<#traitinstance, #instance> as #scrate::storage::generator::StorageValue<#typ>> :: get(&#scrate::storage::RuntimeStorage) + } + } + } + DeclStorageTypeInfosKind::Map { key_type, .. } => { + quote! { + #( #[ #attrs ] )* + pub fn #get_fn>(key: K) -> #value_type { + <#name<#traitinstance, #instance> as #scrate::storage::generator::StorageMap<#key_type, #typ>> :: get(key.borrow(), &#scrate::storage::RuntimeStorage) + } + } + } + DeclStorageTypeInfosKind::DoubleMap { + key1_type, + key2_type, + .. + } => { + quote! { + pub fn #get_fn(k1: KArg1, k2: KArg2) -> #value_type + where + KArg1: #scrate::storage::generator::Borrow<#key1_type>, + KArg2: #scrate::storage::generator::Borrow<#key2_type>, + { + <#name<#traitinstance> as #scrate::storage::unhashed::generator::StorageDoubleMap<#key1_type, #key2_type, #typ>> :: get(k1.borrow(), k2.borrow(), &#scrate::storage::RuntimeStorage) + } + } + } + }; + items.extend(item); + } + } + items } -fn store_functions_to_metadata ( - scrate: &TokenStream2, - traitinstance: &Ident, - traittype: &syn::TypeParamBound, - instance_opts: &InstanceOpts, - storage_lines: &ext::Punctuated, +fn store_functions_to_metadata( + scrate: &TokenStream2, + traitinstance: &Ident, + traittype: &syn::TypeParamBound, + instance_opts: &InstanceOpts, + storage_lines: &ext::Punctuated, ) -> (TokenStream2, TokenStream2) { - - let InstanceOpts { - comma_instance, - equal_default_instance, - bound_instantiable, - instance, - .. 
- } = instance_opts; - - let mut items = TokenStream2::new(); - let mut default_getter_struct_def = TokenStream2::new(); - for sline in storage_lines.inner.iter() { - let DeclStorageLine { - attrs, - name, - storage_type, - default_value, - .. - } = sline; - - let type_infos = get_type_infos(storage_type); - let value_type = type_infos.value_type; - - let typ = type_infos.typ; - let styp = clean_type_string(&typ.to_string()); - let stype = match type_infos.kind { - DeclStorageTypeInfosKind::Simple => { - quote!{ - #scrate::storage::generator::StorageFunctionType::Plain( - #scrate::storage::generator::DecodeDifferent::Encode(#styp), - ) - } - }, - DeclStorageTypeInfosKind::Map { key_type, is_linked } => { - let kty = clean_type_string("e!(#key_type).to_string()); - quote!{ - #scrate::storage::generator::StorageFunctionType::Map { - key: #scrate::storage::generator::DecodeDifferent::Encode(#kty), - value: #scrate::storage::generator::DecodeDifferent::Encode(#styp), - is_linked: #is_linked, - } - } - }, - DeclStorageTypeInfosKind::DoubleMap { key1_type, key2_type, key2_hasher } => { - let k1ty = clean_type_string("e!(#key1_type).to_string()); - let k2ty = clean_type_string("e!(#key2_type).to_string()); - let k2_hasher = clean_type_string(&key2_hasher.to_string()); - quote!{ - #scrate::storage::generator::StorageFunctionType::DoubleMap { - key1: #scrate::storage::generator::DecodeDifferent::Encode(#k1ty), - key2: #scrate::storage::generator::DecodeDifferent::Encode(#k2ty), - value: #scrate::storage::generator::DecodeDifferent::Encode(#styp), - key2_hasher: #scrate::storage::generator::DecodeDifferent::Encode(#k2_hasher), - } - } - }, - }; - let modifier = if type_infos.is_option { - quote!{ - #scrate::storage::generator::StorageFunctionModifier::Optional - } - } else { - quote!{ - #scrate::storage::generator::StorageFunctionModifier::Default - } - }; - let default = default_value.inner.as_ref().map(|d| &d.expr) - .map(|d| { - quote!( #d ) - }) - .unwrap_or_else(|| quote!( Default::default() )); - let mut docs = TokenStream2::new(); - for attr in attrs.inner.iter().filter_map(|v| v.parse_meta().ok()) { - if let syn::Meta::NameValue(syn::MetaNameValue{ - ref ident, - ref lit, - .. - }) = attr { - if ident == "doc" { - docs.extend(quote!(#lit,)); - } - } - } - let str_name = name.to_string(); - let struct_name = proc_macro2::Ident::new(&("__GetByteStruct".to_string() + &str_name), name.span()); - let cache_name = proc_macro2::Ident::new(&("__CACHE_GET_BYTE_STRUCT_".to_string() + &str_name), name.span()); - let item = quote! { - #scrate::storage::generator::StorageFunctionMetadata { - name: #scrate::storage::generator::DecodeDifferent::Encode(#str_name), - modifier: #modifier, - ty: #stype, - default: #scrate::storage::generator::DecodeDifferent::Encode( - #scrate::storage::generator::DefaultByteGetter( - &#struct_name::<#traitinstance, #instance>(#scrate::rstd::marker::PhantomData) - ) - ), - documentation: #scrate::storage::generator::DecodeDifferent::Encode(&[ #docs ]), - }, - }; - items.extend(item); - let def_get = quote! 
{ - #[doc(hidden)] - pub struct #struct_name<#traitinstance, #instance #bound_instantiable #equal_default_instance>(pub #scrate::rstd::marker::PhantomData<(#traitinstance #comma_instance)>); - #[cfg(feature = "std")] - #[allow(non_upper_case_globals)] - static #cache_name: #scrate::once_cell::sync::OnceCell<#scrate::rstd::vec::Vec> = #scrate::once_cell::sync::OnceCell::INIT; - #[cfg(feature = "std")] - impl<#traitinstance: #traittype, #instance #bound_instantiable> #scrate::storage::generator::DefaultByte for #struct_name<#traitinstance, #instance> { - fn default_byte(&self) -> #scrate::rstd::vec::Vec { - use #scrate::codec::Encode; - #cache_name.get_or_init(|| { - let def_val: #value_type = #default; - <#value_type as Encode>::encode(&def_val) - }).clone() - } - } - #[cfg(not(feature = "std"))] - impl<#traitinstance: #traittype, #instance #bound_instantiable> #scrate::storage::generator::DefaultByte for #struct_name<#traitinstance, #instance> { - fn default_byte(&self) -> #scrate::rstd::vec::Vec { - use #scrate::codec::Encode; - let def_val: #value_type = #default; - <#value_type as Encode>::encode(&def_val) - } - } - }; - default_getter_struct_def.extend(def_get); - } - (default_getter_struct_def, quote!{ - { - &[ - #items - ] - } - }) + let InstanceOpts { + comma_instance, + equal_default_instance, + bound_instantiable, + instance, + .. + } = instance_opts; + + let mut items = TokenStream2::new(); + let mut default_getter_struct_def = TokenStream2::new(); + for sline in storage_lines.inner.iter() { + let DeclStorageLine { + attrs, + name, + storage_type, + default_value, + .. + } = sline; + + let type_infos = get_type_infos(storage_type); + let value_type = type_infos.value_type; + + let typ = type_infos.typ; + let styp = clean_type_string(&typ.to_string()); + let stype = match type_infos.kind { + DeclStorageTypeInfosKind::Simple => { + quote! { + #scrate::storage::generator::StorageFunctionType::Plain( + #scrate::storage::generator::DecodeDifferent::Encode(#styp), + ) + } + } + DeclStorageTypeInfosKind::Map { + key_type, + is_linked, + } => { + let kty = clean_type_string("e!(#key_type).to_string()); + quote! { + #scrate::storage::generator::StorageFunctionType::Map { + key: #scrate::storage::generator::DecodeDifferent::Encode(#kty), + value: #scrate::storage::generator::DecodeDifferent::Encode(#styp), + is_linked: #is_linked, + } + } + } + DeclStorageTypeInfosKind::DoubleMap { + key1_type, + key2_type, + key2_hasher, + } => { + let k1ty = clean_type_string("e!(#key1_type).to_string()); + let k2ty = clean_type_string("e!(#key2_type).to_string()); + let k2_hasher = clean_type_string(&key2_hasher.to_string()); + quote! { + #scrate::storage::generator::StorageFunctionType::DoubleMap { + key1: #scrate::storage::generator::DecodeDifferent::Encode(#k1ty), + key2: #scrate::storage::generator::DecodeDifferent::Encode(#k2ty), + value: #scrate::storage::generator::DecodeDifferent::Encode(#styp), + key2_hasher: #scrate::storage::generator::DecodeDifferent::Encode(#k2_hasher), + } + } + } + }; + let modifier = if type_infos.is_option { + quote! { + #scrate::storage::generator::StorageFunctionModifier::Optional + } + } else { + quote! 
{ + #scrate::storage::generator::StorageFunctionModifier::Default + } + }; + let default = default_value + .inner + .as_ref() + .map(|d| &d.expr) + .map(|d| quote!( #d )) + .unwrap_or_else(|| quote!(Default::default())); + let mut docs = TokenStream2::new(); + for attr in attrs.inner.iter().filter_map(|v| v.parse_meta().ok()) { + if let syn::Meta::NameValue(syn::MetaNameValue { + ref ident, ref lit, .. + }) = attr + { + if ident == "doc" { + docs.extend(quote!(#lit,)); + } + } + } + let str_name = name.to_string(); + let struct_name = + proc_macro2::Ident::new(&("__GetByteStruct".to_string() + &str_name), name.span()); + let cache_name = proc_macro2::Ident::new( + &("__CACHE_GET_BYTE_STRUCT_".to_string() + &str_name), + name.span(), + ); + let item = quote! { + #scrate::storage::generator::StorageFunctionMetadata { + name: #scrate::storage::generator::DecodeDifferent::Encode(#str_name), + modifier: #modifier, + ty: #stype, + default: #scrate::storage::generator::DecodeDifferent::Encode( + #scrate::storage::generator::DefaultByteGetter( + &#struct_name::<#traitinstance, #instance>(#scrate::rstd::marker::PhantomData) + ) + ), + documentation: #scrate::storage::generator::DecodeDifferent::Encode(&[ #docs ]), + }, + }; + items.extend(item); + let def_get = quote! { + #[doc(hidden)] + pub struct #struct_name<#traitinstance, #instance #bound_instantiable #equal_default_instance>(pub #scrate::rstd::marker::PhantomData<(#traitinstance #comma_instance)>); + #[cfg(feature = "std")] + #[allow(non_upper_case_globals)] + static #cache_name: #scrate::once_cell::sync::OnceCell<#scrate::rstd::vec::Vec> = #scrate::once_cell::sync::OnceCell::INIT; + #[cfg(feature = "std")] + impl<#traitinstance: #traittype, #instance #bound_instantiable> #scrate::storage::generator::DefaultByte for #struct_name<#traitinstance, #instance> { + fn default_byte(&self) -> #scrate::rstd::vec::Vec { + use #scrate::codec::Encode; + #cache_name.get_or_init(|| { + let def_val: #value_type = #default; + <#value_type as Encode>::encode(&def_val) + }).clone() + } + } + #[cfg(not(feature = "std"))] + impl<#traitinstance: #traittype, #instance #bound_instantiable> #scrate::storage::generator::DefaultByte for #struct_name<#traitinstance, #instance> { + fn default_byte(&self) -> #scrate::rstd::vec::Vec { + use #scrate::codec::Encode; + let def_val: #value_type = #default; + <#value_type as Encode>::encode(&def_val) + } + } + }; + default_getter_struct_def.extend(def_get); + } + ( + default_getter_struct_def, + quote! 
{ + { + &[ + #items + ] + } + }, + ) } - #[derive(Debug, Clone)] pub(crate) struct DeclStorageTypeInfos<'a> { - pub is_option: bool, - pub typ: TokenStream2, - pub value_type: &'a syn::Type, - kind: DeclStorageTypeInfosKind<'a>, + pub is_option: bool, + pub typ: TokenStream2, + pub value_type: &'a syn::Type, + kind: DeclStorageTypeInfosKind<'a>, } #[derive(Debug, Clone)] enum DeclStorageTypeInfosKind<'a> { - Simple, - Map { - key_type: &'a syn::Type, - is_linked: bool, - }, - DoubleMap { - key1_type: &'a syn::Type, - key2_type: &'a syn::Type, - key2_hasher: TokenStream2, - } + Simple, + Map { + key_type: &'a syn::Type, + is_linked: bool, + }, + DoubleMap { + key1_type: &'a syn::Type, + key2_type: &'a syn::Type, + key2_hasher: TokenStream2, + }, } impl<'a> DeclStorageTypeInfosKind<'a> { - fn is_simple(&self) -> bool { - match *self { - DeclStorageTypeInfosKind::Simple => true, - _ => false, - } - } + fn is_simple(&self) -> bool { + match *self { + DeclStorageTypeInfosKind::Simple => true, + _ => false, + } + } } fn get_type_infos(storage_type: &DeclStorageType) -> DeclStorageTypeInfos { - let (value_type, kind) = match storage_type { - DeclStorageType::Simple(ref st) => (st, DeclStorageTypeInfosKind::Simple), - DeclStorageType::Map(ref map) => (&map.value, DeclStorageTypeInfosKind::Map { - key_type: &map.key, - is_linked: false, - }), - DeclStorageType::LinkedMap(ref map) => (&map.value, DeclStorageTypeInfosKind::Map { - key_type: &map.key, - is_linked: true, - }), - DeclStorageType::DoubleMap(ref map) => (&map.value, DeclStorageTypeInfosKind::DoubleMap { - key1_type: &map.key1, - key2_type: &map.key2.content, - key2_hasher: { let h = &map.key2_hasher; quote! { #h } }, - }), - }; - - let extracted_type = ext::extract_type_option(value_type); - let is_option = extracted_type.is_some(); - let typ = extracted_type.unwrap_or(quote!( #value_type )); - - DeclStorageTypeInfos { - is_option, - typ, - value_type, - kind, - } - + let (value_type, kind) = match storage_type { + DeclStorageType::Simple(ref st) => (st, DeclStorageTypeInfosKind::Simple), + DeclStorageType::Map(ref map) => ( + &map.value, + DeclStorageTypeInfosKind::Map { + key_type: &map.key, + is_linked: false, + }, + ), + DeclStorageType::LinkedMap(ref map) => ( + &map.value, + DeclStorageTypeInfosKind::Map { + key_type: &map.key, + is_linked: true, + }, + ), + DeclStorageType::DoubleMap(ref map) => ( + &map.value, + DeclStorageTypeInfosKind::DoubleMap { + key1_type: &map.key1, + key2_type: &map.key2.content, + key2_hasher: { + let h = &map.key2_hasher; + quote! 
{ #h } + }, + }, + ), + }; + + let extracted_type = ext::extract_type_option(value_type); + let is_option = extracted_type.is_some(); + let typ = extracted_type.unwrap_or(quote!( #value_type )); + + DeclStorageTypeInfos { + is_option, + typ, + value_type, + kind, + } } #[derive(Default)] pub(crate) struct InstanceOpts { - pub instance: Option, - pub default_instance: Option, - pub instantiable: Option, - pub comma_instance: TokenStream2, - pub equal_default_instance: TokenStream2, - pub bound_instantiable: TokenStream2, + pub instance: Option, + pub default_instance: Option, + pub instantiable: Option, + pub comma_instance: TokenStream2, + pub equal_default_instance: TokenStream2, + pub bound_instantiable: TokenStream2, } fn get_instance_opts( - instance: Option, - instantiable: Option, - default_instance: Option, + instance: Option, + instantiable: Option, + default_instance: Option, ) -> syn::Result { - - let right_syntax = "Should be $Instance: $Instantiable = $DefaultInstance"; - - match (instance, instantiable, default_instance) { - (Some(instance), Some(instantiable), default_instance_def) => { - let (equal_default_instance, default_instance) = if let Some(default_instance) = default_instance_def { - (quote!{= #default_instance}, Some(default_instance)) - } else { - (quote!{}, None) - }; - Ok(InstanceOpts { - comma_instance: quote!{, #instance}, - equal_default_instance, - bound_instantiable: quote!{: #instantiable}, - instance: Some(instance), - default_instance, - instantiable: Some(instantiable), - }) - }, - (None, None, None) => Ok(Default::default()), - (Some(instance), None, _) => Err(syn::Error::new(instance.span(), format!("Expect instantiable trait bound for instance: {}. {}", instance, right_syntax))), - (None, Some(instantiable), _) => Err(syn::Error::new(instantiable.span(), format!("Expect instance generic for bound instantiable: {}. {}", instantiable, right_syntax))), - (None, _, Some(default_instance)) => Err(syn::Error::new(default_instance.span(), format!("Expect instance generic for default instance: {}. {}", default_instance, right_syntax))), - } + let right_syntax = "Should be $Instance: $Instantiable = $DefaultInstance"; + + match (instance, instantiable, default_instance) { + (Some(instance), Some(instantiable), default_instance_def) => { + let (equal_default_instance, default_instance) = + if let Some(default_instance) = default_instance_def { + (quote! {= #default_instance}, Some(default_instance)) + } else { + (quote! {}, None) + }; + Ok(InstanceOpts { + comma_instance: quote! {, #instance}, + equal_default_instance, + bound_instantiable: quote! {: #instantiable}, + instance: Some(instance), + default_instance, + instantiable: Some(instantiable), + }) + } + (None, None, None) => Ok(Default::default()), + (Some(instance), None, _) => Err(syn::Error::new( + instance.span(), + format!( + "Expect instantiable trait bound for instance: {}. {}", + instance, right_syntax + ), + )), + (None, Some(instantiable), _) => Err(syn::Error::new( + instantiable.span(), + format!( + "Expect instance generic for bound instantiable: {}. {}", + instantiable, right_syntax + ), + )), + (None, _, Some(default_instance)) => Err(syn::Error::new( + default_instance.span(), + format!( + "Expect instance generic for default instance: {}. 
{}", + default_instance, right_syntax + ), + )), + } } diff --git a/srml/support/procedural/tools/derive/src/lib.rs b/srml/support/procedural/tools/derive/src/lib.rs index 0e3fcb2247..1c5ea6d62b 100644 --- a/srml/support/procedural/tools/derive/src/lib.rs +++ b/srml/support/procedural/tools/derive/src/lib.rs @@ -24,32 +24,36 @@ extern crate proc_macro; use proc_macro::TokenStream; use proc_macro2::Span; -use syn::parse_macro_input; use quote::quote; +use syn::parse_macro_input; pub(crate) fn fields_idents( - fields: impl Iterator, + fields: impl Iterator, ) -> impl Iterator { - fields.enumerate().map(|(ix, field)| { - field.ident.clone().map(|i| quote!{#i}).unwrap_or_else(|| { - let f_ix: syn::Ident = syn::Ident::new(&format!("f_{}", ix), Span::call_site()); - quote!( #f_ix ) - }) - }) + fields.enumerate().map(|(ix, field)| { + field.ident.clone().map(|i| quote! {#i}).unwrap_or_else(|| { + let f_ix: syn::Ident = syn::Ident::new(&format!("f_{}", ix), Span::call_site()); + quote!( #f_ix ) + }) + }) } pub(crate) fn fields_access( - fields: impl Iterator, + fields: impl Iterator, ) -> impl Iterator { - fields.enumerate().map(|(ix, field)| { - field.ident.clone().map(|i| quote!( #i )).unwrap_or_else(|| { - let f_ix: syn::Index = syn::Index { - index: ix as u32, - span: Span::call_site(), - }; - quote!( #f_ix ) - }) - }) + fields.enumerate().map(|(ix, field)| { + field + .ident + .clone() + .map(|i| quote!( #i )) + .unwrap_or_else(|| { + let f_ix: syn::Index = syn::Index { + index: ix as u32, + span: Span::call_site(), + }; + quote!( #f_ix ) + }) + }) } /// self defined parsing struct or enum. @@ -61,108 +65,106 @@ pub(crate) fn fields_access( /// Please use carefully, this will fully parse successfull variant twice. #[proc_macro_derive(Parse)] pub fn derive_parse(input: TokenStream) -> TokenStream { - let item = parse_macro_input!(input as syn::Item); - match item { - syn::Item::Enum(input) => derive_parse_enum(input), - syn::Item::Struct(input) => derive_parse_struct(input), - _ => TokenStream::new(), // ignore - } + let item = parse_macro_input!(input as syn::Item); + match item { + syn::Item::Enum(input) => derive_parse_enum(input), + syn::Item::Struct(input) => derive_parse_struct(input), + _ => TokenStream::new(), // ignore + } } fn derive_parse_struct(input: syn::ItemStruct) -> TokenStream { - let syn::ItemStruct { - ident, - generics, - fields, - .. - } = input; - let field_names = { - let name = fields_idents(fields.iter().map(Clone::clone)); - quote!{ - #( - #name, - )* - } - }; - let field = fields_idents(fields.iter().map(Clone::clone)); - let tokens = quote! { - impl #generics syn::parse::Parse for #ident #generics { - fn parse(input: syn::parse::ParseStream) -> syn::parse::Result { - #( - let #field = input.parse()?; - )* - Ok(Self { - #field_names - }) - } - } - }; - tokens.into() + let syn::ItemStruct { + ident, + generics, + fields, + .. + } = input; + let field_names = { + let name = fields_idents(fields.iter().map(Clone::clone)); + quote! { + #( + #name, + )* + } + }; + let field = fields_idents(fields.iter().map(Clone::clone)); + let tokens = quote! { + impl #generics syn::parse::Parse for #ident #generics { + fn parse(input: syn::parse::ParseStream) -> syn::parse::Result { + #( + let #field = input.parse()?; + )* + Ok(Self { + #field_names + }) + } + } + }; + tokens.into() } fn derive_parse_enum(input: syn::ItemEnum) -> TokenStream { - let syn::ItemEnum { - ident, - generics, - variants, - .. 
- } = input; - let variants = variants.iter().map(|v| { - let variant_ident = v.ident.clone(); - let fields_build = if v.fields.iter().count() > 0 { - let fields_id = fields_idents(v.fields.iter().map(Clone::clone)); - quote!( (#(#fields_id), *) ) - } else { - quote!() - }; - - let fields_procs = fields_idents(v.fields.iter().map(Clone::clone)) - .map(|fident| { - quote!{ - let mut #fident = match fork.parse() { - Ok(r) => r, - Err(_e) => break, - }; - } - }); - let fields_procs_again = fields_idents(v.fields.iter().map(Clone::clone)) - .map(|fident| { - quote!{ - #fident = input.parse().expect("was parsed just before"); - } - }); - - // double parse to update input cursor position - // next syn crate version should be checked for a way - // to copy position/state from a fork - quote!{ - let mut fork = input.fork(); - loop { - #(#fields_procs)* - #(#fields_procs_again)* - return Ok(#ident::#variant_ident#fields_build); - } - } - }); - - let tokens = quote! { - impl #generics syn::parse::Parse for #ident #generics { - fn parse(input: syn::parse::ParseStream) -> syn::parse::Result { - #( - #variants - )* - // no early return from any variants - Err( - syn::parse::Error::new( - proc_macro2::Span::call_site(), - "derived enum no matching variants" - ) - ) - } - } - - }; - tokens.into() + let syn::ItemEnum { + ident, + generics, + variants, + .. + } = input; + let variants = variants.iter().map(|v| { + let variant_ident = v.ident.clone(); + let fields_build = if v.fields.iter().count() > 0 { + let fields_id = fields_idents(v.fields.iter().map(Clone::clone)); + quote!( (#(#fields_id), *) ) + } else { + quote!() + }; + + let fields_procs = fields_idents(v.fields.iter().map(Clone::clone)).map(|fident| { + quote! { + let mut #fident = match fork.parse() { + Ok(r) => r, + Err(_e) => break, + }; + } + }); + let fields_procs_again = fields_idents(v.fields.iter().map(Clone::clone)).map(|fident| { + quote! { + #fident = input.parse().expect("was parsed just before"); + } + }); + + // double parse to update input cursor position + // next syn crate version should be checked for a way + // to copy position/state from a fork + quote! { + let mut fork = input.fork(); + loop { + #(#fields_procs)* + #(#fields_procs_again)* + return Ok(#ident::#variant_ident#fields_build); + } + } + }); + + let tokens = quote! { + impl #generics syn::parse::Parse for #ident #generics { + fn parse(input: syn::parse::ParseStream) -> syn::parse::Result { + #( + #variants + )* + // no early return from any variants + Err( + syn::parse::Error::new( + proc_macro2::Span::call_site(), + "derived enum no matching variants" + ) + ) + } + } + + }; + tokens.into() } /// self defined parsing struct or enum. @@ -172,72 +174,72 @@ fn derive_parse_enum(input: syn::ItemEnum) -> TokenStream { /// it only output fields (empty field act as a None). #[proc_macro_derive(ToTokens)] pub fn derive_totokens(input: TokenStream) -> TokenStream { - let item = parse_macro_input!(input as syn::Item); - match item { - syn::Item::Enum(input) => derive_totokens_enum(input), - syn::Item::Struct(input) => derive_totokens_struct(input), - _ => TokenStream::new(), // ignore - } + let item = parse_macro_input!(input as syn::Item); + match item { + syn::Item::Enum(input) => derive_totokens_enum(input), + syn::Item::Struct(input) => derive_totokens_struct(input), + _ => TokenStream::new(), // ignore + } } fn derive_totokens_struct(input: syn::ItemStruct) -> TokenStream { - let syn::ItemStruct { - ident, - generics, - fields, - .. 
- } = input; - - let fields = fields_access(fields.iter().map(Clone::clone)); - let tokens = quote! { - - impl #generics quote::ToTokens for #ident #generics { - fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) { - #( - self.#fields.to_tokens(tokens); - )* - } - } - - }; - tokens.into() + let syn::ItemStruct { + ident, + generics, + fields, + .. + } = input; + + let fields = fields_access(fields.iter().map(Clone::clone)); + let tokens = quote! { + + impl #generics quote::ToTokens for #ident #generics { + fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) { + #( + self.#fields.to_tokens(tokens); + )* + } + } + + }; + tokens.into() } fn derive_totokens_enum(input: syn::ItemEnum) -> TokenStream { - let syn::ItemEnum { - ident, - generics, - variants, - .. - } = input; - let variants = variants.iter().map(|v| { - let v_ident = v.ident.clone(); - let fields_build = if v.fields.iter().count() > 0 { - let fields_id = fields_idents(v.fields.iter().map(Clone::clone)); - quote!( (#(#fields_id), *) ) - } else { - quote!() - }; - let field = fields_idents(v.fields.iter().map(Clone::clone)); - quote! { - #ident::#v_ident#fields_build => { - #( - #field.to_tokens(tokens); - )* - }, - } - }); - let tokens = quote! { - impl #generics quote::ToTokens for #ident #generics { - fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) { - match self { - #( - #variants - )* - } - } - } - }; - - tokens.into() + let syn::ItemEnum { + ident, + generics, + variants, + .. + } = input; + let variants = variants.iter().map(|v| { + let v_ident = v.ident.clone(); + let fields_build = if v.fields.iter().count() > 0 { + let fields_id = fields_idents(v.fields.iter().map(Clone::clone)); + quote!( (#(#fields_id), *) ) + } else { + quote!() + }; + let field = fields_idents(v.fields.iter().map(Clone::clone)); + quote! { + #ident::#v_ident#fields_build => { + #( + #field.to_tokens(tokens); + )* + }, + } + }); + let tokens = quote! { + impl #generics quote::ToTokens for #ident #generics { + fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) { + match self { + #( + #variants + )* + } + } + } + }; + + tokens.into() } diff --git a/srml/support/procedural/tools/src/lib.rs b/srml/support/procedural/tools/src/lib.rs index 34b96df810..e47f2028dd 100644 --- a/srml/support/procedural/tools/src/lib.rs +++ b/srml/support/procedural/tools/src/lib.rs @@ -22,94 +22,96 @@ pub use srml_support_procedural_tools_derive::*; use proc_macro_crate::crate_name; -use syn::parse::Error; use quote::quote; +use syn::parse::Error; pub mod syn_ext; #[macro_export] macro_rules! custom_keyword_impl { - ($name:ident, $keyident:expr, $keydisp:expr) => { - - impl CustomKeyword for $name { - fn ident() -> &'static str { $keyident } - fn display() -> &'static str { $keydisp } - } - - } + ($name:ident, $keyident:expr, $keydisp:expr) => { + impl CustomKeyword for $name { + fn ident() -> &'static str { + $keyident + } + fn display() -> &'static str { + $keydisp + } + } + }; } #[macro_export] macro_rules! 
custom_keyword { - ($name:ident, $keyident:expr, $keydisp:expr) => { - - #[derive(Debug)] - struct $name; - - custom_keyword_impl!($name, $keyident, $keydisp); + ($name:ident, $keyident:expr, $keydisp:expr) => { + #[derive(Debug)] + struct $name; - } + custom_keyword_impl!($name, $keyident, $keydisp); + }; } // FIXME #1569, remove the following functions, which are copied from sr-api-macros -use proc_macro2::{TokenStream, Span}; +use proc_macro2::{Span, TokenStream}; use syn::Ident; fn generate_hidden_includes_mod_name(unique_id: &str) -> Ident { - Ident::new(&format!("sr_api_hidden_includes_{}", unique_id), Span::call_site()) + Ident::new( + &format!("sr_api_hidden_includes_{}", unique_id), + Span::call_site(), + ) } /// Generates the access to the `srml-support` crate. pub fn generate_crate_access(unique_id: &str, def_crate: &str) -> TokenStream { - if ::std::env::var("CARGO_PKG_NAME").unwrap() == def_crate { - quote::quote!( crate ) - } else { - let mod_name = generate_hidden_includes_mod_name(unique_id); - quote::quote!( self::#mod_name::hidden_include ) - } + if ::std::env::var("CARGO_PKG_NAME").unwrap() == def_crate { + quote::quote!(crate) + } else { + let mod_name = generate_hidden_includes_mod_name(unique_id); + quote::quote!( self::#mod_name::hidden_include ) + } } /// Generates the hidden includes that are required to make the macro independent from its scope. pub fn generate_hidden_includes(unique_id: &str, def_crate: &str) -> TokenStream { - if ::std::env::var("CARGO_PKG_NAME").unwrap() == def_crate { - TokenStream::new() - } else { - let mod_name = generate_hidden_includes_mod_name(unique_id); - - match crate_name(def_crate) { - Ok(name) => { - let name = Ident::new(&name, Span::call_site()); - quote::quote!( - #[doc(hidden)] - mod #mod_name { - pub extern crate #name as hidden_include; - } - ) - }, - Err(e) => { - let err = Error::new(Span::call_site(), &e).to_compile_error(); - quote!( #err ) - } - } - - } + if ::std::env::var("CARGO_PKG_NAME").unwrap() == def_crate { + TokenStream::new() + } else { + let mod_name = generate_hidden_includes_mod_name(unique_id); + + match crate_name(def_crate) { + Ok(name) => { + let name = Ident::new(&name, Span::call_site()); + quote::quote!( + #[doc(hidden)] + mod #mod_name { + pub extern crate #name as hidden_include; + } + ) + } + Err(e) => { + let err = Error::new(Span::call_site(), &e).to_compile_error(); + quote!( #err ) + } + } + } } // fn to remove white spaces arount string types // (basically whitespaces arount tokens) pub fn clean_type_string(input: &str) -> String { - input - .replace(" ::", "::") - .replace(":: ", "::") - .replace(" ,", ",") - .replace(" ;", ";") - .replace(" [", "[") - .replace("[ ", "[") - .replace(" ]", "]") - .replace(" (", "(") - .replace("( ", "(") - .replace(" )", ")") - .replace(" <", "<") - .replace("< ", "<") - .replace(" >", ">") + input + .replace(" ::", "::") + .replace(":: ", "::") + .replace(" ,", ",") + .replace(" ;", ";") + .replace(" [", "[") + .replace("[ ", "[") + .replace(" ]", "]") + .replace(" (", "(") + .replace("( ", "(") + .replace(" )", ")") + .replace(" <", "<") + .replace("< ", "<") + .replace(" >", ">") } diff --git a/srml/support/procedural/tools/src/syn_ext.rs b/srml/support/procedural/tools/src/syn_ext.rs index c2136b2cd8..5e906b24e3 100644 --- a/srml/support/procedural/tools/src/syn_ext.rs +++ b/srml/support/procedural/tools/src/syn_ext.rs @@ -18,54 +18,48 @@ //! 
Extension to syn types, mainly for parsing // end::description[] -use syn::parse::{ - Parse, - ParseStream, - Result, -}; -use syn::token::CustomKeyword; use proc_macro2::TokenStream as T2; -use quote::{ToTokens, quote}; +use quote::{quote, ToTokens}; +use srml_support_procedural_tools_derive::{Parse, ToTokens}; use std::iter::once; +use syn::parse::{Parse, ParseStream, Result}; +use syn::token::CustomKeyword; use syn::Ident; -use srml_support_procedural_tools_derive::{ToTokens, Parse}; /// stop parsing here getting remaining token as content /// Warn duplicate stream (part of) #[derive(Parse, ToTokens, Debug)] pub struct StopParse { - pub inner: T2, + pub inner: T2, } // inner macro really dependant on syn naming convention, do not export macro_rules! groups_impl { - ($name:ident, $tok:ident, $deli:ident, $parse:ident) => { - - #[derive(Debug)] - pub struct $name
<P>
{ - pub token: syn::token::$tok, - pub content: P, - } - - impl<P: Parse> Parse for $name
<P>
{ - fn parse(input: ParseStream) -> Result<Self> { - let syn::group::$name { token, content } = syn::group::$parse(input)?; - let content = content.parse()?; - Ok($name { token, content, }) - } - } - - impl<P: ToTokens> ToTokens for $name
<P>
{ - fn to_tokens(&self, tokens: &mut T2) { - let mut inner_stream = T2::new(); - self.content.to_tokens(&mut inner_stream); - let token_tree: proc_macro2::TokenTree = - proc_macro2::Group::new(proc_macro2::Delimiter::$deli, inner_stream).into(); - tokens.extend(once(token_tree)); - } - } - - } + ($name:ident, $tok:ident, $deli:ident, $parse:ident) => { + #[derive(Debug)] + pub struct $name
<P>
{ + pub token: syn::token::$tok, + pub content: P, + } + + impl<P: Parse> Parse for $name
<P>
{ + fn parse(input: ParseStream) -> Result<Self> { + let syn::group::$name { token, content } = syn::group::$parse(input)?; + let content = content.parse()?; + Ok($name { token, content }) + } + } + + impl<P: ToTokens> ToTokens for $name
<P>
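// Usage sketch (illustrative only; the concrete types are assumptions): each
// `groups_impl!` instantiation below, e.g. `Braces<P>`, parses exactly one
// delimited group and then parses a `P` from the group's interior:
//
//     let braced: Braces<syn::Ident> = syn::parse2(quote!({ foo }))?;
//     assert!(braced.content == "foo");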
{ + fn to_tokens(&self, tokens: &mut T2) { + let mut inner_stream = T2::new(); + self.content.to_tokens(&mut inner_stream); + let token_tree: proc_macro2::TokenTree = + proc_macro2::Group::new(proc_macro2::Delimiter::$deli, inner_stream).into(); + tokens.extend(once(token_tree)); + } + } + }; } groups_impl!(Braces, Brace, Brace, parse_braces); @@ -76,254 +70,259 @@ groups_impl!(Parens, Paren, Parenthesis, parse_parens); pub struct CustomToken(std::marker::PhantomData); impl Parse for CustomToken { - fn parse(input: ParseStream) -> Result { - let ident: syn::Ident = input.parse()?; - - if ident.to_string().as_str() != T::ident() { - return Err(syn::parse::Error::new_spanned(ident, "expected another custom token")) - } - Ok(CustomToken(std::marker::PhantomData)) - } + fn parse(input: ParseStream) -> Result { + let ident: syn::Ident = input.parse()?; + + if ident.to_string().as_str() != T::ident() { + return Err(syn::parse::Error::new_spanned( + ident, + "expected another custom token", + )); + } + Ok(CustomToken(std::marker::PhantomData)) + } } impl ToTokens for CustomToken { - fn to_tokens(&self, tokens: &mut T2) { - use std::str::FromStr; - tokens.extend(T2::from_str(T::ident()).expect("custom keyword should parse to ident")); - } + fn to_tokens(&self, tokens: &mut T2) { + use std::str::FromStr; + tokens.extend(T2::from_str(T::ident()).expect("custom keyword should parse to ident")); + } } impl CustomKeyword for CustomToken { - fn ident() -> &'static str { ::ident() } - fn display() -> &'static str { ::display() } + fn ident() -> &'static str { + ::ident() + } + fn display() -> &'static str { + ::display() + } } #[derive(Debug)] -pub struct PunctuatedInner { - pub inner: syn::punctuated::Punctuated, - pub variant: V, +pub struct PunctuatedInner { + pub inner: syn::punctuated::Punctuated, + pub variant: V, } #[derive(Debug)] pub struct NoTrailing; - #[derive(Debug)] pub struct Trailing; -pub type Punctuated = PunctuatedInner; +pub type Punctuated = PunctuatedInner; -pub type PunctuatedTrailing = PunctuatedInner; +pub type PunctuatedTrailing = PunctuatedInner; -impl Parse for PunctuatedInner { - fn parse(input: ParseStream) -> Result { - Ok(PunctuatedInner { - inner: syn::punctuated::Punctuated::parse_separated_nonempty(input)?, - variant: Trailing, - }) - } +impl Parse for PunctuatedInner { + fn parse(input: ParseStream) -> Result { + Ok(PunctuatedInner { + inner: syn::punctuated::Punctuated::parse_separated_nonempty(input)?, + variant: Trailing, + }) + } } -impl Parse for PunctuatedInner { - fn parse(input: ParseStream) -> Result { - Ok(PunctuatedInner { - inner: syn::punctuated::Punctuated::parse_terminated(input)?, - variant: NoTrailing, - }) - } +impl Parse for PunctuatedInner { + fn parse(input: ParseStream) -> Result { + Ok(PunctuatedInner { + inner: syn::punctuated::Punctuated::parse_terminated(input)?, + variant: NoTrailing, + }) + } } -impl ToTokens for PunctuatedInner { - fn to_tokens(&self, tokens: &mut T2) { - self.inner.to_tokens(tokens) - } +impl ToTokens for PunctuatedInner { + fn to_tokens(&self, tokens: &mut T2) { + self.inner.to_tokens(tokens) + } } /// Note that syn Meta is almost fine for use case (lacks only `ToToken`) #[derive(Debug, Clone)] pub struct Meta { - pub inner: syn::Meta, + pub inner: syn::Meta, } impl Parse for Meta { - fn parse(input: ParseStream) -> Result { - Ok(Meta { - inner: syn::Meta::parse(input)?, - }) - } + fn parse(input: ParseStream) -> Result { + Ok(Meta { + inner: syn::Meta::parse(input)?, + }) + } } impl ToTokens for Meta { - fn 
to_tokens(&self, tokens: &mut T2) { - match self.inner { - syn::Meta::Word(ref ident) => { - let ident = ident.clone(); - let toks = quote!{ - #[#ident] - }; - tokens.extend(toks); - }, - syn::Meta::List(ref l) => l.to_tokens(tokens), - syn::Meta::NameValue(ref n) => n.to_tokens(tokens), - } - } + fn to_tokens(&self, tokens: &mut T2) { + match self.inner { + syn::Meta::Word(ref ident) => { + let ident = ident.clone(); + let toks = quote! { + #[#ident] + }; + tokens.extend(toks); + } + syn::Meta::List(ref l) => l.to_tokens(tokens), + syn::Meta::NameValue(ref n) => n.to_tokens(tokens), + } + } } #[derive(Debug)] pub struct OuterAttributes { - pub inner: Vec<syn::Attribute>, + pub inner: Vec<syn::Attribute>, } impl Parse for OuterAttributes { - fn parse(input: ParseStream) -> Result<Self> { - let inner = syn::Attribute::parse_outer(input)?; - Ok(OuterAttributes { - inner, - }) - } + fn parse(input: ParseStream) -> Result<Self> { + let inner = syn::Attribute::parse_outer(input)?; + Ok(OuterAttributes { inner }) + } } impl ToTokens for OuterAttributes { - fn to_tokens(&self, tokens: &mut T2) { - for att in self.inner.iter() { - att.to_tokens(tokens); - } - } + fn to_tokens(&self, tokens: &mut T2) { + for att in self.inner.iter() { + att.to_tokens(tokens); + } + } } #[derive(Debug)] pub struct Opt
<P>
{ - pub inner: Option
<P>
, + pub inner: Option
<P>
, } impl<P: Parse> Parse for Opt
<P>
{ - // Note that it cost a double parsing (same as enum derive) - fn parse(input: ParseStream) -> Result<Self> { - let inner = match input.fork().parse::
<P>
() { - Ok(_item) => Some(input.parse().expect("Same parsing ran before")), - Err(_e) => None, - }; - - Ok(Opt { inner }) - } + // Note that it cost a double parsing (same as enum derive) + fn parse(input: ParseStream) -> Result<Self> { + let inner = match input.fork().parse::
<P>
() { + Ok(_item) => Some(input.parse().expect("Same parsing ran before")), + Err(_e) => None, + }; + + Ok(Opt { inner }) + } } impl<P: ToTokens> ToTokens for Opt
<P>
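// Usage sketch (all concrete names here are assumptions for illustration):
// `Opt<P>` speculatively parses a fork of the input and only consumes the
// real stream on success, at the cost of parsing the successful case twice.
// Assuming a keyword declared via `custom_keyword!(KwFoo, "foo", "foo")`:
//
//     let present: Opt<CustomToken<KwFoo>> = syn::parse2(quote!(foo))?;
//     assert!(present.inner.is_some());
//     let absent: Opt<CustomToken<KwFoo>> = syn::parse2(quote!())?;
//     assert!(absent.inner.is_none());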
{ - fn to_tokens(&self, tokens: &mut T2) { - if let Some(ref p) = self.inner { - p.to_tokens(tokens); - } - } + fn to_tokens(&self, tokens: &mut T2) { + if let Some(ref p) = self.inner { + p.to_tokens(tokens); + } + } } pub fn extract_type_option(typ: &syn::Type) -> Option { - if let syn::Type::Path(ref path) = typ { - path.path.segments.last().and_then(|v| { - if v.value().ident == "Option" { - if let syn::PathArguments::AngleBracketed(ref a) = v.value().arguments { - let args = &a.args; - Some(quote!{ #args }) - } else { - None - } - } else { - None - } - }) - } else { - None - } + if let syn::Type::Path(ref path) = typ { + path.path.segments.last().and_then(|v| { + if v.value().ident == "Option" { + if let syn::PathArguments::AngleBracketed(ref a) = v.value().arguments { + let args = &a.args; + Some(quote! { #args }) + } else { + None + } + } else { + None + } + }) + } else { + None + } } pub fn is_parametric_type_def(typ: &syn::Type, default: bool) -> bool { - match *typ { - syn::Type::Path(ref path) => { - path.path.segments.iter().any(|v| { - if let syn::PathArguments::AngleBracketed(..) = v.arguments { - true - } else { - false - } - }) - }, - syn::Type::Slice(ref inner) => is_parametric_type_def(&inner.elem, default), - syn::Type::Array(ref inner) => is_parametric_type_def(&inner.elem, default), - syn::Type::Ptr(ref inner) => is_parametric_type_def(&inner.elem, default), - syn::Type::Reference(ref inner) => is_parametric_type_def(&inner.elem, default), - syn::Type::BareFn(ref inner) => inner.variadic.is_some(), - syn::Type::Never(..) => false, - syn::Type::Tuple(ref inner) => - inner.elems.iter().any(|t| is_parametric_type_def(t, default)), - syn::Type::TraitObject(..) => true, - syn::Type::ImplTrait(..) => true, - syn::Type::Paren(ref inner) => is_parametric_type_def(&inner.elem, default), - syn::Type::Group(ref inner) => is_parametric_type_def(&inner.elem, default), - syn::Type::Infer(..) => true, - syn::Type::Macro(..) => default, - syn::Type::Verbatim(..) => default, - } + match *typ { + syn::Type::Path(ref path) => path.path.segments.iter().any(|v| { + if let syn::PathArguments::AngleBracketed(..) = v.arguments { + true + } else { + false + } + }), + syn::Type::Slice(ref inner) => is_parametric_type_def(&inner.elem, default), + syn::Type::Array(ref inner) => is_parametric_type_def(&inner.elem, default), + syn::Type::Ptr(ref inner) => is_parametric_type_def(&inner.elem, default), + syn::Type::Reference(ref inner) => is_parametric_type_def(&inner.elem, default), + syn::Type::BareFn(ref inner) => inner.variadic.is_some(), + syn::Type::Never(..) => false, + syn::Type::Tuple(ref inner) => inner + .elems + .iter() + .any(|t| is_parametric_type_def(t, default)), + syn::Type::TraitObject(..) => true, + syn::Type::ImplTrait(..) => true, + syn::Type::Paren(ref inner) => is_parametric_type_def(&inner.elem, default), + syn::Type::Group(ref inner) => is_parametric_type_def(&inner.elem, default), + syn::Type::Infer(..) => true, + syn::Type::Macro(..) => default, + syn::Type::Verbatim(..) => default, + } } /// check if type has any type parameter, defaults to true for some cases. 
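// Behaviour sketch (examples assumed, written with `syn::parse_quote!`):
// per the match in `is_parametric_type_def` above, a path type counts as
// parametric as soon as any segment carries angle-bracketed arguments:
//
//     assert!(is_parametric_type(&syn::parse_quote!(Vec<T>)));
//     assert!(!is_parametric_type(&syn::parse_quote!(u32)));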
pub fn is_parametric_type(typ: &syn::Type) -> bool { - is_parametric_type_def(typ, true) + is_parametric_type_def(typ, true) } fn has_parametric_type_def_in_path(path: &syn::Path, ident: &Ident, default: bool) -> bool { - path.segments.iter().any(|v| { - if ident == &v.ident { - return true; - } - if let syn::PathArguments::AngleBracketed(ref a) = v.arguments { - for arg in a.args.iter() { - if let syn::GenericArgument::Type(ref typ) = arg { - if has_parametric_type_def(typ, ident, default) { - return true; - } - } - // potentially missing matches here - } - false - } else { - false - } - }) - + path.segments.iter().any(|v| { + if ident == &v.ident { + return true; + } + if let syn::PathArguments::AngleBracketed(ref a) = v.arguments { + for arg in a.args.iter() { + if let syn::GenericArgument::Type(ref typ) = arg { + if has_parametric_type_def(typ, ident, default) { + return true; + } + } + // potentially missing matches here + } + false + } else { + false + } + }) } pub fn has_parametric_type_def(typ: &syn::Type, ident: &Ident, default: bool) -> bool { - match *typ { - syn::Type::Path(ref path) => has_parametric_type_def_in_path(&path.path, ident, default), - syn::Type::Slice(ref inner) => has_parametric_type_def(&inner.elem, ident, default), - syn::Type::Array(ref inner) => has_parametric_type_def(&inner.elem, ident, default), - syn::Type::Ptr(ref inner) => has_parametric_type_def(&inner.elem, ident, default), - syn::Type::Reference(ref inner) => has_parametric_type_def(&inner.elem, ident, default), - syn::Type::BareFn(ref inner) => inner.variadic.is_some(), - syn::Type::Never(..) => false, - syn::Type::Tuple(ref inner) => - inner.elems.iter().any(|t| has_parametric_type_def(t, ident, default)), - syn::Type::TraitObject(ref to) => { - to.bounds.iter().any(|bound| { - if let syn::TypeParamBound::Trait(ref t) = bound { - has_parametric_type_def_in_path(&t.path, ident, default) - } else { false } - }) - }, - syn::Type::ImplTrait(ref it) => { - it.bounds.iter().any(|bound| { - if let syn::TypeParamBound::Trait(ref t) = bound { - has_parametric_type_def_in_path(&t.path, ident, default) - } else { false } - }) - }, - syn::Type::Paren(ref inner) => has_parametric_type_def(&inner.elem, ident, default), - syn::Type::Group(ref inner) => has_parametric_type_def(&inner.elem, ident, default), - syn::Type::Infer(..) => default, - syn::Type::Macro(..) => default, - syn::Type::Verbatim(..) => default, - } + match *typ { + syn::Type::Path(ref path) => has_parametric_type_def_in_path(&path.path, ident, default), + syn::Type::Slice(ref inner) => has_parametric_type_def(&inner.elem, ident, default), + syn::Type::Array(ref inner) => has_parametric_type_def(&inner.elem, ident, default), + syn::Type::Ptr(ref inner) => has_parametric_type_def(&inner.elem, ident, default), + syn::Type::Reference(ref inner) => has_parametric_type_def(&inner.elem, ident, default), + syn::Type::BareFn(ref inner) => inner.variadic.is_some(), + syn::Type::Never(..) 
=> false, + syn::Type::Tuple(ref inner) => inner + .elems + .iter() + .any(|t| has_parametric_type_def(t, ident, default)), + syn::Type::TraitObject(ref to) => to.bounds.iter().any(|bound| { + if let syn::TypeParamBound::Trait(ref t) = bound { + has_parametric_type_def_in_path(&t.path, ident, default) + } else { + false + } + }), + syn::Type::ImplTrait(ref it) => it.bounds.iter().any(|bound| { + if let syn::TypeParamBound::Trait(ref t) = bound { + has_parametric_type_def_in_path(&t.path, ident, default) + } else { + false + } + }), + syn::Type::Paren(ref inner) => has_parametric_type_def(&inner.elem, ident, default), + syn::Type::Group(ref inner) => has_parametric_type_def(&inner.elem, ident, default), + syn::Type::Infer(..) => default, + syn::Type::Macro(..) => default, + syn::Type::Verbatim(..) => default, + } } /// check if type has a type parameter, defaults to true for some cases. pub fn has_parametric_type(typ: &syn::Type, ident: &Ident) -> bool { - has_parametric_type_def(typ, ident, true) + has_parametric_type_def(typ, ident, true) } diff --git a/srml/support/src/dispatch.rs b/srml/support/src/dispatch.rs index 64ea5938c4..de34b94b8d 100644 --- a/srml/support/src/dispatch.rs +++ b/srml/support/src/dispatch.rs @@ -17,15 +17,15 @@ //! Dispatch system. Contains a macro for defining runtime modules and //! generating values representing lazy module function calls. -pub use crate::rstd::prelude::{Vec, Clone, Eq, PartialEq}; -#[cfg(feature = "std")] -pub use std::fmt; +pub use crate::codec::{Codec, Decode, Encode, EncodeAsRef, HasCompact, Input, Output}; +pub use crate::rstd::prelude::{Clone, Eq, PartialEq, Vec}; pub use crate::rstd::result; -pub use crate::codec::{Codec, Decode, Encode, Input, Output, HasCompact, EncodeAsRef}; pub use srml_metadata::{ - FunctionMetadata, DecodeDifferent, DecodeDifferentArray, - FunctionArgumentMetadata, OuterDispatchMetadata, OuterDispatchCall + DecodeDifferent, DecodeDifferentArray, FunctionArgumentMetadata, FunctionMetadata, + OuterDispatchCall, OuterDispatchMetadata, }; +#[cfg(feature = "std")] +pub use std::fmt; /// A type that can not be instantiated. pub enum Never {} @@ -37,18 +37,18 @@ pub type Result = result::Result<(), &'static str>; /// A lazy call (module function and argument values) that can be executed via its dispatch() /// method. pub trait Dispatchable { - /// Every function call to your runtime has an origin which specifies where the extrinsic was - /// generated from. In the case of a signed extrinsic (transaction), the origin contains an - /// identifier for the caller. The origin can be empty in the case of an inherent extrinsic. - type Origin; - type Trait; - fn dispatch(self, origin: Self::Origin) -> Result; + /// Every function call to your runtime has an origin which specifies where the extrinsic was + /// generated from. In the case of a signed extrinsic (transaction), the origin contains an + /// identifier for the caller. The origin can be empty in the case of an inherent extrinsic. + type Origin; + type Trait; + fn dispatch(self, origin: Self::Origin) -> Result; } /// Serializable version of Dispatchable. /// This value can be used as a "function" in an extrinsic. pub trait Callable { - type Call: Dispatchable + Codec + Clone + PartialEq + Eq; + type Call: Dispatchable + Codec + Clone + PartialEq + Eq; } // dirty hack to work around serde_derive issue @@ -952,7 +952,7 @@ macro_rules! 
__impl_encode { } pub trait IsSubType { - fn is_aux_sub_type(&self) -> Option<&::Call>; + fn is_aux_sub_type(&self) -> Option<&::Call>; } /// Implement a meta-dispatch module to dispatch to other dispatchers. @@ -1061,7 +1061,6 @@ macro_rules! __call_to_functions { }; } - /// Convert a list of functions into a list of `FunctionMetadata` items. #[macro_export] #[doc(hidden)] @@ -1142,115 +1141,109 @@ macro_rules! __function_to_metadata { // Do not complain about unused `dispatch` and `dispatch_aux`. #[allow(dead_code)] mod tests { - use super::*; - use crate::runtime_primitives::traits::{OnInitialize, OnFinalize}; - - pub trait Trait { - type Origin; - type BlockNumber: Into; - } - - pub mod system { - use super::Result; - - pub fn ensure_root(_: R) -> Result { - Ok(()) - } - } - - decl_module! { - pub struct Module for enum Call where origin: T::Origin { - /// Hi, this is a comment. - fn aux_0(_origin) -> Result { unreachable!() } - fn aux_1(_origin, #[compact] _data: u32) -> Result { unreachable!() } - fn aux_2(_origin, _data: i32, _data2: String) -> Result { unreachable!() } - fn aux_3() -> Result { unreachable!() } - fn aux_4(_data: i32) -> Result { unreachable!() } - - fn on_initialize(n: T::BlockNumber) { if n.into() == 42 { panic!("on_initialize") } } - fn on_finalize(n: T::BlockNumber) { if n.into() == 42 { panic!("on_finalize") } } - fn offchain_worker() {} - } - } - - const EXPECTED_METADATA: &'static [FunctionMetadata] = &[ - FunctionMetadata { - name: DecodeDifferent::Encode("aux_0"), - arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[ - " Hi, this is a comment." - ]) - }, - FunctionMetadata { - name: DecodeDifferent::Encode("aux_1"), - arguments: DecodeDifferent::Encode(&[ - FunctionArgumentMetadata { - name: DecodeDifferent::Encode("_data"), - ty: DecodeDifferent::Encode("Compact") - } - ]), - documentation: DecodeDifferent::Encode(&[]), - }, - FunctionMetadata { - name: DecodeDifferent::Encode("aux_2"), - arguments: DecodeDifferent::Encode(&[ - FunctionArgumentMetadata { - name: DecodeDifferent::Encode("_data"), - ty: DecodeDifferent::Encode("i32"), - }, - FunctionArgumentMetadata { - name: DecodeDifferent::Encode("_data2"), - ty: DecodeDifferent::Encode("String"), - } - ]), - documentation: DecodeDifferent::Encode(&[]), - }, - FunctionMetadata { - name: DecodeDifferent::Encode("aux_3"), - arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[]), - }, - FunctionMetadata { - name: DecodeDifferent::Encode("aux_4"), - arguments: DecodeDifferent::Encode(&[ - FunctionArgumentMetadata { - name: DecodeDifferent::Encode("_data"), - ty: DecodeDifferent::Encode("i32"), - } - ]), - documentation: DecodeDifferent::Encode(&[]), - } - ]; - - struct TraitImpl {} - - impl Trait for TraitImpl { - type Origin = u32; - type BlockNumber = u32; - } - - #[test] - fn module_json_metadata() { - let metadata = Module::::call_functions(); - assert_eq!(EXPECTED_METADATA, metadata); - } - - #[test] - fn compact_attr() { - let call: Call = Call::aux_1(0); - let encoded = call.encode(); - assert_eq!(encoded.len(), 2); - } - - #[test] - #[should_panic(expected = "on_initialize")] - fn on_initialize_should_work() { - as OnInitialize>::on_initialize(42); - } - - #[test] - #[should_panic(expected = "on_finalize")] - fn on_finalize_should_work() { - as OnFinalize>::on_finalize(42); - } + use super::*; + use crate::runtime_primitives::traits::{OnFinalize, OnInitialize}; + + pub trait Trait { + type Origin; + type BlockNumber: Into; + } + + pub mod 
system { + use super::Result; + + pub fn ensure_root(_: R) -> Result { + Ok(()) + } + } + + decl_module! { + pub struct Module for enum Call where origin: T::Origin { + /// Hi, this is a comment. + fn aux_0(_origin) -> Result { unreachable!() } + fn aux_1(_origin, #[compact] _data: u32) -> Result { unreachable!() } + fn aux_2(_origin, _data: i32, _data2: String) -> Result { unreachable!() } + fn aux_3() -> Result { unreachable!() } + fn aux_4(_data: i32) -> Result { unreachable!() } + + fn on_initialize(n: T::BlockNumber) { if n.into() == 42 { panic!("on_initialize") } } + fn on_finalize(n: T::BlockNumber) { if n.into() == 42 { panic!("on_finalize") } } + fn offchain_worker() {} + } + } + + const EXPECTED_METADATA: &'static [FunctionMetadata] = &[ + FunctionMetadata { + name: DecodeDifferent::Encode("aux_0"), + arguments: DecodeDifferent::Encode(&[]), + documentation: DecodeDifferent::Encode(&[" Hi, this is a comment."]), + }, + FunctionMetadata { + name: DecodeDifferent::Encode("aux_1"), + arguments: DecodeDifferent::Encode(&[FunctionArgumentMetadata { + name: DecodeDifferent::Encode("_data"), + ty: DecodeDifferent::Encode("Compact"), + }]), + documentation: DecodeDifferent::Encode(&[]), + }, + FunctionMetadata { + name: DecodeDifferent::Encode("aux_2"), + arguments: DecodeDifferent::Encode(&[ + FunctionArgumentMetadata { + name: DecodeDifferent::Encode("_data"), + ty: DecodeDifferent::Encode("i32"), + }, + FunctionArgumentMetadata { + name: DecodeDifferent::Encode("_data2"), + ty: DecodeDifferent::Encode("String"), + }, + ]), + documentation: DecodeDifferent::Encode(&[]), + }, + FunctionMetadata { + name: DecodeDifferent::Encode("aux_3"), + arguments: DecodeDifferent::Encode(&[]), + documentation: DecodeDifferent::Encode(&[]), + }, + FunctionMetadata { + name: DecodeDifferent::Encode("aux_4"), + arguments: DecodeDifferent::Encode(&[FunctionArgumentMetadata { + name: DecodeDifferent::Encode("_data"), + ty: DecodeDifferent::Encode("i32"), + }]), + documentation: DecodeDifferent::Encode(&[]), + }, + ]; + + struct TraitImpl {} + + impl Trait for TraitImpl { + type Origin = u32; + type BlockNumber = u32; + } + + #[test] + fn module_json_metadata() { + let metadata = Module::::call_functions(); + assert_eq!(EXPECTED_METADATA, metadata); + } + + #[test] + fn compact_attr() { + let call: Call = Call::aux_1(0); + let encoded = call.encode(); + assert_eq!(encoded.len(), 2); + } + + #[test] + #[should_panic(expected = "on_initialize")] + fn on_initialize_should_work() { + as OnInitialize>::on_initialize(42); + } + + #[test] + #[should_panic(expected = "on_finalize")] + fn on_finalize_should_work() { + as OnFinalize>::on_finalize(42); + } } diff --git a/srml/support/src/double_map.rs b/srml/support/src/double_map.rs index 80d974064d..e9cbc73bb5 100644 --- a/srml/support/src/double_map.rs +++ b/srml/support/src/double_map.rs @@ -16,8 +16,8 @@ //! An implementation of double map backed by storage. -use crate::rstd::prelude::*; use crate::codec::{Codec, Encode}; +use crate::rstd::prelude::*; use crate::storage::unhashed; use sr_std::borrow::Borrow; @@ -34,107 +34,107 @@ use sr_std::borrow::Borrow; /// /// Hasher are implemented in derive_key* methods. pub trait StorageDoubleMapWithHasher { - type Key1: Codec; - type Key2: Codec; - type Value: Codec + Default; - - const PREFIX: &'static [u8]; - - /// Insert an entry into this map. 
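// Usage sketch (assuming a hypothetical `MyMap` that implements this trait
// with concrete `derive_key1`/`derive_key2` hashers):
//
//     MyMap::insert(&1u32, &2u32, 99u64);
//     assert_eq!(MyMap::get(&1u32, &2u32), Some(99u64));
//     MyMap::remove_prefix(&1u32);
//     assert_eq!(MyMap::get(&1u32, &2u32), None);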
- fn insert(k1: &Q, k2: &R, val: Self::Value) - where - Self::Key1: Borrow, - Self::Key2: Borrow, - Q: Codec, - R: Codec - { - unhashed::put(&Self::full_key(k1, k2)[..], &val); - } - - /// Remove an entry from this map. - fn remove(k1: &Q, k2: &R) - where - Self::Key1: Borrow, - Self::Key2: Borrow, - Q: Codec, - R: Codec - { - unhashed::kill(&Self::full_key(k1, k2)[..]); - } - - /// Get an entry from this map. - /// - /// If there is entry stored under the given keys, returns `None`. - fn get(k1: &Q, k2: &R) -> Option - where - Self::Key1: Borrow, - Self::Key2: Borrow, - Q: Codec, - R: Codec - { - unhashed::get(&Self::full_key(k1, k2)[..]) - } - - /// Returns `true` if value under the specified keys exists. - fn exists(k1: &Q, k2: &R) -> bool - where - Self::Key1: Borrow, - Self::Key2: Borrow, - Q: Codec, - R: Codec - { - unhashed::exists(&Self::full_key(k1, k2)[..]) - } - - /// Removes all entries that shares the `k1` as the first key. - fn remove_prefix(k1: &Q) - where - Self::Key1: Borrow, - Q: Codec - { - unhashed::kill_prefix(&Self::derive_key1(Self::encode_key1(k1))) - } - - /// Encode key1 into Vec and prepend a prefix - fn encode_key1(key: &Q) -> Vec - where - Self::Key1: Borrow, - Q: Codec - { - let mut raw_prefix = Vec::new(); - raw_prefix.extend(Self::PREFIX); - key.encode_to(&mut raw_prefix); - raw_prefix - } - - /// Encode key2 into Vec - fn encode_key2(key: &R) -> Vec - where - Self::Key2: Borrow, - R: Codec - { - Encode::encode(&key) - } - - /// Derive the first part of the key - fn derive_key1(key1_data: Vec) -> Vec; - - /// Derive the remaining part of the key - fn derive_key2(key2_data: Vec) -> Vec; - - /// Returns a compound key that consist of the two parts: (prefix, `k1`) and `k2`. - /// The first part is hashed and then concatenated with a hash of `k2`. - fn full_key(k1: &Q, k2: &R) -> Vec - where - Self::Key1: Borrow, - Self::Key2: Borrow, - Q: Codec, - R: Codec - { - let key1_data = Self::encode_key1(k1); - let key2_data = Self::encode_key2(k2); - let mut key = Self::derive_key1(key1_data); - key.extend(Self::derive_key2(key2_data)); - key - } + type Key1: Codec; + type Key2: Codec; + type Value: Codec + Default; + + const PREFIX: &'static [u8]; + + /// Insert an entry into this map. + fn insert(k1: &Q, k2: &R, val: Self::Value) + where + Self::Key1: Borrow, + Self::Key2: Borrow, + Q: Codec, + R: Codec, + { + unhashed::put(&Self::full_key(k1, k2)[..], &val); + } + + /// Remove an entry from this map. + fn remove(k1: &Q, k2: &R) + where + Self::Key1: Borrow, + Self::Key2: Borrow, + Q: Codec, + R: Codec, + { + unhashed::kill(&Self::full_key(k1, k2)[..]); + } + + /// Get an entry from this map. + /// + /// If there is entry stored under the given keys, returns `None`. + fn get(k1: &Q, k2: &R) -> Option + where + Self::Key1: Borrow, + Self::Key2: Borrow, + Q: Codec, + R: Codec, + { + unhashed::get(&Self::full_key(k1, k2)[..]) + } + + /// Returns `true` if value under the specified keys exists. + fn exists(k1: &Q, k2: &R) -> bool + where + Self::Key1: Borrow, + Self::Key2: Borrow, + Q: Codec, + R: Codec, + { + unhashed::exists(&Self::full_key(k1, k2)[..]) + } + + /// Removes all entries that shares the `k1` as the first key. 
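// Compound key layout (sketch inferred from `full_key` below; the concrete
// hashers come from the `derive_key1`/`derive_key2` implementor):
//
//     full_key(k1, k2) = derive_key1(PREFIX ++ encode(k1)) ++ derive_key2(encode(k2))
//
// Entries sharing `k1` therefore share one derived prefix, which is why
// `remove_prefix` can clear them all with a single `kill_prefix` call.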
+ fn remove_prefix(k1: &Q) + where + Self::Key1: Borrow, + Q: Codec, + { + unhashed::kill_prefix(&Self::derive_key1(Self::encode_key1(k1))) + } + + /// Encode key1 into Vec and prepend a prefix + fn encode_key1(key: &Q) -> Vec + where + Self::Key1: Borrow, + Q: Codec, + { + let mut raw_prefix = Vec::new(); + raw_prefix.extend(Self::PREFIX); + key.encode_to(&mut raw_prefix); + raw_prefix + } + + /// Encode key2 into Vec + fn encode_key2(key: &R) -> Vec + where + Self::Key2: Borrow, + R: Codec, + { + Encode::encode(&key) + } + + /// Derive the first part of the key + fn derive_key1(key1_data: Vec) -> Vec; + + /// Derive the remaining part of the key + fn derive_key2(key2_data: Vec) -> Vec; + + /// Returns a compound key that consist of the two parts: (prefix, `k1`) and `k2`. + /// The first part is hashed and then concatenated with a hash of `k2`. + fn full_key(k1: &Q, k2: &R) -> Vec + where + Self::Key1: Borrow, + Self::Key2: Borrow, + Q: Codec, + R: Codec, + { + let key1_data = Self::encode_key1(k1); + let key2_data = Self::encode_key2(k2); + let mut key = Self::derive_key1(key1_data); + key.extend(Self::derive_key2(key2_data)); + key + } } diff --git a/srml/support/src/event.rs b/srml/support/src/event.rs index 9ca2bcf532..8b1775e0fb 100644 --- a/srml/support/src/event.rs +++ b/srml/support/src/event.rs @@ -17,7 +17,7 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -pub use srml_metadata::{EventMetadata, DecodeDifferent, OuterEventMetadata, FnEncode}; +pub use srml_metadata::{DecodeDifferent, EventMetadata, FnEncode, OuterEventMetadata}; /// Implement the `Event` for a module. /// @@ -507,56 +507,56 @@ macro_rules! __impl_outer_event_json_metadata { #[cfg(test)] #[allow(dead_code)] mod tests { - use super::*; - use serde_derive::Serialize; - use parity_codec::{Encode, Decode}; - - mod system { - pub trait Trait { - type Origin; - type BlockNumber; - } - - decl_module! { - pub struct Module for enum Call where origin: T::Origin {} - } - - decl_event!( - pub enum Event { - SystemEvent, - } - ); - } - - mod system_renamed { - pub trait Trait { - type Origin; - type BlockNumber; - } - - decl_module! { - pub struct Module for enum Call where origin: T::Origin {} - } - - decl_event!( - pub enum Event { - SystemEvent, - } - ); - } - - mod event_module { - pub trait Trait { - type Origin; - type Balance; - type BlockNumber; - } - - decl_module! { - pub struct Module for enum Call where origin: T::Origin {} - } - - decl_event!( + use super::*; + use parity_codec::{Decode, Encode}; + use serde_derive::Serialize; + + mod system { + pub trait Trait { + type Origin; + type BlockNumber; + } + + decl_module! { + pub struct Module for enum Call where origin: T::Origin {} + } + + decl_event!( + pub enum Event { + SystemEvent, + } + ); + } + + mod system_renamed { + pub trait Trait { + type Origin; + type BlockNumber; + } + + decl_module! { + pub struct Module for enum Call where origin: T::Origin {} + } + + decl_event!( + pub enum Event { + SystemEvent, + } + ); + } + + mod event_module { + pub trait Trait { + type Origin; + type Balance; + type BlockNumber; + } + + decl_module! { + pub struct Module for enum Call where origin: T::Origin {} + } + + decl_event!( /// Event without renaming the generic parameter `Balance` and `Origin`. 
pub enum Event where ::Balance, ::Origin { @@ -566,51 +566,52 @@ mod tests { EventWithoutParams, } ); - } - - mod event_module2 { - pub trait Trait { - type Origin; - type Balance; - type BlockNumber; - } - - decl_module! { - pub struct Module for enum Call where origin: T::Origin {} - } - - decl_event!( - /// Event with renamed generic parameter - pub enum Event where - BalanceRenamed = ::Balance, - OriginRenamed = ::Origin - { - TestEvent(BalanceRenamed), - TestOrigin(OriginRenamed), - } - ); - } - - mod event_module3 { - decl_event!( - pub enum Event { - HiEvent, - } - ); - } - - mod event_module4 { - pub trait Trait { - type Origin; - type Balance; - type BlockNumber; - } - - decl_module! { - pub struct Module for enum Call where origin: T::Origin {} - } - - decl_event!( + } + + mod event_module2 { + pub trait Trait { + type Origin; + type Balance; + type BlockNumber; + } + + decl_module! { + pub struct Module for enum Call where origin: T::Origin {} + } + + decl_event!( + /// Event with renamed generic parameter + pub enum Event + where + BalanceRenamed = ::Balance, + OriginRenamed = ::Origin, + { + TestEvent(BalanceRenamed), + TestOrigin(OriginRenamed), + } + ); + } + + mod event_module3 { + decl_event!( + pub enum Event { + HiEvent, + } + ); + } + + mod event_module4 { + pub trait Trait { + type Origin; + type Balance; + type BlockNumber; + } + + decl_module! { + pub struct Module for enum Call where origin: T::Origin {} + } + + decl_event!( /// Event finish formatting on an unnamed one with trailling comma pub enum Event where ::Balance, @@ -619,144 +620,149 @@ mod tests { TestEvent(Balance, Origin), } ); - } - - mod event_module5 { - pub trait Trait { - type Origin; - type Balance; - type BlockNumber; - } - - decl_module! { - pub struct Module for enum Call where origin: T::Origin {} - } - - decl_event!( - /// Event finish formatting on an named one with trailling comma - pub enum Event where - BalanceRenamed = ::Balance, - OriginRenamed = ::Origin, - { - TestEvent(BalanceRenamed, OriginRenamed), - } - ); - } - - #[derive(Debug, Clone, PartialEq, Eq, Encode, Decode, Serialize)] - pub struct TestRuntime; - - impl_outer_event! { - pub enum TestEvent for TestRuntime { - event_module, - event_module2, - event_module3, - } - } - - #[derive(Debug, Clone, PartialEq, Eq, Encode, Decode, Serialize)] - pub struct TestRuntime2; - - impl_outer_event! 
{ - pub enum TestEventSystemRenamed for TestRuntime2 where system = system_renamed { - event_module, - event_module2, - event_module3, - } - } - - impl event_module::Trait for TestRuntime { - type Origin = u32; - type Balance = u32; - type BlockNumber = u32; - } - - impl event_module2::Trait for TestRuntime { - type Origin = u32; - type Balance = u32; - type BlockNumber = u32; - } - - impl system::Trait for TestRuntime { - type Origin = u32; - type BlockNumber = u32; - } - - impl event_module::Trait for TestRuntime2 { - type Origin = u32; - type Balance = u32; - type BlockNumber = u32; - } - - impl event_module2::Trait for TestRuntime2 { - type Origin = u32; - type Balance = u32; - type BlockNumber = u32; - } - - impl system_renamed::Trait for TestRuntime2 { - type Origin = u32; - type BlockNumber = u32; - } - - const EXPECTED_METADATA: OuterEventMetadata = OuterEventMetadata { - name: DecodeDifferent::Encode("TestEvent"), - events: DecodeDifferent::Encode(&[ - ( - "system", - FnEncode(|| &[ - EventMetadata { - name: DecodeDifferent::Encode("SystemEvent"), - arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[]), - } - ]) - ), - ( - "event_module", - FnEncode(|| &[ - EventMetadata { - name: DecodeDifferent::Encode("TestEvent"), - arguments: DecodeDifferent::Encode(&[ "Balance", "Origin" ]), - documentation: DecodeDifferent::Encode(&[ " Hi, I am a comment." ]) - }, - EventMetadata { - name: DecodeDifferent::Encode("EventWithoutParams"), - arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[ " Dog" ]), - }, - ]) - ), - ( - "event_module2", - FnEncode(|| &[ - EventMetadata { - name: DecodeDifferent::Encode("TestEvent"), - arguments: DecodeDifferent::Encode(&[ "BalanceRenamed" ]), - documentation: DecodeDifferent::Encode(&[]) - }, - EventMetadata { - name: DecodeDifferent::Encode("TestOrigin"), - arguments: DecodeDifferent::Encode(&[ "OriginRenamed" ]), - documentation: DecodeDifferent::Encode(&[]), - }, - ]) - ), - ( - "event_module3", - FnEncode(|| &[ - EventMetadata { - name: DecodeDifferent::Encode("HiEvent"), - arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[]) - } - ]) - ) - ]) - }; - - #[test] - fn outer_event_metadata() { - assert_eq!(EXPECTED_METADATA, TestRuntime::outer_event_metadata()); - } + } + + mod event_module5 { + pub trait Trait { + type Origin; + type Balance; + type BlockNumber; + } + + decl_module! { + pub struct Module for enum Call where origin: T::Origin {} + } + + decl_event!( + /// Event finish formatting on an named one with trailling comma + pub enum Event + where + BalanceRenamed = ::Balance, + OriginRenamed = ::Origin, + { + TestEvent(BalanceRenamed, OriginRenamed), + } + ); + } + + #[derive(Debug, Clone, PartialEq, Eq, Encode, Decode, Serialize)] + pub struct TestRuntime; + + impl_outer_event! { + pub enum TestEvent for TestRuntime { + event_module, + event_module2, + event_module3, + } + } + + #[derive(Debug, Clone, PartialEq, Eq, Encode, Decode, Serialize)] + pub struct TestRuntime2; + + impl_outer_event! 
{ + pub enum TestEventSystemRenamed for TestRuntime2 where system = system_renamed { + event_module, + event_module2, + event_module3, + } + } + + impl event_module::Trait for TestRuntime { + type Origin = u32; + type Balance = u32; + type BlockNumber = u32; + } + + impl event_module2::Trait for TestRuntime { + type Origin = u32; + type Balance = u32; + type BlockNumber = u32; + } + + impl system::Trait for TestRuntime { + type Origin = u32; + type BlockNumber = u32; + } + + impl event_module::Trait for TestRuntime2 { + type Origin = u32; + type Balance = u32; + type BlockNumber = u32; + } + + impl event_module2::Trait for TestRuntime2 { + type Origin = u32; + type Balance = u32; + type BlockNumber = u32; + } + + impl system_renamed::Trait for TestRuntime2 { + type Origin = u32; + type BlockNumber = u32; + } + + const EXPECTED_METADATA: OuterEventMetadata = OuterEventMetadata { + name: DecodeDifferent::Encode("TestEvent"), + events: DecodeDifferent::Encode(&[ + ( + "system", + FnEncode(|| { + &[EventMetadata { + name: DecodeDifferent::Encode("SystemEvent"), + arguments: DecodeDifferent::Encode(&[]), + documentation: DecodeDifferent::Encode(&[]), + }] + }), + ), + ( + "event_module", + FnEncode(|| { + &[ + EventMetadata { + name: DecodeDifferent::Encode("TestEvent"), + arguments: DecodeDifferent::Encode(&["Balance", "Origin"]), + documentation: DecodeDifferent::Encode(&[" Hi, I am a comment."]), + }, + EventMetadata { + name: DecodeDifferent::Encode("EventWithoutParams"), + arguments: DecodeDifferent::Encode(&[]), + documentation: DecodeDifferent::Encode(&[" Dog"]), + }, + ] + }), + ), + ( + "event_module2", + FnEncode(|| { + &[ + EventMetadata { + name: DecodeDifferent::Encode("TestEvent"), + arguments: DecodeDifferent::Encode(&["BalanceRenamed"]), + documentation: DecodeDifferent::Encode(&[]), + }, + EventMetadata { + name: DecodeDifferent::Encode("TestOrigin"), + arguments: DecodeDifferent::Encode(&["OriginRenamed"]), + documentation: DecodeDifferent::Encode(&[]), + }, + ] + }), + ), + ( + "event_module3", + FnEncode(|| { + &[EventMetadata { + name: DecodeDifferent::Encode("HiEvent"), + arguments: DecodeDifferent::Encode(&[]), + documentation: DecodeDifferent::Encode(&[]), + }] + }), + ), + ]), + }; + + #[test] + fn outer_event_metadata() { + assert_eq!(EXPECTED_METADATA, TestRuntime::outer_event_metadata()); + } } diff --git a/srml/support/src/hashable.rs b/srml/support/src/hashable.rs index 886c88b23a..842264de82 100644 --- a/srml/support/src/hashable.rs +++ b/srml/support/src/hashable.rs @@ -20,19 +20,19 @@ use crate::codec::Codec; use runtime_io::{blake2_256, twox_128, twox_256}; pub trait Hashable: Sized { - fn blake2_256(&self) -> [u8; 32]; - fn twox_128(&self) -> [u8; 16]; - fn twox_256(&self) -> [u8; 32]; + fn blake2_256(&self) -> [u8; 32]; + fn twox_128(&self) -> [u8; 16]; + fn twox_256(&self) -> [u8; 32]; } impl Hashable for T { - fn blake2_256(&self) -> [u8; 32] { - self.using_encoded(blake2_256) - } - fn twox_128(&self) -> [u8; 16] { - self.using_encoded(twox_128) - } - fn twox_256(&self) -> [u8; 32] { - self.using_encoded(twox_256) - } + fn blake2_256(&self) -> [u8; 32] { + self.using_encoded(blake2_256) + } + fn twox_128(&self) -> [u8; 16] { + self.using_encoded(twox_128) + } + fn twox_256(&self) -> [u8; 32] { + self.using_encoded(twox_256) + } } diff --git a/srml/support/src/inherent.rs b/srml/support/src/inherent.rs index 8a4fb669d1..8b8cc6d225 100644 --- a/srml/support/src/inherent.rs +++ b/srml/support/src/inherent.rs @@ -19,8 +19,7 @@ pub use crate::rstd::vec::Vec; 
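Usage note for the `Hashable` blanket impl above: any `Codec` type hashes its own SCALE encoding via `using_encoded`. A small sketch, assuming the `std` feature so `runtime_io`'s hashers are ordinary functions:

    use srml_support::Hashable;

    fn hash_a_key() {
        let key = 42u64;
        let wide: [u8; 32] = key.blake2_256(); // blake2_256(key.encode())
        let narrow: [u8; 16] = key.twox_128(); // twox_128(key.encode())
        assert_ne!(&wide[..16], &narrow[..]); // different hashers, different digests
    }
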
#[doc(hidden)] pub use crate::runtime_primitives::traits::{Block as BlockT, Extrinsic}; #[doc(hidden)] -pub use inherents::{InherentData, ProvideInherent, CheckInherentsResult, IsFatalError}; - +pub use inherents::{CheckInherentsResult, InherentData, IsFatalError, ProvideInherent}; /// Implement the outer inherent. /// All given modules need to implement `ProvideInherent`. diff --git a/srml/support/src/lib.rs b/srml/support/src/lib.rs index d99db6ddb8..0e968b8bdc 100644 --- a/srml/support/src/lib.rs +++ b/srml/support/src/lib.rs @@ -23,17 +23,17 @@ extern crate bitmask; #[cfg(feature = "std")] -pub use serde; #[doc(hidden)] -pub use sr_std as rstd; +pub use once_cell; #[doc(hidden)] pub use parity_codec as codec; -#[cfg(feature = "std")] -#[doc(hidden)] -pub use once_cell; #[doc(hidden)] pub use paste; +#[cfg(feature = "std")] +pub use serde; pub use sr_primitives as runtime_primitives; +#[doc(hidden)] +pub use sr_std as rstd; pub use self::storage::generator::Storage as GenericStorage; pub use self::storage::unhashed::generator::UnhashedStorage as GenericUnhashedStorage; @@ -56,10 +56,12 @@ pub mod inherent; mod double_map; pub mod traits; -pub use self::storage::{StorageVec, StorageList, StorageValue, StorageMap, EnumerableStorageMap, StorageDoubleMap}; -pub use self::hashable::Hashable; -pub use self::dispatch::{Parameter, Dispatchable, Callable, IsSubType}; +pub use self::dispatch::{Callable, Dispatchable, IsSubType, Parameter}; pub use self::double_map::StorageDoubleMapWithHasher; +pub use self::hashable::Hashable; +pub use self::storage::{ + EnumerableStorageMap, StorageDoubleMap, StorageList, StorageMap, StorageValue, StorageVec, +}; pub use runtime_io::print; #[doc(inline)] @@ -67,47 +69,47 @@ pub use srml_support_procedural::decl_storage; #[macro_export] macro_rules! fail { - ( $y:expr ) => {{ - return Err($y); - }} + ( $y:expr ) => {{ + return Err($y); + }}; } #[macro_export] macro_rules! ensure { - ( $x:expr, $y:expr ) => {{ - if !$x { - $crate::fail!($y); - } - }} + ( $x:expr, $y:expr ) => {{ + if !$x { + $crate::fail!($y); + } + }}; } #[macro_export] #[cfg(feature = "std")] macro_rules! assert_noop { - ( $x:expr , $y:expr ) => { - let h = runtime_io::storage_root(); - $crate::assert_err!($x, $y); - assert_eq!(h, runtime_io::storage_root()); - } + ( $x:expr , $y:expr ) => { + let h = runtime_io::storage_root(); + $crate::assert_err!($x, $y); + assert_eq!(h, runtime_io::storage_root()); + }; } #[macro_export] #[cfg(feature = "std")] macro_rules! assert_err { - ( $x:expr , $y:expr ) => { - assert_eq!($x, Err($y)); - } + ( $x:expr , $y:expr ) => { + assert_eq!($x, Err($y)); + }; } #[macro_export] #[cfg(feature = "std")] macro_rules! assert_ok { - ( $x:expr ) => { - assert_eq!($x, Ok(())); - }; - ( $x:expr, $y:expr ) => { - assert_eq!($x, Ok($y)); - } + ( $x:expr ) => { + assert_eq!($x, Ok(())); + }; + ( $x:expr, $y:expr ) => { + assert_eq!($x, Ok($y)); + }; } /// Panic when the vectors are different, without taking the order into account. @@ -134,21 +136,23 @@ macro_rules! assert_ok { #[macro_export] #[cfg(feature = "std")] macro_rules! assert_eq_uvec { - ( $x:expr, $y:expr ) => { - $crate::__assert_eq_uvec!($x, $y); - $crate::__assert_eq_uvec!($y, $x); - } + ( $x:expr, $y:expr ) => { + $crate::__assert_eq_uvec!($x, $y); + $crate::__assert_eq_uvec!($y, $x); + }; } #[macro_export] #[doc(hidden)] #[cfg(feature = "std")] macro_rules! 
__assert_eq_uvec { - ( $x:expr, $y:expr ) => { - $x.iter().for_each(|e| { - if !$y.contains(e) { panic!(format!("vectors not equal: {:?} != {:?}", $x, $y)); } - }); - } + ( $x:expr, $y:expr ) => { + $x.iter().for_each(|e| { + if !$y.contains(e) { + panic!(format!("vectors not equal: {:?} != {:?}", $x, $y)); + } + }); + }; } /// The void type - it cannot exist. @@ -178,259 +182,283 @@ macro_rules! for_each_tuple { #[cfg(test)] mod tests { - use super::*; - use parity_codec::Codec; - use runtime_io::{with_externalities, Blake2Hasher}; - use runtime_primitives::BuildStorage; - pub use srml_metadata::{ - DecodeDifferent, StorageMetadata, StorageFunctionMetadata, - StorageFunctionType, StorageFunctionModifier, - DefaultByte, DefaultByteGetter, - }; - pub use rstd::marker::PhantomData; - - pub trait Trait { - type BlockNumber: Codec + Default; - type Origin; - } - - mod module { - #![allow(dead_code)] - - use super::Trait; - - decl_module! { - pub struct Module for enum Call where origin: T::Origin { - - } - } - } - use self::module::Module; - - decl_storage! { - trait Store for Module as Example { - pub Data get(data) build(|_| vec![(15u32, 42u64)]): linked_map u32 => u64; - pub GenericData get(generic_data): linked_map T::BlockNumber => T::BlockNumber; - pub GenericData2 get(generic_data2): linked_map T::BlockNumber => Option; - - pub DataDM config(test_config) build(|_| vec![(15u32, 16u32, 42u64)]): double_map u32, blake2_256(u32) => u64; - pub GenericDataDM: double_map T::BlockNumber, twox_128(T::BlockNumber) => T::BlockNumber; - pub GenericData2DM: double_map T::BlockNumber, twox_256(T::BlockNumber) => Option; - } - } - - struct Test; - impl Trait for Test { - type BlockNumber = u32; - type Origin = u32; - } - - fn new_test_ext() -> runtime_io::TestExternalities { - GenesisConfig::::default().build_storage().unwrap().0.into() - } - - type Map = Data; - - #[test] - fn linked_map_basic_insert_remove_should_work() { - with_externalities(&mut new_test_ext(), || { - // initialized during genesis - assert_eq!(Map::get(&15u32), 42u64); - - // get / insert / take - let key = 17u32; - assert_eq!(Map::get(&key), 0u64); - Map::insert(key, 4u64); - assert_eq!(Map::get(&key), 4u64); - assert_eq!(Map::take(&key), 4u64); - assert_eq!(Map::get(&key), 0u64); - - // mutate - Map::mutate(&key, |val| { - *val = 15; - }); - assert_eq!(Map::get(&key), 15u64); - - // remove - Map::remove(&key); - assert_eq!(Map::get(&key), 0u64); - }); - } - - #[test] - fn linked_map_enumeration_and_head_should_work() { - with_externalities(&mut new_test_ext(), || { - assert_eq!(Map::head(), Some(15)); - assert_eq!(Map::enumerate().collect::>(), vec![(15, 42)]); - // insert / remove - let key = 17u32; - Map::insert(key, 4u64); - assert_eq!(Map::head(), Some(key)); - assert_eq!(Map::enumerate().collect::>(), vec![(key, 4), (15, 42)]); - assert_eq!(Map::take(&15), 42u64); - assert_eq!(Map::take(&key), 4u64); - assert_eq!(Map::head(), None); - assert_eq!(Map::enumerate().collect::>(), vec![]); - - // Add couple of more elements - Map::insert(key, 42u64); - assert_eq!(Map::head(), Some(key)); - assert_eq!(Map::enumerate().collect::>(), vec![(key, 42)]); - Map::insert(key + 1, 43u64); - assert_eq!(Map::head(), Some(key + 1)); - assert_eq!(Map::enumerate().collect::>(), vec![(key + 1, 43), (key, 42)]); - - // mutate - let key = key + 2; - Map::mutate(&key, |val| { - *val = 15; - }); - assert_eq!(Map::enumerate().collect::>(), vec![(key, 15), (key - 1, 43), (key - 2, 42)]); - assert_eq!(Map::head(), Some(key)); - Map::mutate(&key, |val| 
{ - *val = 17; - }); - assert_eq!(Map::enumerate().collect::>(), vec![(key, 17), (key - 1, 43), (key - 2, 42)]); - - // remove first - Map::remove(&key); - assert_eq!(Map::head(), Some(key - 1)); - assert_eq!(Map::enumerate().collect::>(), vec![(key - 1, 43), (key - 2, 42)]); - - // remove last from the list - Map::remove(&(key - 2)); - assert_eq!(Map::head(), Some(key - 1)); - assert_eq!(Map::enumerate().collect::>(), vec![(key - 1, 43)]); - - // remove the last element - Map::remove(&(key - 1)); - assert_eq!(Map::head(), None); - assert_eq!(Map::enumerate().collect::>(), vec![]); - }); - } - - #[test] - fn double_map_basic_insert_remove_remove_prefix_should_work() { - with_externalities(&mut new_test_ext(), || { - type DoubleMap = DataDM; - // initialized during genesis - assert_eq!(DoubleMap::get(&15u32, &16u32), 42u64); - - // get / insert / take - let key1 = 17u32; - let key2 = 18u32; - assert_eq!(DoubleMap::get(key1, key2), 0u64); - DoubleMap::insert(key1, key2, 4u64); - assert_eq!(DoubleMap::get(key1, key2), 4u64); - assert_eq!(DoubleMap::take(key1, key2), 4u64); - assert_eq!(DoubleMap::get(key1, key2), 0u64); - - // mutate - DoubleMap::mutate(key1, key2, |val| { - *val = 15; - }); - assert_eq!(DoubleMap::get(key1, key2), 15u64); - - // remove - DoubleMap::remove(key1, key2); - assert_eq!(DoubleMap::get(key1, key2), 0u64); - - // remove prefix - DoubleMap::insert(key1, key2, 4u64); - DoubleMap::insert(key1, key2+1, 4u64); - DoubleMap::insert(key1+1, key2, 4u64); - DoubleMap::insert(key1+1, key2+1, 4u64); - DoubleMap::remove_prefix(key1); - assert_eq!(DoubleMap::get(key1, key2), 0u64); - assert_eq!(DoubleMap::get(key1, key2+1), 0u64); - assert_eq!(DoubleMap::get(key1+1, key2), 4u64); - assert_eq!(DoubleMap::get(key1+1, key2+1), 4u64); - }); - } - - const EXPECTED_METADATA: StorageMetadata = StorageMetadata { - functions: DecodeDifferent::Encode(&[ - StorageFunctionMetadata { - name: DecodeDifferent::Encode("Data"), - modifier: StorageFunctionModifier::Default, - ty: StorageFunctionType::Map{ - key: DecodeDifferent::Encode("u32"), value: DecodeDifferent::Encode("u64"), is_linked: true - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructData(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageFunctionMetadata { - name: DecodeDifferent::Encode("GenericData"), - modifier: StorageFunctionModifier::Default, - ty: StorageFunctionType::Map{ - key: DecodeDifferent::Encode("T::BlockNumber"), value: DecodeDifferent::Encode("T::BlockNumber"), is_linked: true - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGenericData(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageFunctionMetadata { - name: DecodeDifferent::Encode("GenericData2"), - modifier: StorageFunctionModifier::Optional, - ty: StorageFunctionType::Map{ - key: DecodeDifferent::Encode("T::BlockNumber"), value: DecodeDifferent::Encode("T::BlockNumber"), is_linked: true - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGenericData2(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageFunctionMetadata { - name: DecodeDifferent::Encode("DataDM"), - modifier: StorageFunctionModifier::Default, - ty: StorageFunctionType::DoubleMap{ - key1: DecodeDifferent::Encode("u32"), - key2: DecodeDifferent::Encode("u32"), - value: DecodeDifferent::Encode("u64"), - key2_hasher: DecodeDifferent::Encode("blake2_256"), - }, - default: DecodeDifferent::Encode( - 
DefaultByteGetter(&__GetByteStructDataDM(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageFunctionMetadata { - name: DecodeDifferent::Encode("GenericDataDM"), - modifier: StorageFunctionModifier::Default, - ty: StorageFunctionType::DoubleMap{ - key1: DecodeDifferent::Encode("T::BlockNumber"), - key2: DecodeDifferent::Encode("T::BlockNumber"), - value: DecodeDifferent::Encode("T::BlockNumber"), - key2_hasher: DecodeDifferent::Encode("twox_128"), - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGenericDataDM(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageFunctionMetadata { - name: DecodeDifferent::Encode("GenericData2DM"), - modifier: StorageFunctionModifier::Optional, - ty: StorageFunctionType::DoubleMap{ - key1: DecodeDifferent::Encode("T::BlockNumber"), - key2: DecodeDifferent::Encode("T::BlockNumber"), - value: DecodeDifferent::Encode("T::BlockNumber"), - key2_hasher: DecodeDifferent::Encode("twox_256"), - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGenericData2DM(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - ]) - }; - - #[test] - fn store_metadata() { - let metadata = Module::::store_metadata(); - assert_eq!(EXPECTED_METADATA, metadata); - } + use super::*; + use parity_codec::Codec; + pub use rstd::marker::PhantomData; + use runtime_io::{with_externalities, Blake2Hasher}; + use runtime_primitives::BuildStorage; + pub use srml_metadata::{ + DecodeDifferent, DefaultByte, DefaultByteGetter, StorageFunctionMetadata, + StorageFunctionModifier, StorageFunctionType, StorageMetadata, + }; + + pub trait Trait { + type BlockNumber: Codec + Default; + type Origin; + } + + mod module { + #![allow(dead_code)] + + use super::Trait; + + decl_module! { + pub struct Module for enum Call where origin: T::Origin { + + } + } + } + use self::module::Module; + + decl_storage! 
{ + trait Store for Module as Example { + pub Data get(data) build(|_| vec![(15u32, 42u64)]): linked_map u32 => u64; + pub GenericData get(generic_data): linked_map T::BlockNumber => T::BlockNumber; + pub GenericData2 get(generic_data2): linked_map T::BlockNumber => Option; + + pub DataDM config(test_config) build(|_| vec![(15u32, 16u32, 42u64)]): double_map u32, blake2_256(u32) => u64; + pub GenericDataDM: double_map T::BlockNumber, twox_128(T::BlockNumber) => T::BlockNumber; + pub GenericData2DM: double_map T::BlockNumber, twox_256(T::BlockNumber) => Option; + } + } + + struct Test; + impl Trait for Test { + type BlockNumber = u32; + type Origin = u32; + } + + fn new_test_ext() -> runtime_io::TestExternalities { + GenesisConfig::::default() + .build_storage() + .unwrap() + .0 + .into() + } + + type Map = Data; + + #[test] + fn linked_map_basic_insert_remove_should_work() { + with_externalities(&mut new_test_ext(), || { + // initialized during genesis + assert_eq!(Map::get(&15u32), 42u64); + + // get / insert / take + let key = 17u32; + assert_eq!(Map::get(&key), 0u64); + Map::insert(key, 4u64); + assert_eq!(Map::get(&key), 4u64); + assert_eq!(Map::take(&key), 4u64); + assert_eq!(Map::get(&key), 0u64); + + // mutate + Map::mutate(&key, |val| { + *val = 15; + }); + assert_eq!(Map::get(&key), 15u64); + + // remove + Map::remove(&key); + assert_eq!(Map::get(&key), 0u64); + }); + } + + #[test] + fn linked_map_enumeration_and_head_should_work() { + with_externalities(&mut new_test_ext(), || { + assert_eq!(Map::head(), Some(15)); + assert_eq!(Map::enumerate().collect::>(), vec![(15, 42)]); + // insert / remove + let key = 17u32; + Map::insert(key, 4u64); + assert_eq!(Map::head(), Some(key)); + assert_eq!( + Map::enumerate().collect::>(), + vec![(key, 4), (15, 42)] + ); + assert_eq!(Map::take(&15), 42u64); + assert_eq!(Map::take(&key), 4u64); + assert_eq!(Map::head(), None); + assert_eq!(Map::enumerate().collect::>(), vec![]); + + // Add couple of more elements + Map::insert(key, 42u64); + assert_eq!(Map::head(), Some(key)); + assert_eq!(Map::enumerate().collect::>(), vec![(key, 42)]); + Map::insert(key + 1, 43u64); + assert_eq!(Map::head(), Some(key + 1)); + assert_eq!( + Map::enumerate().collect::>(), + vec![(key + 1, 43), (key, 42)] + ); + + // mutate + let key = key + 2; + Map::mutate(&key, |val| { + *val = 15; + }); + assert_eq!( + Map::enumerate().collect::>(), + vec![(key, 15), (key - 1, 43), (key - 2, 42)] + ); + assert_eq!(Map::head(), Some(key)); + Map::mutate(&key, |val| { + *val = 17; + }); + assert_eq!( + Map::enumerate().collect::>(), + vec![(key, 17), (key - 1, 43), (key - 2, 42)] + ); + + // remove first + Map::remove(&key); + assert_eq!(Map::head(), Some(key - 1)); + assert_eq!( + Map::enumerate().collect::>(), + vec![(key - 1, 43), (key - 2, 42)] + ); + + // remove last from the list + Map::remove(&(key - 2)); + assert_eq!(Map::head(), Some(key - 1)); + assert_eq!(Map::enumerate().collect::>(), vec![(key - 1, 43)]); + + // remove the last element + Map::remove(&(key - 1)); + assert_eq!(Map::head(), None); + assert_eq!(Map::enumerate().collect::>(), vec![]); + }); + } + + #[test] + fn double_map_basic_insert_remove_remove_prefix_should_work() { + with_externalities(&mut new_test_ext(), || { + type DoubleMap = DataDM; + // initialized during genesis + assert_eq!(DoubleMap::get(&15u32, &16u32), 42u64); + + // get / insert / take + let key1 = 17u32; + let key2 = 18u32; + assert_eq!(DoubleMap::get(key1, key2), 0u64); + DoubleMap::insert(key1, key2, 4u64); + 
assert_eq!(DoubleMap::get(key1, key2), 4u64); + assert_eq!(DoubleMap::take(key1, key2), 4u64); + assert_eq!(DoubleMap::get(key1, key2), 0u64); + + // mutate + DoubleMap::mutate(key1, key2, |val| { + *val = 15; + }); + assert_eq!(DoubleMap::get(key1, key2), 15u64); + + // remove + DoubleMap::remove(key1, key2); + assert_eq!(DoubleMap::get(key1, key2), 0u64); + + // remove prefix + DoubleMap::insert(key1, key2, 4u64); + DoubleMap::insert(key1, key2 + 1, 4u64); + DoubleMap::insert(key1 + 1, key2, 4u64); + DoubleMap::insert(key1 + 1, key2 + 1, 4u64); + DoubleMap::remove_prefix(key1); + assert_eq!(DoubleMap::get(key1, key2), 0u64); + assert_eq!(DoubleMap::get(key1, key2 + 1), 0u64); + assert_eq!(DoubleMap::get(key1 + 1, key2), 4u64); + assert_eq!(DoubleMap::get(key1 + 1, key2 + 1), 4u64); + }); + } + + const EXPECTED_METADATA: StorageMetadata = StorageMetadata { + functions: DecodeDifferent::Encode(&[ + StorageFunctionMetadata { + name: DecodeDifferent::Encode("Data"), + modifier: StorageFunctionModifier::Default, + ty: StorageFunctionType::Map { + key: DecodeDifferent::Encode("u32"), + value: DecodeDifferent::Encode("u64"), + is_linked: true, + }, + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructData( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageFunctionMetadata { + name: DecodeDifferent::Encode("GenericData"), + modifier: StorageFunctionModifier::Default, + ty: StorageFunctionType::Map { + key: DecodeDifferent::Encode("T::BlockNumber"), + value: DecodeDifferent::Encode("T::BlockNumber"), + is_linked: true, + }, + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructGenericData( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageFunctionMetadata { + name: DecodeDifferent::Encode("GenericData2"), + modifier: StorageFunctionModifier::Optional, + ty: StorageFunctionType::Map { + key: DecodeDifferent::Encode("T::BlockNumber"), + value: DecodeDifferent::Encode("T::BlockNumber"), + is_linked: true, + }, + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructGenericData2( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageFunctionMetadata { + name: DecodeDifferent::Encode("DataDM"), + modifier: StorageFunctionModifier::Default, + ty: StorageFunctionType::DoubleMap { + key1: DecodeDifferent::Encode("u32"), + key2: DecodeDifferent::Encode("u32"), + value: DecodeDifferent::Encode("u64"), + key2_hasher: DecodeDifferent::Encode("blake2_256"), + }, + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructDataDM( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageFunctionMetadata { + name: DecodeDifferent::Encode("GenericDataDM"), + modifier: StorageFunctionModifier::Default, + ty: StorageFunctionType::DoubleMap { + key1: DecodeDifferent::Encode("T::BlockNumber"), + key2: DecodeDifferent::Encode("T::BlockNumber"), + value: DecodeDifferent::Encode("T::BlockNumber"), + key2_hasher: DecodeDifferent::Encode("twox_128"), + }, + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructGenericDataDM( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageFunctionMetadata { + name: DecodeDifferent::Encode("GenericData2DM"), + modifier: StorageFunctionModifier::Optional, + ty: StorageFunctionType::DoubleMap { + key1: DecodeDifferent::Encode("T::BlockNumber"), + key2: DecodeDifferent::Encode("T::BlockNumber"), + value: DecodeDifferent::Encode("T::BlockNumber"), + key2_hasher: 
DecodeDifferent::Encode("twox_256"), + }, + default: DecodeDifferent::Encode(DefaultByteGetter( + &__GetByteStructGenericData2DM(PhantomData::), + )), + documentation: DecodeDifferent::Encode(&[]), + }, + ]), + }; + + #[test] + fn store_metadata() { + let metadata = Module::::store_metadata(); + assert_eq!(EXPECTED_METADATA, metadata); + } } diff --git a/srml/support/src/metadata.rs b/srml/support/src/metadata.rs index f7594d27b7..18b51180b3 100644 --- a/srml/support/src/metadata.rs +++ b/srml/support/src/metadata.rs @@ -15,9 +15,8 @@ // along with Substrate. If not, see . pub use srml_metadata::{ - DecodeDifferent, FnEncode, RuntimeMetadata, - ModuleMetadata, RuntimeMetadataV3, - DefaultByteGetter, RuntimeMetadataPrefixed, + DecodeDifferent, DefaultByteGetter, FnEncode, ModuleMetadata, RuntimeMetadata, + RuntimeMetadataPrefixed, RuntimeMetadataV3, }; /// Implements the metadata support for the given runtime and all its modules. @@ -119,7 +118,6 @@ macro_rules! __runtime_modules_to_metadata_calls_call { }; } - #[macro_export] #[doc(hidden)] macro_rules! __runtime_modules_to_metadata_calls_event { @@ -226,66 +224,63 @@ macro_rules! __runtime_modules_to_metadata_calls_storage { }; } - #[cfg(test)] // Do not complain about unused `dispatch` and `dispatch_aux`. #[allow(dead_code)] mod tests { - use super::*; - use srml_metadata::{ - EventMetadata, - StorageFunctionModifier, StorageFunctionType, FunctionMetadata, - StorageFunctionMetadata, - ModuleMetadata, RuntimeMetadataPrefixed - }; - use crate::codec::{Encode, Decode}; - - mod system { - pub trait Trait { - type Origin: Into>> + From>; - type AccountId; - type BlockNumber; - } - - decl_module! { - pub struct Module for enum Call where origin: T::Origin {} - } - - decl_event!( - pub enum Event { - SystemEvent, - } - ); - - #[derive(Clone, PartialEq, Eq, Debug)] - pub enum RawOrigin { - Root, - Signed(AccountId), - Inherent, - } - - impl From> for RawOrigin { - fn from(s: Option) -> RawOrigin { - match s { - Some(who) => RawOrigin::Signed(who), - None => RawOrigin::Inherent, - } - } - } - - pub type Origin = RawOrigin<::AccountId>; - } - - mod event_module { - use crate::dispatch::Result; - - pub trait Trait { - type Origin; - type Balance; - type BlockNumber; - } - - decl_event!( + use super::*; + use crate::codec::{Decode, Encode}; + use srml_metadata::{ + EventMetadata, FunctionMetadata, ModuleMetadata, RuntimeMetadataPrefixed, + StorageFunctionMetadata, StorageFunctionModifier, StorageFunctionType, + }; + + mod system { + pub trait Trait { + type Origin: Into>> + From>; + type AccountId; + type BlockNumber; + } + + decl_module! { + pub struct Module for enum Call where origin: T::Origin {} + } + + decl_event!( + pub enum Event { + SystemEvent, + } + ); + + #[derive(Clone, PartialEq, Eq, Debug)] + pub enum RawOrigin { + Root, + Signed(AccountId), + Inherent, + } + + impl From> for RawOrigin { + fn from(s: Option) -> RawOrigin { + match s { + Some(who) => RawOrigin::Signed(who), + None => RawOrigin::Inherent, + } + } + } + + pub type Origin = RawOrigin<::AccountId>; + } + + mod event_module { + use crate::dispatch::Result; + + pub trait Trait { + type Origin; + type Balance; + type BlockNumber; + } + + decl_event!( pub enum Event where ::Balance { /// Hi, I am a comment. @@ -293,168 +288,158 @@ mod tests { } ); - decl_module! 
{ - pub struct Module for enum Call where origin: T::Origin { - fn aux_0(_origin) -> Result { unreachable!() } - } - } - } - - mod event_module2 { - pub trait Trait { - type Origin; - type Balance; - type BlockNumber; - } - - decl_event!( + decl_module! { + pub struct Module for enum Call where origin: T::Origin { + fn aux_0(_origin) -> Result { unreachable!() } + } + } + } + + mod event_module2 { + pub trait Trait { + type Origin; + type Balance; + type BlockNumber; + } + + decl_event!( pub enum Event where ::Balance { TestEvent(Balance), } ); - decl_module! { - pub struct Module for enum Call where origin: T::Origin {} - } - - crate::decl_storage! { - trait Store for Module as TestStorage { - StorageMethod : Option; - } - add_extra_genesis { - build(|_, _, _| {}); - } - } - } - - type EventModule = event_module::Module; - type EventModule2 = event_module2::Module; - - #[derive(Debug, Clone, PartialEq, Eq, Encode, Decode)] - pub struct TestRuntime; - - impl_outer_event! { - pub enum TestEvent for TestRuntime { - event_module, - event_module2, - } - } - - impl_outer_origin! { - pub enum Origin for TestRuntime {} - } - - impl_outer_dispatch! { - pub enum Call for TestRuntime where origin: Origin { - event_module::EventModule, - event_module2::EventModule2, - } - } - - impl event_module::Trait for TestRuntime { - type Origin = Origin; - type Balance = u32; - type BlockNumber = u32; - } - - impl event_module2::Trait for TestRuntime { - type Origin = Origin; - type Balance = u32; - type BlockNumber = u32; - } - - impl system::Trait for TestRuntime { - type Origin = Origin; - type AccountId = u32; - type BlockNumber = u32; - } - - impl_runtime_metadata!( + decl_module! { + pub struct Module for enum Call where origin: T::Origin {} + } + + crate::decl_storage! { + trait Store for Module as TestStorage { + StorageMethod : Option; + } + add_extra_genesis { + build(|_, _, _| {}); + } + } + } + + type EventModule = event_module::Module; + type EventModule2 = event_module2::Module; + + #[derive(Debug, Clone, PartialEq, Eq, Encode, Decode)] + pub struct TestRuntime; + + impl_outer_event! { + pub enum TestEvent for TestRuntime { + event_module, + event_module2, + } + } + + impl_outer_origin! { + pub enum Origin for TestRuntime {} + } + + impl_outer_dispatch! 
{ + pub enum Call for TestRuntime where origin: Origin { + event_module::EventModule, + event_module2::EventModule2, + } + } + + impl event_module::Trait for TestRuntime { + type Origin = Origin; + type Balance = u32; + type BlockNumber = u32; + } + + impl event_module2::Trait for TestRuntime { + type Origin = Origin; + type Balance = u32; + type BlockNumber = u32; + } + + impl system::Trait for TestRuntime { + type Origin = Origin; + type AccountId = u32; + type BlockNumber = u32; + } + + impl_runtime_metadata!( for TestRuntime with modules system::Module with Event, event_module::Module with Event Call, event_module2::Module with Event Storage Call, ); - const EXPECTED_METADATA: RuntimeMetadata = RuntimeMetadata::V3( - RuntimeMetadataV3 { - modules: DecodeDifferent::Encode(&[ - ModuleMetadata { - name: DecodeDifferent::Encode("system"), - prefix: DecodeDifferent::Encode(FnEncode(||"")), - storage: None, - calls: None, - event: Some(DecodeDifferent::Encode( - FnEncode(||&[ - EventMetadata { - name: DecodeDifferent::Encode("SystemEvent"), - arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[]) - } - ]) - )), - }, - ModuleMetadata { - name: DecodeDifferent::Encode("event_module"), - prefix: DecodeDifferent::Encode(FnEncode(||"")), - storage: None, - calls: Some( - DecodeDifferent::Encode(FnEncode(||&[ - FunctionMetadata { - name: DecodeDifferent::Encode("aux_0"), - arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[]), - } - ]))), - event: Some(DecodeDifferent::Encode( - FnEncode(||&[ - EventMetadata { - name: DecodeDifferent::Encode("TestEvent"), - arguments: DecodeDifferent::Encode(&["Balance"]), - documentation: DecodeDifferent::Encode(&[" Hi, I am a comment."]) - } - ]) - )), - }, - ModuleMetadata { - name: DecodeDifferent::Encode("event_module2"), - prefix: DecodeDifferent::Encode(FnEncode(||"TestStorage")), - storage: Some(DecodeDifferent::Encode( - FnEncode(||&[ - StorageFunctionMetadata { - name: DecodeDifferent::Encode("StorageMethod"), - modifier: StorageFunctionModifier::Optional, - ty: StorageFunctionType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter( - &event_module2::__GetByteStructStorageMethod(::std::marker::PhantomData::) - ) - ), - documentation: DecodeDifferent::Encode(&[]), - } - ]) - )), - calls: Some(DecodeDifferent::Encode(FnEncode(||&[ ]))), - event: Some(DecodeDifferent::Encode( - FnEncode(||&[ - EventMetadata { - name: DecodeDifferent::Encode("TestEvent"), - arguments: DecodeDifferent::Encode(&["Balance"]), - documentation: DecodeDifferent::Encode(&[]) - } - ]) - )), - }, - ])} - ); - - #[test] - fn runtime_metadata() { - let metadata_encoded = TestRuntime::metadata().encode(); - let metadata_decoded = RuntimeMetadataPrefixed::decode(&mut &metadata_encoded[..]); - let expected_metadata: RuntimeMetadataPrefixed = EXPECTED_METADATA.into(); - - assert_eq!(expected_metadata, metadata_decoded.unwrap()); - } + const EXPECTED_METADATA: RuntimeMetadata = RuntimeMetadata::V3(RuntimeMetadataV3 { + modules: DecodeDifferent::Encode(&[ + ModuleMetadata { + name: DecodeDifferent::Encode("system"), + prefix: DecodeDifferent::Encode(FnEncode(|| "")), + storage: None, + calls: None, + event: Some(DecodeDifferent::Encode(FnEncode(|| { + &[EventMetadata { + name: DecodeDifferent::Encode("SystemEvent"), + arguments: DecodeDifferent::Encode(&[]), + documentation: DecodeDifferent::Encode(&[]), + }] + }))), + }, + ModuleMetadata { + name: 
DecodeDifferent::Encode("event_module"), + prefix: DecodeDifferent::Encode(FnEncode(|| "")), + storage: None, + calls: Some(DecodeDifferent::Encode(FnEncode(|| { + &[FunctionMetadata { + name: DecodeDifferent::Encode("aux_0"), + arguments: DecodeDifferent::Encode(&[]), + documentation: DecodeDifferent::Encode(&[]), + }] + }))), + event: Some(DecodeDifferent::Encode(FnEncode(|| { + &[EventMetadata { + name: DecodeDifferent::Encode("TestEvent"), + arguments: DecodeDifferent::Encode(&["Balance"]), + documentation: DecodeDifferent::Encode(&[" Hi, I am a comment."]), + }] + }))), + }, + ModuleMetadata { + name: DecodeDifferent::Encode("event_module2"), + prefix: DecodeDifferent::Encode(FnEncode(|| "TestStorage")), + storage: Some(DecodeDifferent::Encode(FnEncode(|| { + &[StorageFunctionMetadata { + name: DecodeDifferent::Encode("StorageMethod"), + modifier: StorageFunctionModifier::Optional, + ty: StorageFunctionType::Plain(DecodeDifferent::Encode("u32")), + default: DecodeDifferent::Encode(DefaultByteGetter( + &event_module2::__GetByteStructStorageMethod( + ::std::marker::PhantomData::, + ), + )), + documentation: DecodeDifferent::Encode(&[]), + }] + }))), + calls: Some(DecodeDifferent::Encode(FnEncode(|| &[]))), + event: Some(DecodeDifferent::Encode(FnEncode(|| { + &[EventMetadata { + name: DecodeDifferent::Encode("TestEvent"), + arguments: DecodeDifferent::Encode(&["Balance"]), + documentation: DecodeDifferent::Encode(&[]), + }] + }))), + }, + ]), + }); + + #[test] + fn runtime_metadata() { + let metadata_encoded = TestRuntime::metadata().encode(); + let metadata_decoded = RuntimeMetadataPrefixed::decode(&mut &metadata_encoded[..]); + let expected_metadata: RuntimeMetadataPrefixed = EXPECTED_METADATA.into(); + + assert_eq!(expected_metadata, metadata_decoded.unwrap()); + } } diff --git a/srml/support/src/origin.rs b/srml/support/src/origin.rs index 2d97f218e0..8cc810a960 100644 --- a/srml/support/src/origin.rs +++ b/srml/support/src/origin.rs @@ -147,78 +147,78 @@ macro_rules! 
impl_outer_origin { #[cfg(test)] mod tests { - mod system { - pub trait Trait { - type AccountId; - } - - #[derive(Clone, PartialEq, Eq, Debug)] - pub enum RawOrigin { - Root, - Signed(AccountId), - Inherent, - } - - impl From> for RawOrigin { - fn from(s: Option) -> RawOrigin { - match s { - Some(who) => RawOrigin::Signed(who), - None => RawOrigin::Inherent, - } - } - } - - pub type Origin = RawOrigin<::AccountId>; - } - - mod origin_without_generic { - #[derive(Clone, PartialEq, Eq, Debug)] - pub struct Origin; - } - - mod origin_with_generic { - #[derive(Clone, PartialEq, Eq, Debug)] - pub struct Origin { - t: T - } - } - - #[derive(Clone, PartialEq, Eq, Debug)] - pub struct TestRuntime; - - impl system::Trait for TestRuntime { - type AccountId = u32; - } - - impl_outer_origin!( + mod system { + pub trait Trait { + type AccountId; + } + + #[derive(Clone, PartialEq, Eq, Debug)] + pub enum RawOrigin { + Root, + Signed(AccountId), + Inherent, + } + + impl From> for RawOrigin { + fn from(s: Option) -> RawOrigin { + match s { + Some(who) => RawOrigin::Signed(who), + None => RawOrigin::Inherent, + } + } + } + + pub type Origin = RawOrigin<::AccountId>; + } + + mod origin_without_generic { + #[derive(Clone, PartialEq, Eq, Debug)] + pub struct Origin; + } + + mod origin_with_generic { + #[derive(Clone, PartialEq, Eq, Debug)] + pub struct Origin { + t: T, + } + } + + #[derive(Clone, PartialEq, Eq, Debug)] + pub struct TestRuntime; + + impl system::Trait for TestRuntime { + type AccountId = u32; + } + + impl_outer_origin!( pub enum OriginWithoutSystem for TestRuntime { origin_without_generic, origin_with_generic, } ); - impl_outer_origin!( + impl_outer_origin!( pub enum OriginWithoutSystem2 for TestRuntime { origin_with_generic, origin_without_generic } ); - impl_outer_origin!( + impl_outer_origin!( pub enum OriginWithSystem for TestRuntime where system = system { origin_without_generic, origin_with_generic } ); - impl_outer_origin!( + impl_outer_origin!( pub enum OriginWithSystem2 for TestRuntime where system = system { origin_with_generic, origin_without_generic, } ); - impl_outer_origin!( + impl_outer_origin!( pub enum OriginEmpty for TestRuntime where system = system {} ); } diff --git a/srml/support/src/storage/generator.rs b/srml/support/src/storage/generator.rs index 97bfc6dc20..42c384bef9 100644 --- a/srml/support/src/storage/generator.rs +++ b/srml/support/src/storage/generator.rs @@ -47,184 +47,199 @@ //! ``` use crate::codec; -use crate::rstd::vec::Vec; -#[cfg(feature = "std")] -use crate::storage::unhashed::generator::UnhashedStorage; #[doc(hidden)] pub use crate::rstd::borrow::Borrow; #[doc(hidden)] -pub use crate::rstd::marker::PhantomData; -#[doc(hidden)] pub use crate::rstd::boxed::Box; +#[doc(hidden)] +pub use crate::rstd::marker::PhantomData; +use crate::rstd::vec::Vec; +#[cfg(feature = "std")] +use crate::storage::unhashed::generator::UnhashedStorage; pub use srml_metadata::{ - DecodeDifferent, StorageMetadata, StorageFunctionMetadata, - StorageFunctionType, StorageFunctionModifier, - DefaultByte, DefaultByteGetter, + DecodeDifferent, DefaultByte, DefaultByteGetter, StorageFunctionMetadata, + StorageFunctionModifier, StorageFunctionType, StorageMetadata, }; /// Abstraction around storage. pub trait Storage { - /// true if the key exists in storage. - fn exists(&self, key: &[u8]) -> bool; - - /// Load the bytes of a key from storage. Can panic if the type is incorrect. - fn get(&self, key: &[u8]) -> Option; - - /// Load the bytes of a key from storage. 
Can panic if the type is incorrect. Will panic if - /// it's not there. - fn require(&self, key: &[u8]) -> T { self.get(key).expect("Required values must be in storage") } - - /// Load the bytes of a key from storage. Can panic if the type is incorrect. The type's - /// default is returned if it's not there. - fn get_or_default(&self, key: &[u8]) -> T { self.get(key).unwrap_or_default() } - - /// Put a value in under a key. - fn put(&self, key: &[u8], val: &T); - - /// Remove the bytes of a key from storage. - fn kill(&self, key: &[u8]); - - /// Take a value from storage, deleting it after reading. - fn take(&self, key: &[u8]) -> Option { - let value = self.get(key); - self.kill(key); - value - } - - /// Take a value from storage, deleting it after reading. - fn take_or_panic(&self, key: &[u8]) -> T { self.take(key).expect("Required values must be in storage") } - - /// Take a value from storage, deleting it after reading. - fn take_or_default(&self, key: &[u8]) -> T { self.take(key).unwrap_or_default() } + /// true if the key exists in storage. + fn exists(&self, key: &[u8]) -> bool; + + /// Load the bytes of a key from storage. Can panic if the type is incorrect. + fn get(&self, key: &[u8]) -> Option; + + /// Load the bytes of a key from storage. Can panic if the type is incorrect. Will panic if + /// it's not there. + fn require(&self, key: &[u8]) -> T { + self.get(key).expect("Required values must be in storage") + } + + /// Load the bytes of a key from storage. Can panic if the type is incorrect. The type's + /// default is returned if it's not there. + fn get_or_default(&self, key: &[u8]) -> T { + self.get(key).unwrap_or_default() + } + + /// Put a value in under a key. + fn put(&self, key: &[u8], val: &T); + + /// Remove the bytes of a key from storage. + fn kill(&self, key: &[u8]); + + /// Take a value from storage, deleting it after reading. + fn take(&self, key: &[u8]) -> Option { + let value = self.get(key); + self.kill(key); + value + } + + /// Take a value from storage, deleting it after reading. + fn take_or_panic(&self, key: &[u8]) -> T { + self.take(key).expect("Required values must be in storage") + } + + /// Take a value from storage, deleting it after reading. + fn take_or_default(&self, key: &[u8]) -> T { + self.take(key).unwrap_or_default() + } } // We use a construct like this during when genesis storage is being built. #[cfg(feature = "std")] -impl Storage for (crate::rstd::cell::RefCell<&mut sr_primitives::StorageOverlay>, PhantomData) { - fn exists(&self, key: &[u8]) -> bool { - UnhashedStorage::exists(self, &S::hash(key)) - } - - fn get(&self, key: &[u8]) -> Option { - UnhashedStorage::get(self, &S::hash(key)) - } - - fn put(&self, key: &[u8], val: &T) { - UnhashedStorage::put(self, &S::hash(key), val) - } - - fn kill(&self, key: &[u8]) { - UnhashedStorage::kill(self, &S::hash(key)) - } +impl Storage + for ( + crate::rstd::cell::RefCell<&mut sr_primitives::StorageOverlay>, + PhantomData, + ) +{ + fn exists(&self, key: &[u8]) -> bool { + UnhashedStorage::exists(self, &S::hash(key)) + } + + fn get(&self, key: &[u8]) -> Option { + UnhashedStorage::get(self, &S::hash(key)) + } + + fn put(&self, key: &[u8], val: &T) { + UnhashedStorage::put(self, &S::hash(key), val) + } + + fn kill(&self, key: &[u8]) { + UnhashedStorage::kill(self, &S::hash(key)) + } } /// A strongly-typed value kept in storage. pub trait StorageValue { - /// The type that get/take returns. - type Query; + /// The type that get/take returns. + type Query; - /// Get the storage key. 
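The `take*` helpers on the `Storage` trait above compose the primitives: read the value, then `kill` the key. A sketch against an in-memory backend, assuming the `RefCell<HashMap<_, _>>` test impl defined near the end of this file:

    use srml_support::GenericStorage as Storage; // re-export of generator::Storage
    use std::{cell::RefCell, collections::HashMap};

    fn take_reads_then_deletes() {
        let storage: RefCell<HashMap<Vec<u8>, Vec<u8>>> = RefCell::new(HashMap::new());
        storage.put(b"k", &7u32); // Storage::put
        let taken: Option<u32> = storage.take(b"k"); // get + kill
        assert_eq!(taken, Some(7));
        assert!(!storage.exists(b"k")); // the key is gone afterwards
    }
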
- fn key() -> &'static [u8]; + /// Get the storage key. + fn key() -> &'static [u8]; - /// true if the value is defined in storage. - fn exists(storage: &S) -> bool { - storage.exists(Self::key()) - } + /// true if the value is defined in storage. + fn exists(storage: &S) -> bool { + storage.exists(Self::key()) + } - /// Load the value from the provided storage instance. - fn get(storage: &S) -> Self::Query; + /// Load the value from the provided storage instance. + fn get(storage: &S) -> Self::Query; - /// Take a value from storage, removing it afterwards. - fn take(storage: &S) -> Self::Query; + /// Take a value from storage, removing it afterwards. + fn take(storage: &S) -> Self::Query; - /// Store a value under this key into the provided storage instance. - fn put(val: &T, storage: &S) { - storage.put(Self::key(), val) - } + /// Store a value under this key into the provided storage instance. + fn put(val: &T, storage: &S) { + storage.put(Self::key(), val) + } - /// Mutate this value - fn mutate R, S: Storage>(f: F, storage: &S) -> R; + /// Mutate this value + fn mutate R, S: Storage>(f: F, storage: &S) -> R; - /// Clear the storage value. - fn kill(storage: &S) { - storage.kill(Self::key()) - } + /// Clear the storage value. + fn kill(storage: &S) { + storage.kill(Self::key()) + } } /// A strongly-typed list in storage. pub trait StorageList { - /// Get the prefix key in storage. - fn prefix() -> &'static [u8]; + /// Get the prefix key in storage. + fn prefix() -> &'static [u8]; - /// Get the key used to put the length field. - fn len_key() -> Vec; + /// Get the key used to put the length field. + fn len_key() -> Vec; - /// Get the storage key used to fetch a value at a given index. - fn key_for(index: u32) -> Vec; + /// Get the storage key used to fetch a value at a given index. + fn key_for(index: u32) -> Vec; - /// Read out all the items. - fn items(storage: &S) -> Vec; + /// Read out all the items. + fn items(storage: &S) -> Vec; - /// Set the current set of items. - fn set_items(items: &[T], storage: &S); + /// Set the current set of items. + fn set_items(items: &[T], storage: &S); - /// Set the item at the given index. - fn set_item(index: u32, item: &T, storage: &S); + /// Set the item at the given index. + fn set_item(index: u32, item: &T, storage: &S); - /// Load the value at given index. Returns `None` if the index is out-of-bounds. - fn get(index: u32, storage: &S) -> Option; + /// Load the value at given index. Returns `None` if the index is out-of-bounds. + fn get(index: u32, storage: &S) -> Option; - /// Load the length of the list - fn len(storage: &S) -> u32; + /// Load the length of the list + fn len(storage: &S) -> u32; - /// Clear the list. - fn clear(storage: &S); + /// Clear the list. + fn clear(storage: &S); } /// A strongly-typed map in storage. pub trait StorageMap { - /// The type that get/take returns. - type Query; + /// The type that get/take returns. + type Query; - /// Get the prefix key in storage. - fn prefix() -> &'static [u8]; + /// Get the prefix key in storage. + fn prefix() -> &'static [u8]; - /// Get the storage key used to fetch a value corresponding to a specific key. - fn key_for(x: &K) -> Vec; + /// Get the storage key used to fetch a value corresponding to a specific key. + fn key_for(x: &K) -> Vec; - /// true if the value is defined in storage. - fn exists(key: &K, storage: &S) -> bool { - storage.exists(&Self::key_for(key)[..]) - } + /// true if the value is defined in storage. 
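For `StorageMap`, `key_for` conventionally concatenates the map's prefix with the SCALE-encoded key; the `storage_items!` maps in the tests below (e.g. prefix `b"c:"`) follow this scheme. A sketch of the convention (an assumption here, since the trait only fixes the signature):

    use parity_codec::Encode;

    fn key_for<K: Encode>(prefix: &[u8], key: &K) -> Vec<u8> {
        let mut buf = prefix.to_vec(); // e.g. b"c:" for `Map` in the tests
        key.encode_to(&mut buf); // append SCALE(key)
        buf
    }
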
+ fn exists(key: &K, storage: &S) -> bool { + storage.exists(&Self::key_for(key)[..]) + } - /// Load the value associated with the given key from the map. - fn get(key: &K, storage: &S) -> Self::Query; + /// Load the value associated with the given key from the map. + fn get(key: &K, storage: &S) -> Self::Query; - /// Take the value under a key. - fn take(key: &K, storage: &S) -> Self::Query; + /// Take the value under a key. + fn take(key: &K, storage: &S) -> Self::Query; - /// Store a value to be associated with the given key from the map. - fn insert(key: &K, val: &V, storage: &S) { - storage.put(&Self::key_for(key)[..], val); - } + /// Store a value to be associated with the given key from the map. + fn insert(key: &K, val: &V, storage: &S) { + storage.put(&Self::key_for(key)[..], val); + } - /// Remove the value under a key. - fn remove(key: &K, storage: &S) { - storage.kill(&Self::key_for(key)[..]); - } + /// Remove the value under a key. + fn remove(key: &K, storage: &S) { + storage.kill(&Self::key_for(key)[..]); + } - /// Mutate the value under a key. - fn mutate R, S: Storage>(key: &K, f: F, storage: &S) -> R; + /// Mutate the value under a key. + fn mutate R, S: Storage>(key: &K, f: F, storage: &S) -> R; } /// A `StorageMap` with enumerable entries. pub trait EnumerableStorageMap: StorageMap { - /// Return current head element. - fn head(storage: &S) -> Option; - - /// Enumerate all elements in the map. - fn enumerate<'a, S: Storage>(storage: &'a S) -> Box + 'a> where K: 'a, V: 'a; + /// Return current head element. + fn head(storage: &S) -> Option; + + /// Enumerate all elements in the map. + fn enumerate<'a, S: Storage>(storage: &'a S) -> Box + 'a> + where + K: 'a, + V: 'a; } // FIXME #1466 Remove this in favor of `decl_storage` macro. @@ -547,533 +562,538 @@ macro_rules! __handle_wrap_internal { // FIXME: revisit this idiom once we get `type`s in `impl`s. /*impl Module { - type Now = super::Now; + type Now = super::Now; }*/ #[cfg(test)] // Do not complain about unused `dispatch` and `dispatch_aux`. #[allow(dead_code)] mod tests { - use std::collections::HashMap; - use std::cell::RefCell; - use codec::{Decode, Encode}; - use super::*; - use crate::rstd::marker::PhantomData; - - impl Storage for RefCell, Vec>> { - fn exists(&self, key: &[u8]) -> bool { - self.borrow_mut().get(key).is_some() - } - - fn get(&self, key: &[u8]) -> Option { - self.borrow_mut().get(key).map(|v| T::decode(&mut &v[..]).unwrap()) - } - - fn put(&self, key: &[u8], val: &T) { - self.borrow_mut().insert(key.to_owned(), val.encode()); - } - - fn kill(&self, key: &[u8]) { - self.borrow_mut().remove(key); - } - } - - storage_items! 
{ - Value: b"a" => u32; - List: b"b:" => list [u64]; - Map: b"c:" => map [u32 => [u8; 32]]; - } - - #[test] - fn value() { - let storage = RefCell::new(HashMap::new()); - assert!(Value::get(&storage).is_none()); - Value::put(&100_000, &storage); - assert_eq!(Value::get(&storage), Some(100_000)); - Value::kill(&storage); - assert!(Value::get(&storage).is_none()); - } - - #[test] - fn list() { - let storage = RefCell::new(HashMap::new()); - assert_eq!(List::len(&storage), 0); - assert!(List::items(&storage).is_empty()); - - List::set_items(&[0, 2, 4, 6, 8], &storage); - assert_eq!(List::items(&storage), &[0, 2, 4, 6, 8]); - assert_eq!(List::len(&storage), 5); - - List::set_item(2, &10, &storage); - assert_eq!(List::items(&storage), &[0, 2, 10, 6, 8]); - assert_eq!(List::len(&storage), 5); - - List::clear(&storage); - assert_eq!(List::len(&storage), 0); - assert!(List::items(&storage).is_empty()); - } - - #[test] - fn map() { - let storage = RefCell::new(HashMap::new()); - assert!(Map::get(&5, &storage).is_none()); - Map::insert(&5, &[1; 32], &storage); - assert_eq!(Map::get(&5, &storage), Some([1; 32])); - assert_eq!(Map::take(&5, &storage), Some([1; 32])); - assert!(Map::get(&5, &storage).is_none()); - assert!(Map::get(&999, &storage).is_none()); - } - - pub trait Trait { - type Origin: codec::Encode + codec::Decode + ::std::default::Default; - type BlockNumber; - } - - decl_module! { - pub struct Module for enum Call where origin: T::Origin {} - } - - crate::decl_storage! { - trait Store for Module as TestStorage { - // non-getters: pub / $default - - /// Hello, this is doc! - U32 : Option = Some(3); - pub PUBU32 : Option; - U32MYDEF : Option = None; - pub PUBU32MYDEF : Option = Some(3); - - // getters: pub / $default - // we need at least one type which uses T, otherwise GenesisConfig will complain. 
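The `= value` defaults declared in this block surface in the metadata as `DefaultByteGetter`s whose bytes are the SCALE encoding of the default. A conceptual sketch; the generated `__GetByteStruct*` types are hidden implementation details, so the exact shape is an assumption:

    use parity_codec::Encode;

    fn default_bytes_for_u32_item() -> Vec<u8> {
        // For `U32: Option<u32> = Some(3);` the getter would yield the
        // encoding of `Some(3u32)`: [0x01, 0x03, 0x00, 0x00, 0x00].
        Some(3u32).encode()
    }
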
- GETU32 get(u32_getter): T::Origin; - pub PUBGETU32 get(pub_u32_getter) build(|config: &GenesisConfig| config.u32_getter_with_config): u32; - GETU32WITHCONFIG get(u32_getter_with_config) config(): u32; - pub PUBGETU32WITHCONFIG get(pub_u32_getter_with_config) config(): u32; - GETU32MYDEF get(u32_getter_mydef): Option = Some(4); - pub PUBGETU32MYDEF get(pub_u32_getter_mydef) config(): u32 = 3; - GETU32WITHCONFIGMYDEF get(u32_getter_with_config_mydef) config(): u32 = 2; - pub PUBGETU32WITHCONFIGMYDEF get(pub_u32_getter_with_config_mydef) config(): u32 = 1; - PUBGETU32WITHCONFIGMYDEFOPT get(pub_u32_getter_with_config_mydef_opt) config(): Option = Some(100); - - // map non-getters: pub / $default - MAPU32 : map u32 => Option; - pub PUBMAPU32 : map u32 => Option; - MAPU32MYDEF : map u32 => Option = None; - pub PUBMAPU32MYDEF : map u32 => Option = Some("hello".into()); - - // map getters: pub / $default - GETMAPU32 get(map_u32_getter): map u32 => String; - pub PUBGETMAPU32 get(pub_map_u32_getter): map u32 => String; - - GETMAPU32MYDEF get(map_u32_getter_mydef): map u32 => String = "map".into(); - pub PUBGETMAPU32MYDEF get(pub_map_u32_getter_mydef): map u32 => String = "pubmap".into(); - - // linked map - LINKEDMAPU32 : linked_map u32 => Option; - pub PUBLINKEDMAPU32MYDEF : linked_map u32 => Option = Some("hello".into()); - GETLINKEDMAPU32 get(linked_map_u32_getter): linked_map u32 => String; - pub PUBGETLINKEDMAPU32MYDEF get(pub_linked_map_u32_getter_mydef): linked_map u32 => String = "pubmap".into(); - - COMPLEXTYPE1: ::std::vec::Vec<::Origin>; - COMPLEXTYPE2: (Vec)>>, u32); - COMPLEXTYPE3: ([u32;25]); - } - add_extra_genesis { - build(|_, _, _| {}); - } - } - - struct TraitImpl {} - - impl Trait for TraitImpl { - type Origin = u32; - type BlockNumber = u32; - } - - const EXPECTED_METADATA: StorageMetadata = StorageMetadata { - functions: DecodeDifferent::Encode(&[ - StorageFunctionMetadata { - name: DecodeDifferent::Encode("U32"), - modifier: StorageFunctionModifier::Optional, - ty: StorageFunctionType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructU32(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[ " Hello, this is doc!" 
]), - }, - StorageFunctionMetadata { - name: DecodeDifferent::Encode("PUBU32"), - modifier: StorageFunctionModifier::Optional, - ty: StorageFunctionType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructPUBU32(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageFunctionMetadata { - name: DecodeDifferent::Encode("U32MYDEF"), - modifier: StorageFunctionModifier::Optional, - ty: StorageFunctionType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructU32MYDEF(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageFunctionMetadata { - name: DecodeDifferent::Encode("PUBU32MYDEF"), - modifier: StorageFunctionModifier::Optional, - ty: StorageFunctionType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructPUBU32MYDEF(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageFunctionMetadata { - name: DecodeDifferent::Encode("GETU32"), - modifier: StorageFunctionModifier::Default, - ty: StorageFunctionType::Plain(DecodeDifferent::Encode("T::Origin")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGETU32(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageFunctionMetadata { - name: DecodeDifferent::Encode("PUBGETU32"), - modifier: StorageFunctionModifier::Default, - ty: StorageFunctionType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructPUBGETU32(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageFunctionMetadata { - name: DecodeDifferent::Encode("GETU32WITHCONFIG"), - modifier: StorageFunctionModifier::Default, - ty: StorageFunctionType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGETU32WITHCONFIG(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageFunctionMetadata { - name: DecodeDifferent::Encode("PUBGETU32WITHCONFIG"), - modifier: StorageFunctionModifier::Default, - ty: StorageFunctionType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructPUBGETU32WITHCONFIG(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageFunctionMetadata { - name: DecodeDifferent::Encode("GETU32MYDEF"), - modifier: StorageFunctionModifier::Optional, - ty: StorageFunctionType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGETU32MYDEF(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageFunctionMetadata { - name: DecodeDifferent::Encode("PUBGETU32MYDEF"), - modifier: StorageFunctionModifier::Default, - ty: StorageFunctionType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructPUBGETU32MYDEF(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageFunctionMetadata { - name: DecodeDifferent::Encode("GETU32WITHCONFIGMYDEF"), - modifier: StorageFunctionModifier::Default, - ty: StorageFunctionType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGETU32WITHCONFIGMYDEF(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageFunctionMetadata { - name: 
DecodeDifferent::Encode("PUBGETU32WITHCONFIGMYDEF"), - modifier: StorageFunctionModifier::Default, - ty: StorageFunctionType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructPUBGETU32WITHCONFIGMYDEF(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageFunctionMetadata { - name: DecodeDifferent::Encode("PUBGETU32WITHCONFIGMYDEFOPT"), - modifier: StorageFunctionModifier::Optional, - ty: StorageFunctionType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructPUBGETU32WITHCONFIGMYDEFOPT(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - - StorageFunctionMetadata { - name: DecodeDifferent::Encode("MAPU32"), - modifier: StorageFunctionModifier::Optional, - ty: StorageFunctionType::Map { - key: DecodeDifferent::Encode("u32"), - value: DecodeDifferent::Encode("String"), - is_linked: false, - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructMAPU32(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageFunctionMetadata { - name: DecodeDifferent::Encode("PUBMAPU32"), - modifier: StorageFunctionModifier::Optional, - ty: StorageFunctionType::Map { - key: DecodeDifferent::Encode("u32"), - value: DecodeDifferent::Encode("String"), - is_linked: false, - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructPUBMAPU32(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageFunctionMetadata { - name: DecodeDifferent::Encode("MAPU32MYDEF"), - modifier: StorageFunctionModifier::Optional, - ty: StorageFunctionType::Map { - key: DecodeDifferent::Encode("u32"), - value: DecodeDifferent::Encode("String"), - is_linked: false, - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructMAPU32MYDEF(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageFunctionMetadata { - name: DecodeDifferent::Encode("PUBMAPU32MYDEF"), - modifier: StorageFunctionModifier::Optional, - ty: StorageFunctionType::Map { - key: DecodeDifferent::Encode("u32"), - value: DecodeDifferent::Encode("String"), - is_linked: false, - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructPUBMAPU32MYDEF(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageFunctionMetadata { - name: DecodeDifferent::Encode("GETMAPU32"), - modifier: StorageFunctionModifier::Default, - ty: StorageFunctionType::Map { - key: DecodeDifferent::Encode("u32"), - value: DecodeDifferent::Encode("String"), - is_linked: false, - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGETMAPU32(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageFunctionMetadata { - name: DecodeDifferent::Encode("PUBGETMAPU32"), - modifier: StorageFunctionModifier::Default, - ty: StorageFunctionType::Map { - key: DecodeDifferent::Encode("u32"), - value: DecodeDifferent::Encode("String"), - is_linked: false, - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructPUBGETMAPU32(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageFunctionMetadata { - name: DecodeDifferent::Encode("GETMAPU32MYDEF"), - modifier: StorageFunctionModifier::Default, - ty: StorageFunctionType::Map { - key: DecodeDifferent::Encode("u32"), - value: DecodeDifferent::Encode("String"), - is_linked: false, - }, - default: DecodeDifferent::Encode( - 
DefaultByteGetter(&__GetByteStructGETMAPU32MYDEF(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageFunctionMetadata { - name: DecodeDifferent::Encode("PUBGETMAPU32MYDEF"), - modifier: StorageFunctionModifier::Default, - ty: StorageFunctionType::Map { - key: DecodeDifferent::Encode("u32"), - value: DecodeDifferent::Encode("String"), - is_linked: false, - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructPUBGETMAPU32MYDEF(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageFunctionMetadata { - name: DecodeDifferent::Encode("LINKEDMAPU32"), - modifier: StorageFunctionModifier::Optional, - ty: StorageFunctionType::Map { - key: DecodeDifferent::Encode("u32"), - value: DecodeDifferent::Encode("String"), - is_linked: true, - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructLINKEDMAPU32(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageFunctionMetadata { - name: DecodeDifferent::Encode("PUBLINKEDMAPU32MYDEF"), - modifier: StorageFunctionModifier::Optional, - ty: StorageFunctionType::Map { - key: DecodeDifferent::Encode("u32"), - value: DecodeDifferent::Encode("String"), - is_linked: true, - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructPUBLINKEDMAPU32MYDEF(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageFunctionMetadata { - name: DecodeDifferent::Encode("GETLINKEDMAPU32"), - modifier: StorageFunctionModifier::Default, - ty: StorageFunctionType::Map { - key: DecodeDifferent::Encode("u32"), - value: DecodeDifferent::Encode("String"), - is_linked: true, - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGETLINKEDMAPU32(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageFunctionMetadata { - name: DecodeDifferent::Encode("PUBGETLINKEDMAPU32MYDEF"), - modifier: StorageFunctionModifier::Default, - ty: StorageFunctionType::Map { - key: DecodeDifferent::Encode("u32"), - value: DecodeDifferent::Encode("String"), - is_linked: true, - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructPUBGETLINKEDMAPU32MYDEF(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageFunctionMetadata { - name: DecodeDifferent::Encode("COMPLEXTYPE1"), - modifier: StorageFunctionModifier::Default, - ty: StorageFunctionType::Plain(DecodeDifferent::Encode("::std::vec::Vec<::Origin>")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructCOMPLEXTYPE1(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageFunctionMetadata { - name: DecodeDifferent::Encode("COMPLEXTYPE2"), - modifier: StorageFunctionModifier::Default, - ty: StorageFunctionType::Plain(DecodeDifferent::Encode("(Vec)>>, u32)")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructCOMPLEXTYPE2(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageFunctionMetadata { - name: DecodeDifferent::Encode("COMPLEXTYPE3"), - modifier: StorageFunctionModifier::Default, - ty: StorageFunctionType::Plain(DecodeDifferent::Encode("([u32; 25])")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructCOMPLEXTYPE3(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - ]) - }; - - #[test] - fn store_metadata() { - let metadata = Module::::store_metadata(); - assert_eq!(EXPECTED_METADATA, metadata); - } - - #[test] - fn check_genesis_config() { - let 
config = GenesisConfig::::default(); - assert_eq!(config.u32_getter_with_config, 0u32); - assert_eq!(config.pub_u32_getter_with_config, 0u32); - - assert_eq!(config.pub_u32_getter_mydef, 3u32); - assert_eq!(config.u32_getter_with_config_mydef, 2u32); - assert_eq!(config.pub_u32_getter_with_config_mydef, 1u32); - assert_eq!(config.pub_u32_getter_with_config_mydef_opt, 100u32); - } + use super::*; + use crate::rstd::marker::PhantomData; + use codec::{Decode, Encode}; + use std::cell::RefCell; + use std::collections::HashMap; + + impl Storage for RefCell, Vec>> { + fn exists(&self, key: &[u8]) -> bool { + self.borrow_mut().get(key).is_some() + } + + fn get(&self, key: &[u8]) -> Option { + self.borrow_mut() + .get(key) + .map(|v| T::decode(&mut &v[..]).unwrap()) + } + + fn put(&self, key: &[u8], val: &T) { + self.borrow_mut().insert(key.to_owned(), val.encode()); + } + + fn kill(&self, key: &[u8]) { + self.borrow_mut().remove(key); + } + } + + storage_items! { + Value: b"a" => u32; + List: b"b:" => list [u64]; + Map: b"c:" => map [u32 => [u8; 32]]; + } + + #[test] + fn value() { + let storage = RefCell::new(HashMap::new()); + assert!(Value::get(&storage).is_none()); + Value::put(&100_000, &storage); + assert_eq!(Value::get(&storage), Some(100_000)); + Value::kill(&storage); + assert!(Value::get(&storage).is_none()); + } + + #[test] + fn list() { + let storage = RefCell::new(HashMap::new()); + assert_eq!(List::len(&storage), 0); + assert!(List::items(&storage).is_empty()); + + List::set_items(&[0, 2, 4, 6, 8], &storage); + assert_eq!(List::items(&storage), &[0, 2, 4, 6, 8]); + assert_eq!(List::len(&storage), 5); + + List::set_item(2, &10, &storage); + assert_eq!(List::items(&storage), &[0, 2, 10, 6, 8]); + assert_eq!(List::len(&storage), 5); + + List::clear(&storage); + assert_eq!(List::len(&storage), 0); + assert!(List::items(&storage).is_empty()); + } + + #[test] + fn map() { + let storage = RefCell::new(HashMap::new()); + assert!(Map::get(&5, &storage).is_none()); + Map::insert(&5, &[1; 32], &storage); + assert_eq!(Map::get(&5, &storage), Some([1; 32])); + assert_eq!(Map::take(&5, &storage), Some([1; 32])); + assert!(Map::get(&5, &storage).is_none()); + assert!(Map::get(&999, &storage).is_none()); + } + + pub trait Trait { + type Origin: codec::Encode + codec::Decode + ::std::default::Default; + type BlockNumber; + } + + decl_module! { + pub struct Module for enum Call where origin: T::Origin {} + } + + crate::decl_storage! { + trait Store for Module as TestStorage { + // non-getters: pub / $default + + /// Hello, this is doc! + U32 : Option = Some(3); + pub PUBU32 : Option; + U32MYDEF : Option = None; + pub PUBU32MYDEF : Option = Some(3); + + // getters: pub / $default + // we need at least one type which uses T, otherwise GenesisConfig will complain. 
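// A minimal sketch of how these keywords compose, assuming the `decl_storage!`
// semantics exercised by the tests in this module (the item name and default
// below are hypothetical):
//
//     Foo get(foo) config(initial_foo): u32 = 7;
//
// declares a storage item `Foo`, generates a `foo()` getter on the module, adds
// an `initial_foo` field to the generated `GenesisConfig`, and yields `7`
// whenever no explicit value is present in storage.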
+ GETU32 get(u32_getter): T::Origin; + pub PUBGETU32 get(pub_u32_getter) build(|config: &GenesisConfig| config.u32_getter_with_config): u32; + GETU32WITHCONFIG get(u32_getter_with_config) config(): u32; + pub PUBGETU32WITHCONFIG get(pub_u32_getter_with_config) config(): u32; + GETU32MYDEF get(u32_getter_mydef): Option = Some(4); + pub PUBGETU32MYDEF get(pub_u32_getter_mydef) config(): u32 = 3; + GETU32WITHCONFIGMYDEF get(u32_getter_with_config_mydef) config(): u32 = 2; + pub PUBGETU32WITHCONFIGMYDEF get(pub_u32_getter_with_config_mydef) config(): u32 = 1; + PUBGETU32WITHCONFIGMYDEFOPT get(pub_u32_getter_with_config_mydef_opt) config(): Option = Some(100); + + // map non-getters: pub / $default + MAPU32 : map u32 => Option; + pub PUBMAPU32 : map u32 => Option; + MAPU32MYDEF : map u32 => Option = None; + pub PUBMAPU32MYDEF : map u32 => Option = Some("hello".into()); + + // map getters: pub / $default + GETMAPU32 get(map_u32_getter): map u32 => String; + pub PUBGETMAPU32 get(pub_map_u32_getter): map u32 => String; + + GETMAPU32MYDEF get(map_u32_getter_mydef): map u32 => String = "map".into(); + pub PUBGETMAPU32MYDEF get(pub_map_u32_getter_mydef): map u32 => String = "pubmap".into(); + + // linked map + LINKEDMAPU32 : linked_map u32 => Option; + pub PUBLINKEDMAPU32MYDEF : linked_map u32 => Option = Some("hello".into()); + GETLINKEDMAPU32 get(linked_map_u32_getter): linked_map u32 => String; + pub PUBGETLINKEDMAPU32MYDEF get(pub_linked_map_u32_getter_mydef): linked_map u32 => String = "pubmap".into(); + + COMPLEXTYPE1: ::std::vec::Vec<::Origin>; + COMPLEXTYPE2: (Vec)>>, u32); + COMPLEXTYPE3: ([u32;25]); + } + add_extra_genesis { + build(|_, _, _| {}); + } + } + + struct TraitImpl {} + + impl Trait for TraitImpl { + type Origin = u32; + type BlockNumber = u32; + } + + const EXPECTED_METADATA: StorageMetadata = StorageMetadata { + functions: DecodeDifferent::Encode(&[ + StorageFunctionMetadata { + name: DecodeDifferent::Encode("U32"), + modifier: StorageFunctionModifier::Optional, + ty: StorageFunctionType::Plain(DecodeDifferent::Encode("u32")), + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructU32( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[" Hello, this is doc!"]), + }, + StorageFunctionMetadata { + name: DecodeDifferent::Encode("PUBU32"), + modifier: StorageFunctionModifier::Optional, + ty: StorageFunctionType::Plain(DecodeDifferent::Encode("u32")), + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructPUBU32( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageFunctionMetadata { + name: DecodeDifferent::Encode("U32MYDEF"), + modifier: StorageFunctionModifier::Optional, + ty: StorageFunctionType::Plain(DecodeDifferent::Encode("u32")), + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructU32MYDEF( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageFunctionMetadata { + name: DecodeDifferent::Encode("PUBU32MYDEF"), + modifier: StorageFunctionModifier::Optional, + ty: StorageFunctionType::Plain(DecodeDifferent::Encode("u32")), + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructPUBU32MYDEF( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageFunctionMetadata { + name: DecodeDifferent::Encode("GETU32"), + modifier: StorageFunctionModifier::Default, + ty: StorageFunctionType::Plain(DecodeDifferent::Encode("T::Origin")), + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructGETU32( + PhantomData::, 
+ ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageFunctionMetadata { + name: DecodeDifferent::Encode("PUBGETU32"), + modifier: StorageFunctionModifier::Default, + ty: StorageFunctionType::Plain(DecodeDifferent::Encode("u32")), + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructPUBGETU32( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageFunctionMetadata { + name: DecodeDifferent::Encode("GETU32WITHCONFIG"), + modifier: StorageFunctionModifier::Default, + ty: StorageFunctionType::Plain(DecodeDifferent::Encode("u32")), + default: DecodeDifferent::Encode(DefaultByteGetter( + &__GetByteStructGETU32WITHCONFIG(PhantomData::), + )), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageFunctionMetadata { + name: DecodeDifferent::Encode("PUBGETU32WITHCONFIG"), + modifier: StorageFunctionModifier::Default, + ty: StorageFunctionType::Plain(DecodeDifferent::Encode("u32")), + default: DecodeDifferent::Encode(DefaultByteGetter( + &__GetByteStructPUBGETU32WITHCONFIG(PhantomData::), + )), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageFunctionMetadata { + name: DecodeDifferent::Encode("GETU32MYDEF"), + modifier: StorageFunctionModifier::Optional, + ty: StorageFunctionType::Plain(DecodeDifferent::Encode("u32")), + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructGETU32MYDEF( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageFunctionMetadata { + name: DecodeDifferent::Encode("PUBGETU32MYDEF"), + modifier: StorageFunctionModifier::Default, + ty: StorageFunctionType::Plain(DecodeDifferent::Encode("u32")), + default: DecodeDifferent::Encode(DefaultByteGetter( + &__GetByteStructPUBGETU32MYDEF(PhantomData::), + )), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageFunctionMetadata { + name: DecodeDifferent::Encode("GETU32WITHCONFIGMYDEF"), + modifier: StorageFunctionModifier::Default, + ty: StorageFunctionType::Plain(DecodeDifferent::Encode("u32")), + default: DecodeDifferent::Encode(DefaultByteGetter( + &__GetByteStructGETU32WITHCONFIGMYDEF(PhantomData::), + )), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageFunctionMetadata { + name: DecodeDifferent::Encode("PUBGETU32WITHCONFIGMYDEF"), + modifier: StorageFunctionModifier::Default, + ty: StorageFunctionType::Plain(DecodeDifferent::Encode("u32")), + default: DecodeDifferent::Encode(DefaultByteGetter( + &__GetByteStructPUBGETU32WITHCONFIGMYDEF(PhantomData::), + )), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageFunctionMetadata { + name: DecodeDifferent::Encode("PUBGETU32WITHCONFIGMYDEFOPT"), + modifier: StorageFunctionModifier::Optional, + ty: StorageFunctionType::Plain(DecodeDifferent::Encode("u32")), + default: DecodeDifferent::Encode(DefaultByteGetter( + &__GetByteStructPUBGETU32WITHCONFIGMYDEFOPT(PhantomData::), + )), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageFunctionMetadata { + name: DecodeDifferent::Encode("MAPU32"), + modifier: StorageFunctionModifier::Optional, + ty: StorageFunctionType::Map { + key: DecodeDifferent::Encode("u32"), + value: DecodeDifferent::Encode("String"), + is_linked: false, + }, + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructMAPU32( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageFunctionMetadata { + name: DecodeDifferent::Encode("PUBMAPU32"), + modifier: StorageFunctionModifier::Optional, + ty: StorageFunctionType::Map { + key: DecodeDifferent::Encode("u32"), + value: 
DecodeDifferent::Encode("String"), + is_linked: false, + }, + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructPUBMAPU32( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageFunctionMetadata { + name: DecodeDifferent::Encode("MAPU32MYDEF"), + modifier: StorageFunctionModifier::Optional, + ty: StorageFunctionType::Map { + key: DecodeDifferent::Encode("u32"), + value: DecodeDifferent::Encode("String"), + is_linked: false, + }, + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructMAPU32MYDEF( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageFunctionMetadata { + name: DecodeDifferent::Encode("PUBMAPU32MYDEF"), + modifier: StorageFunctionModifier::Optional, + ty: StorageFunctionType::Map { + key: DecodeDifferent::Encode("u32"), + value: DecodeDifferent::Encode("String"), + is_linked: false, + }, + default: DecodeDifferent::Encode(DefaultByteGetter( + &__GetByteStructPUBMAPU32MYDEF(PhantomData::), + )), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageFunctionMetadata { + name: DecodeDifferent::Encode("GETMAPU32"), + modifier: StorageFunctionModifier::Default, + ty: StorageFunctionType::Map { + key: DecodeDifferent::Encode("u32"), + value: DecodeDifferent::Encode("String"), + is_linked: false, + }, + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructGETMAPU32( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageFunctionMetadata { + name: DecodeDifferent::Encode("PUBGETMAPU32"), + modifier: StorageFunctionModifier::Default, + ty: StorageFunctionType::Map { + key: DecodeDifferent::Encode("u32"), + value: DecodeDifferent::Encode("String"), + is_linked: false, + }, + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructPUBGETMAPU32( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageFunctionMetadata { + name: DecodeDifferent::Encode("GETMAPU32MYDEF"), + modifier: StorageFunctionModifier::Default, + ty: StorageFunctionType::Map { + key: DecodeDifferent::Encode("u32"), + value: DecodeDifferent::Encode("String"), + is_linked: false, + }, + default: DecodeDifferent::Encode(DefaultByteGetter( + &__GetByteStructGETMAPU32MYDEF(PhantomData::), + )), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageFunctionMetadata { + name: DecodeDifferent::Encode("PUBGETMAPU32MYDEF"), + modifier: StorageFunctionModifier::Default, + ty: StorageFunctionType::Map { + key: DecodeDifferent::Encode("u32"), + value: DecodeDifferent::Encode("String"), + is_linked: false, + }, + default: DecodeDifferent::Encode(DefaultByteGetter( + &__GetByteStructPUBGETMAPU32MYDEF(PhantomData::), + )), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageFunctionMetadata { + name: DecodeDifferent::Encode("LINKEDMAPU32"), + modifier: StorageFunctionModifier::Optional, + ty: StorageFunctionType::Map { + key: DecodeDifferent::Encode("u32"), + value: DecodeDifferent::Encode("String"), + is_linked: true, + }, + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructLINKEDMAPU32( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageFunctionMetadata { + name: DecodeDifferent::Encode("PUBLINKEDMAPU32MYDEF"), + modifier: StorageFunctionModifier::Optional, + ty: StorageFunctionType::Map { + key: DecodeDifferent::Encode("u32"), + value: DecodeDifferent::Encode("String"), + is_linked: true, + }, + default: DecodeDifferent::Encode(DefaultByteGetter( + 
&__GetByteStructPUBLINKEDMAPU32MYDEF(PhantomData::), + )), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageFunctionMetadata { + name: DecodeDifferent::Encode("GETLINKEDMAPU32"), + modifier: StorageFunctionModifier::Default, + ty: StorageFunctionType::Map { + key: DecodeDifferent::Encode("u32"), + value: DecodeDifferent::Encode("String"), + is_linked: true, + }, + default: DecodeDifferent::Encode(DefaultByteGetter( + &__GetByteStructGETLINKEDMAPU32(PhantomData::), + )), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageFunctionMetadata { + name: DecodeDifferent::Encode("PUBGETLINKEDMAPU32MYDEF"), + modifier: StorageFunctionModifier::Default, + ty: StorageFunctionType::Map { + key: DecodeDifferent::Encode("u32"), + value: DecodeDifferent::Encode("String"), + is_linked: true, + }, + default: DecodeDifferent::Encode(DefaultByteGetter( + &__GetByteStructPUBGETLINKEDMAPU32MYDEF(PhantomData::), + )), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageFunctionMetadata { + name: DecodeDifferent::Encode("COMPLEXTYPE1"), + modifier: StorageFunctionModifier::Default, + ty: StorageFunctionType::Plain(DecodeDifferent::Encode( + "::std::vec::Vec<::Origin>", + )), + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructCOMPLEXTYPE1( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageFunctionMetadata { + name: DecodeDifferent::Encode("COMPLEXTYPE2"), + modifier: StorageFunctionModifier::Default, + ty: StorageFunctionType::Plain(DecodeDifferent::Encode( + "(Vec)>>, u32)", + )), + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructCOMPLEXTYPE2( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageFunctionMetadata { + name: DecodeDifferent::Encode("COMPLEXTYPE3"), + modifier: StorageFunctionModifier::Default, + ty: StorageFunctionType::Plain(DecodeDifferent::Encode("([u32; 25])")), + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructCOMPLEXTYPE3( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + ]), + }; + + #[test] + fn store_metadata() { + let metadata = Module::::store_metadata(); + assert_eq!(EXPECTED_METADATA, metadata); + } + + #[test] + fn check_genesis_config() { + let config = GenesisConfig::::default(); + assert_eq!(config.u32_getter_with_config, 0u32); + assert_eq!(config.pub_u32_getter_with_config, 0u32); + + assert_eq!(config.pub_u32_getter_mydef, 3u32); + assert_eq!(config.u32_getter_with_config_mydef, 2u32); + assert_eq!(config.pub_u32_getter_with_config_mydef, 1u32); + assert_eq!(config.pub_u32_getter_with_config_mydef_opt, 100u32); + } } #[cfg(test)] #[allow(dead_code)] mod test2 { - pub trait Trait { - type Origin; - type BlockNumber; - } - - decl_module! { - pub struct Module for enum Call where origin: T::Origin {} - } - - type PairOf = (T, T); - - crate::decl_storage! { - trait Store for Module as TestStorage { - SingleDef : u32; - PairDef : PairOf; - Single : Option; - Pair : (u32, u32); - } - add_extra_genesis { - config(_marker) : ::std::marker::PhantomData; - config(extra_field) : u32 = 32; - build(|_, _, _| {}); - } - } - - struct TraitImpl {} - - impl Trait for TraitImpl { - type Origin = u32; - type BlockNumber = u32; - } + pub trait Trait { + type Origin; + type BlockNumber; + } + + decl_module! { + pub struct Module for enum Call where origin: T::Origin {} + } + + type PairOf = (T, T); + + crate::decl_storage! 
{ + trait Store for Module as TestStorage { + SingleDef : u32; + PairDef : PairOf; + Single : Option; + Pair : (u32, u32); + } + add_extra_genesis { + config(_marker) : ::std::marker::PhantomData; + config(extra_field) : u32 = 32; + build(|_, _, _| {}); + } + } + + struct TraitImpl {} + + impl Trait for TraitImpl { + type Origin = u32; + type BlockNumber = u32; + } } #[cfg(test)] #[allow(dead_code)] mod test3 { - pub trait Trait { - type Origin; - type BlockNumber; - } - decl_module! { - pub struct Module for enum Call where origin: T::Origin {} - } - crate::decl_storage! { - trait Store for Module as Test { - Foo get(foo) config(initial_foo): u32; - } - } - - type PairOf = (T, T); - - struct TraitImpl {} - - impl Trait for TraitImpl { - type Origin = u32; - type BlockNumber = u32; - } + pub trait Trait { + type Origin; + type BlockNumber; + } + decl_module! { + pub struct Module for enum Call where origin: T::Origin {} + } + crate::decl_storage! { + trait Store for Module as Test { + Foo get(foo) config(initial_foo): u32; + } + } + + type PairOf = (T, T); + + struct TraitImpl {} + + impl Trait for TraitImpl { + type Origin = u32; + type BlockNumber = u32; + } } diff --git a/srml/support/src/storage/mod.rs b/srml/support/src/storage/mod.rs index b1b8766b90..d1933d3453 100644 --- a/srml/support/src/storage/mod.rs +++ b/srml/support/src/storage/mod.rs @@ -16,361 +16,370 @@ //! Stuff to do with the runtime's storage. -use crate::rstd::prelude::*; +use crate::codec::{Codec, Decode, Encode, Input, KeyedVec}; use crate::rstd::borrow::Borrow; +use crate::rstd::prelude::*; use runtime_io::{self, twox_128}; -use crate::codec::{Codec, Encode, Decode, KeyedVec, Input}; #[macro_use] pub mod generator; pub mod unhashed; struct IncrementalInput<'a> { - key: &'a [u8], - pos: usize, + key: &'a [u8], + pos: usize, } impl<'a> Input for IncrementalInput<'a> { - fn read(&mut self, into: &mut [u8]) -> usize { - let len = runtime_io::read_storage(self.key, into, self.pos).unwrap_or(0); - let read = crate::rstd::cmp::min(len, into.len()); - self.pos += read; - read - } + fn read(&mut self, into: &mut [u8]) -> usize { + let len = runtime_io::read_storage(self.key, into, self.pos).unwrap_or(0); + let read = crate::rstd::cmp::min(len, into.len()); + self.pos += read; + read + } } struct IncrementalChildInput<'a> { - storage_key: &'a [u8], - key: &'a [u8], - pos: usize, + storage_key: &'a [u8], + key: &'a [u8], + pos: usize, } impl<'a> Input for IncrementalChildInput<'a> { - fn read(&mut self, into: &mut [u8]) -> usize { - let len = runtime_io::read_child_storage(self.storage_key, self.key, into, self.pos).unwrap_or(0); - let read = crate::rstd::cmp::min(len, into.len()); - self.pos += read; - read - } + fn read(&mut self, into: &mut [u8]) -> usize { + let len = + runtime_io::read_child_storage(self.storage_key, self.key, into, self.pos).unwrap_or(0); + let read = crate::rstd::cmp::min(len, into.len()); + self.pos += read; + read + } } - /// Return the value of the item in storage under `key`, or `None` if there is no explicit entry. pub fn get(key: &[u8]) -> Option { - unhashed::get(&twox_128(key)) + unhashed::get(&twox_128(key)) } /// Return the value of the item in storage under `key`, or the type's default if there is no /// explicit entry. pub fn get_or_default(key: &[u8]) -> T { - unhashed::get_or_default(&twox_128(key)) + unhashed::get_or_default(&twox_128(key)) } /// Return the value of the item in storage under `key`, or `default_value` if there is no /// explicit entry. 
pub fn get_or<T: Codec + Sized>(key: &[u8], default_value: T) -> T {
-	unhashed::get_or(&twox_128(key), default_value)
+    unhashed::get_or(&twox_128(key), default_value)
}

/// Return the value of the item in storage under `key`, or `default_value()` if there is no
/// explicit entry.
pub fn get_or_else<T: Codec + Sized, F: FnOnce() -> T>(key: &[u8], default_value: F) -> T {
-	unhashed::get_or_else(&twox_128(key), default_value)
+    unhashed::get_or_else(&twox_128(key), default_value)
}

/// Put `value` in storage under `key`.
pub fn put<T: Codec>(key: &[u8], value: &T) {
-	unhashed::put(&twox_128(key), value)
+    unhashed::put(&twox_128(key), value)
}

/// Remove `key` from storage, returning its value if it had an explicit entry or `None` otherwise.
pub fn take<T: Codec + Sized>(key: &[u8]) -> Option<T> {
-	unhashed::take(&twox_128(key))
+    unhashed::take(&twox_128(key))
}

/// Remove `key` from storage, returning its value, or, if there was no explicit entry in storage,
/// the default for its type.
pub fn take_or_default<T: Codec + Sized + Default>(key: &[u8]) -> T {
-	unhashed::take_or_default(&twox_128(key))
+    unhashed::take_or_default(&twox_128(key))
}

/// Return the value of the item in storage under `key`, or `default_value` if there is no
/// explicit entry. Ensure there is no explicit entry on return.
pub fn take_or<T: Codec + Sized>(key: &[u8], default_value: T) -> T {
-	unhashed::take_or(&twox_128(key), default_value)
+    unhashed::take_or(&twox_128(key), default_value)
}

/// Return the value of the item in storage under `key`, or `default_value()` if there is no
/// explicit entry. Ensure there is no explicit entry on return.
pub fn take_or_else<T: Codec + Sized, F: FnOnce() -> T>(key: &[u8], default_value: F) -> T {
-	unhashed::take_or_else(&twox_128(key), default_value)
+    unhashed::take_or_else(&twox_128(key), default_value)
}

/// Check to see if `key` has an explicit entry in storage.
pub fn exists(key: &[u8]) -> bool {
-	unhashed::exists(&twox_128(key))
+    unhashed::exists(&twox_128(key))
}

/// Ensure `key` has no explicit entry in storage.
pub fn kill(key: &[u8]) {
-	unhashed::kill(&twox_128(key))
+    unhashed::kill(&twox_128(key))
}

/// Get a Vec of bytes from storage.
pub fn get_raw(key: &[u8]) -> Option<Vec<u8>> {
-	unhashed::get_raw(&twox_128(key))
+    unhashed::get_raw(&twox_128(key))
}

/// Put a raw byte slice into storage.
pub fn put_raw(key: &[u8], value: &[u8]) {
-	unhashed::put_raw(&twox_128(key), value)
+    unhashed::put_raw(&twox_128(key), value)
}

/// The underlying runtime storage.
pub struct RuntimeStorage;

impl crate::GenericStorage for RuntimeStorage {
-	fn exists(&self, key: &[u8]) -> bool {
-		exists(key)
-	}
+    fn exists(&self, key: &[u8]) -> bool {
+        exists(key)
+    }

-	/// Load the bytes of a key from storage. Can panic if the type is incorrect.
-	fn get<T: Codec>(&self, key: &[u8]) -> Option<T> {
-		get(key)
-	}
+    /// Load the bytes of a key from storage. Can panic if the type is incorrect.
+    fn get<T: Codec>(&self, key: &[u8]) -> Option<T> {
+        get(key)
+    }

-	/// Put a value in under a key.
-	fn put<T: Codec>(&self, key: &[u8], val: &T) {
-		put(key, val)
-	}
+    /// Put a value in under a key.
+    fn put<T: Codec>(&self, key: &[u8], val: &T) {
+        put(key, val)
+    }

-	/// Remove the bytes of a key from storage.
-	fn kill(&self, key: &[u8]) {
-		kill(key)
-	}
+    /// Remove the bytes of a key from storage.
+    fn kill(&self, key: &[u8]) {
+        kill(key)
+    }

-	/// Take a value from storage, deleting it after reading.
-	fn take<T: Codec>(&self, key: &[u8]) -> Option<T> {
-		take(key)
-	}
+    /// Take a value from storage, deleting it after reading.
+    fn take<T: Codec>(&self, key: &[u8]) -> Option<T> {
+        take(key)
+    }
}

impl crate::GenericUnhashedStorage for RuntimeStorage {
-	fn exists(&self, key: &[u8]) -> bool {
-		unhashed::exists(key)
-	}
+    fn exists(&self, key: &[u8]) -> bool {
+        unhashed::exists(key)
+    }

-	/// Load the bytes of a key from storage. Can panic if the type is incorrect.
-	fn get<T: Codec>(&self, key: &[u8]) -> Option<T> {
-		unhashed::get(key)
-	}
+    /// Load the bytes of a key from storage. Can panic if the type is incorrect.
+    fn get<T: Codec>(&self, key: &[u8]) -> Option<T> {
+        unhashed::get(key)
+    }

-	/// Put a value in under a key.
-	fn put<T: Codec>(&self, key: &[u8], val: &T) {
-		unhashed::put(key, val)
-	}
+    /// Put a value in under a key.
+    fn put<T: Codec>(&self, key: &[u8], val: &T) {
+        unhashed::put(key, val)
+    }

-	/// Remove the bytes of a key from storage.
-	fn kill(&self, key: &[u8]) {
-		unhashed::kill(key)
-	}
+    /// Remove the bytes of a key from storage.
+    fn kill(&self, key: &[u8]) {
+        unhashed::kill(key)
+    }

-	/// Remove the bytes of a key from storage.
-	fn kill_prefix(&self, prefix: &[u8]) {
-		unhashed::kill_prefix(prefix)
-	}
+    /// Remove the bytes of a key from storage.
+    fn kill_prefix(&self, prefix: &[u8]) {
+        unhashed::kill_prefix(prefix)
+    }

-	/// Take a value from storage, deleting it after reading.
-	fn take<T: Codec>(&self, key: &[u8]) -> Option<T> {
-		unhashed::take(key)
-	}
+    /// Take a value from storage, deleting it after reading.
+    fn take<T: Codec>(&self, key: &[u8]) -> Option<T> {
+        unhashed::take(key)
+    }
}

/// A trait for working with macro-generated storage values under the substrate storage API.
pub trait StorageValue<T: Codec> {
-	/// The type that get/take return.
-	type Query;
+    /// The type that get/take return.
+    type Query;

-	/// Get the storage key.
-	fn key() -> &'static [u8];
+    /// Get the storage key.
+    fn key() -> &'static [u8];

-	/// Does the value (explicitly) exist in storage?
-	fn exists() -> bool;
+    /// Does the value (explicitly) exist in storage?
+    fn exists() -> bool;

-	/// Load the value from the provided storage instance.
-	fn get() -> Self::Query;
+    /// Load the value from the provided storage instance.
+    fn get() -> Self::Query;

-	/// Store a value under this key into the provided storage instance.
-	fn put<Arg: Borrow<T>>(val: Arg);
+    /// Store a value under this key into the provided storage instance.
+    fn put<Arg: Borrow<T>>(val: Arg);

-	/// Mutate the value
-	fn mutate<R, F: FnOnce(&mut Self::Query) -> R>(f: F) -> R;
+    /// Mutate the value
+    fn mutate<R, F: FnOnce(&mut Self::Query) -> R>(f: F) -> R;

-	/// Clear the storage value.
-	fn kill();
+    /// Clear the storage value.
+    fn kill();

-	/// Take a value from storage, removing it afterwards.
-	fn take() -> Self::Query;
+    /// Take a value from storage, removing it afterwards.
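// A usage sketch for this trait, assuming a `Value` item like the one declared
// via `storage_items!` in the generator tests, where `Query` is `Option<u32>`:
//
//     Value::put(100_000u32);
//     assert_eq!(Value::get(), Some(100_000));
//     Value::mutate(|v: &mut Option<u32>| *v = v.take().map(|n| n + 1));
//     Value::kill();
//     assert!(Value::get().is_none());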
+ fn take() -> Self::Query; } -impl StorageValue for U where U: generator::StorageValue { - type Query = U::Query; - - fn key() -> &'static [u8] { - >::key() - } - fn exists() -> bool { - U::exists(&RuntimeStorage) - } - fn get() -> Self::Query { - U::get(&RuntimeStorage) - } - fn put>(val: Arg) { - U::put(val.borrow(), &RuntimeStorage) - } - fn mutate R>(f: F) -> R { - U::mutate(f, &RuntimeStorage) - } - fn kill() { - U::kill(&RuntimeStorage) - } - fn take() -> Self::Query { - U::take(&RuntimeStorage) - } +impl StorageValue for U +where + U: generator::StorageValue, +{ + type Query = U::Query; + + fn key() -> &'static [u8] { + >::key() + } + fn exists() -> bool { + U::exists(&RuntimeStorage) + } + fn get() -> Self::Query { + U::get(&RuntimeStorage) + } + fn put>(val: Arg) { + U::put(val.borrow(), &RuntimeStorage) + } + fn mutate R>(f: F) -> R { + U::mutate(f, &RuntimeStorage) + } + fn kill() { + U::kill(&RuntimeStorage) + } + fn take() -> Self::Query { + U::take(&RuntimeStorage) + } } /// A strongly-typed list in storage. pub trait StorageList { - /// Get the prefix key in storage. - fn prefix() -> &'static [u8]; + /// Get the prefix key in storage. + fn prefix() -> &'static [u8]; - /// Get the key used to store the length field. - fn len_key() -> Vec; + /// Get the key used to store the length field. + fn len_key() -> Vec; - /// Get the storage key used to fetch a value at a given index. - fn key_for(index: u32) -> Vec; + /// Get the storage key used to fetch a value at a given index. + fn key_for(index: u32) -> Vec; - /// Read out all the items. - fn items() -> Vec; + /// Read out all the items. + fn items() -> Vec; - /// Set the current set of items. - fn set_items(items: &[T]); + /// Set the current set of items. + fn set_items(items: &[T]); - /// Set the item at the given index. - fn set_item>(index: u32, val: Arg); + /// Set the item at the given index. + fn set_item>(index: u32, val: Arg); - /// Load the value at given index. Returns `None` if the index is out-of-bounds. - fn get(index: u32) -> Option; + /// Load the value at given index. Returns `None` if the index is out-of-bounds. + fn get(index: u32) -> Option; - /// Load the length of the list - fn len() -> u32; + /// Load the length of the list + fn len() -> u32; - /// Clear the list. - fn clear(); + /// Clear the list. 
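// A usage sketch, assuming a `List` item like `List: b"b:" => list [u64]` from
// the generator tests; the length lives under `len_key()` and element `i` under
// `key_for(i)`:
//
//     List::set_items(&[0u64, 2, 4]);
//     assert_eq!(List::len(), 3);
//     List::set_item(1, 10u64);
//     assert_eq!(List::get(1), Some(10));
//     List::clear();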
+ fn clear(); } -impl StorageList for U where U: generator::StorageList { - fn prefix() -> &'static [u8] { - >::prefix() - } +impl StorageList for U +where + U: generator::StorageList, +{ + fn prefix() -> &'static [u8] { + >::prefix() + } - fn len_key() -> Vec { - >::len_key() - } + fn len_key() -> Vec { + >::len_key() + } - fn key_for(index: u32) -> Vec { - >::key_for(index) - } + fn key_for(index: u32) -> Vec { + >::key_for(index) + } - fn items() -> Vec { - U::items(&RuntimeStorage) - } + fn items() -> Vec { + U::items(&RuntimeStorage) + } - fn set_items(items: &[T]) { - U::set_items(items, &RuntimeStorage) - } + fn set_items(items: &[T]) { + U::set_items(items, &RuntimeStorage) + } - fn set_item>(index: u32, val: Arg) { - U::set_item(index, val.borrow(), &RuntimeStorage) - } + fn set_item>(index: u32, val: Arg) { + U::set_item(index, val.borrow(), &RuntimeStorage) + } - fn get(index: u32) -> Option { - U::get(index, &RuntimeStorage) - } + fn get(index: u32) -> Option { + U::get(index, &RuntimeStorage) + } - fn len() -> u32 { - U::len(&RuntimeStorage) - } + fn len() -> u32 { + U::len(&RuntimeStorage) + } - fn clear() { - U::clear(&RuntimeStorage) - } + fn clear() { + U::clear(&RuntimeStorage) + } } /// A strongly-typed map in storage. pub trait StorageMap { - /// The type that get/take return. - type Query; + /// The type that get/take return. + type Query; - /// Get the prefix key in storage. - fn prefix() -> &'static [u8]; + /// Get the prefix key in storage. + fn prefix() -> &'static [u8]; - /// Get the storage key used to fetch a value corresponding to a specific key. - fn key_for>(key: KeyArg) -> Vec; + /// Get the storage key used to fetch a value corresponding to a specific key. + fn key_for>(key: KeyArg) -> Vec; - /// Does the value (explicitly) exist in storage? - fn exists>(key: KeyArg) -> bool; + /// Does the value (explicitly) exist in storage? + fn exists>(key: KeyArg) -> bool; - /// Load the value associated with the given key from the map. - fn get>(key: KeyArg) -> Self::Query; + /// Load the value associated with the given key from the map. + fn get>(key: KeyArg) -> Self::Query; - /// Store a value to be associated with the given key from the map. - fn insert, ValArg: Borrow>(key: KeyArg, val: ValArg); + /// Store a value to be associated with the given key from the map. + fn insert, ValArg: Borrow>(key: KeyArg, val: ValArg); - /// Remove the value under a key. - fn remove>(key: KeyArg); + /// Remove the value under a key. + fn remove>(key: KeyArg); - /// Mutate the value under a key. - fn mutate, R, F: FnOnce(&mut Self::Query) -> R>(key: KeyArg, f: F) -> R; + /// Mutate the value under a key. + fn mutate, R, F: FnOnce(&mut Self::Query) -> R>(key: KeyArg, f: F) -> R; - /// Take the value under a key. - fn take>(key: KeyArg) -> Self::Query; + /// Take the value under a key. 
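// A usage sketch, assuming a `Map` item like `Map: b"c:" => map [u32 => [u8; 32]]`
// from the generator tests; key and value arguments go through `Borrow`, so owned
// and borrowed forms both work:
//
//     Map::insert(5u32, [1u8; 32]);
//     assert_eq!(Map::get(&5u32), Some([1u8; 32]));
//     assert_eq!(Map::take(5u32), Some([1u8; 32]));
//     assert!(!Map::exists(5u32));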
+ fn take>(key: KeyArg) -> Self::Query; } -impl StorageMap for U where U: generator::StorageMap { - type Query = U::Query; +impl StorageMap for U +where + U: generator::StorageMap, +{ + type Query = U::Query; - fn prefix() -> &'static [u8] { - >::prefix() - } + fn prefix() -> &'static [u8] { + >::prefix() + } - fn key_for>(key: KeyArg) -> Vec { - >::key_for(key.borrow()) - } + fn key_for>(key: KeyArg) -> Vec { + >::key_for(key.borrow()) + } - fn exists>(key: KeyArg) -> bool { - U::exists(key.borrow(), &RuntimeStorage) - } + fn exists>(key: KeyArg) -> bool { + U::exists(key.borrow(), &RuntimeStorage) + } - fn get>(key: KeyArg) -> Self::Query { - U::get(key.borrow(), &RuntimeStorage) - } + fn get>(key: KeyArg) -> Self::Query { + U::get(key.borrow(), &RuntimeStorage) + } - fn insert, ValArg: Borrow>(key: KeyArg, val: ValArg) { - U::insert(key.borrow(), val.borrow(), &RuntimeStorage) - } + fn insert, ValArg: Borrow>(key: KeyArg, val: ValArg) { + U::insert(key.borrow(), val.borrow(), &RuntimeStorage) + } - fn remove>(key: KeyArg) { - U::remove(key.borrow(), &RuntimeStorage) - } + fn remove>(key: KeyArg) { + U::remove(key.borrow(), &RuntimeStorage) + } - fn mutate, R, F: FnOnce(&mut Self::Query) -> R>(key: KeyArg, f: F) -> R { - U::mutate(key.borrow(), f, &RuntimeStorage) - } + fn mutate, R, F: FnOnce(&mut Self::Query) -> R>(key: KeyArg, f: F) -> R { + U::mutate(key.borrow(), f, &RuntimeStorage) + } - fn take>(key: KeyArg) -> Self::Query { - U::take(key.borrow(), &RuntimeStorage) - } + fn take>(key: KeyArg) -> Self::Query { + U::take(key.borrow(), &RuntimeStorage) + } } /// A storage map that can be enumerated. @@ -378,21 +387,31 @@ impl StorageMap for U where U: generator::StorageMa /// Note that type is primarily useful for off-chain computations. /// Runtime implementors should avoid enumerating storage entries. pub trait EnumerableStorageMap: StorageMap { - /// Return current head element. - fn head() -> Option; + /// Return current head element. + fn head() -> Option; - /// Enumerate all elements in the map. - fn enumerate() -> Box> where K: 'static, V: 'static; + /// Enumerate all elements in the map. + fn enumerate() -> Box> + where + K: 'static, + V: 'static; } -impl EnumerableStorageMap for U where U: generator::EnumerableStorageMap { - fn head() -> Option { - >::head(&RuntimeStorage) - } +impl EnumerableStorageMap for U +where + U: generator::EnumerableStorageMap, +{ + fn head() -> Option { + >::head(&RuntimeStorage) + } - fn enumerate() -> Box> where K: 'static, V: 'static { - >::enumerate(&RuntimeStorage) - } + fn enumerate() -> Box> + where + K: 'static, + V: 'static, + { + >::enumerate(&RuntimeStorage) + } } /// An implementation of a map with a two keys. @@ -408,317 +427,334 @@ impl EnumerableStorageMap for U where U: generator: /// /// /!\ be careful while choosing the Hash, indeed malicious could craft second keys to lower the trie. pub trait StorageDoubleMap { - /// The type that get/take returns. - type Query; + /// The type that get/take returns. + type Query; - /// Get the prefix key in storage. - fn prefix() -> &'static [u8]; + /// Get the prefix key in storage. + fn prefix() -> &'static [u8]; - /// Get the storage key used to fetch a value corresponding to a specific key. - fn key_for, KArg2: Borrow>(k1: KArg1, k2: KArg2) -> Vec; + /// Get the storage key used to fetch a value corresponding to a specific key. + fn key_for, KArg2: Borrow>(k1: KArg1, k2: KArg2) -> Vec; - /// Get the storage prefix used to fetch keys corresponding to a specific key1. 
- fn prefix_for>(k1: KArg1) -> Vec; + /// Get the storage prefix used to fetch keys corresponding to a specific key1. + fn prefix_for>(k1: KArg1) -> Vec; - /// true if the value is defined in storage. - fn exists, KArg2: Borrow>(k1: KArg1, k2: KArg2) -> bool; + /// true if the value is defined in storage. + fn exists, KArg2: Borrow>(k1: KArg1, k2: KArg2) -> bool; - /// Load the value associated with the given key from the map. - fn get, KArg2: Borrow>(k1: KArg1, k2: KArg2) -> Self::Query; + /// Load the value associated with the given key from the map. + fn get, KArg2: Borrow>(k1: KArg1, k2: KArg2) -> Self::Query; - /// Take the value under a key. - fn take, KArg2: Borrow>(k1: KArg1, k2: KArg2) -> Self::Query; + /// Take the value under a key. + fn take, KArg2: Borrow>(k1: KArg1, k2: KArg2) -> Self::Query; - /// Store a value to be associated with the given key from the map. - fn insert, KArg2: Borrow, VArg: Borrow>(k1: KArg1, k2: KArg2, val: VArg); + /// Store a value to be associated with the given key from the map. + fn insert, KArg2: Borrow, VArg: Borrow>( + k1: KArg1, + k2: KArg2, + val: VArg, + ); - /// Remove the value under a key. - fn remove, KArg2: Borrow>(k1: KArg1, k2: KArg2); + /// Remove the value under a key. + fn remove, KArg2: Borrow>(k1: KArg1, k2: KArg2); - /// Removes all entries that shares the `k1` as the first key. - fn remove_prefix>(k1: KArg1); + /// Removes all entries that shares the `k1` as the first key. + fn remove_prefix>(k1: KArg1); - /// Mutate the value under a key. - fn mutate(k1: KArg1, k2: KArg2, f: F) -> R - where - KArg1: Borrow, - KArg2: Borrow, - F: FnOnce(&mut Self::Query) -> R; + /// Mutate the value under a key. + fn mutate(k1: KArg1, k2: KArg2, f: F) -> R + where + KArg1: Borrow, + KArg2: Borrow, + F: FnOnce(&mut Self::Query) -> R; } impl StorageDoubleMap for U where - U: unhashed::generator::StorageDoubleMap + U: unhashed::generator::StorageDoubleMap, { - type Query = U::Query; - - fn prefix() -> &'static [u8] { - >::prefix() - } - - fn key_for, KArg2: Borrow>(k1: KArg1, k2: KArg2) -> Vec { - >::key_for(k1.borrow(), k2.borrow()) - } - - fn prefix_for>(k1: KArg1) -> Vec { - >::prefix_for(k1.borrow()) - } - - fn exists, KArg2: Borrow>(k1: KArg1, k2: KArg2) -> bool { - U::exists(k1.borrow(), k2.borrow(), &RuntimeStorage) - } - - fn get, KArg2: Borrow>(k1: KArg1, k2: KArg2) -> Self::Query { - U::get(k1.borrow(), k2.borrow(), &RuntimeStorage) - } - - fn take, KArg2: Borrow>(k1: KArg1, k2: KArg2) -> Self::Query { - U::take(k1.borrow(), k2.borrow(), &RuntimeStorage) - } - - fn insert, KArg2: Borrow, VArg: Borrow>(k1: KArg1, k2: KArg2, val: VArg) { - U::insert(k1.borrow(), k2.borrow(), val.borrow(), &RuntimeStorage) - } - - fn remove, KArg2: Borrow>(k1: KArg1, k2: KArg2) { - U::remove(k1.borrow(), k2.borrow(), &RuntimeStorage) - } - - fn remove_prefix>(k1: KArg1) { - U::remove_prefix(k1.borrow(), &RuntimeStorage) - } - - fn mutate(k1: KArg1, k2: KArg2, f: F) -> R - where - KArg1: Borrow, - KArg2: Borrow, - F: FnOnce(&mut Self::Query) -> R - { - U::mutate(k1.borrow(), k2.borrow(), f, &RuntimeStorage) - } + type Query = U::Query; + + fn prefix() -> &'static [u8] { + >::prefix() + } + + fn key_for, KArg2: Borrow>(k1: KArg1, k2: KArg2) -> Vec { + >::key_for(k1.borrow(), k2.borrow()) + } + + fn prefix_for>(k1: KArg1) -> Vec { + >::prefix_for(k1.borrow()) + } + + fn exists, KArg2: Borrow>(k1: KArg1, k2: KArg2) -> bool { + U::exists(k1.borrow(), k2.borrow(), &RuntimeStorage) + } + + fn get, KArg2: Borrow>(k1: KArg1, k2: KArg2) -> Self::Query { + 
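// Key-layout sketch implied by the trait contract above (hedged: the exact key
// derivation lives in `unhashed::generator::StorageDoubleMap`): `prefix_for(k1)`
// is a strict prefix of every `key_for(k1, k2)`, which is what lets
// `remove_prefix(k1)` clear all entries sharing `k1`. With hypothetical `M`,
// `K1`, `K2`, `V`, `k1`, `k2`:
//
//     let full = <M as StorageDoubleMap<K1, K2, V>>::key_for(&k1, &k2);
//     let head = <M as StorageDoubleMap<K1, K2, V>>::prefix_for(&k1);
//     assert!(full.starts_with(&head));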
U::get(k1.borrow(), k2.borrow(), &RuntimeStorage) + } + + fn take, KArg2: Borrow>(k1: KArg1, k2: KArg2) -> Self::Query { + U::take(k1.borrow(), k2.borrow(), &RuntimeStorage) + } + + fn insert, KArg2: Borrow, VArg: Borrow>( + k1: KArg1, + k2: KArg2, + val: VArg, + ) { + U::insert(k1.borrow(), k2.borrow(), val.borrow(), &RuntimeStorage) + } + + fn remove, KArg2: Borrow>(k1: KArg1, k2: KArg2) { + U::remove(k1.borrow(), k2.borrow(), &RuntimeStorage) + } + + fn remove_prefix>(k1: KArg1) { + U::remove_prefix(k1.borrow(), &RuntimeStorage) + } + + fn mutate(k1: KArg1, k2: KArg2, f: F) -> R + where + KArg1: Borrow, + KArg2: Borrow, + F: FnOnce(&mut Self::Query) -> R, + { + U::mutate(k1.borrow(), k2.borrow(), f, &RuntimeStorage) + } } /// A trait to conveniently store a vector of storable data. pub trait StorageVec { - type Item: Default + Sized + Codec; - const PREFIX: &'static [u8]; - - /// Get the current set of items. - fn items() -> Vec { - (0..Self::count()).into_iter().map(Self::item).collect() - } - - /// Set the current set of items. - fn set_items(items: I) - where - I: IntoIterator, - T: Borrow, - { - let mut count: u32 = 0; - - for i in items.into_iter() { - put(&count.to_keyed_vec(Self::PREFIX), i.borrow()); - count = count.checked_add(1).expect("exceeded runtime storage capacity"); - } - - Self::set_count(count); - } - - /// Push an item. - fn push(item: &Self::Item) { - let len = Self::count(); - put(&len.to_keyed_vec(Self::PREFIX), item); - Self::set_count(len + 1); - } - - fn set_item(index: u32, item: &Self::Item) { - if index < Self::count() { - put(&index.to_keyed_vec(Self::PREFIX), item); - } - } - - fn clear_item(index: u32) { - if index < Self::count() { - kill(&index.to_keyed_vec(Self::PREFIX)); - } - } - - fn item(index: u32) -> Self::Item { - get_or_default(&index.to_keyed_vec(Self::PREFIX)) - } - - fn set_count(count: u32) { - (count..Self::count()).for_each(Self::clear_item); - put(&b"len".to_keyed_vec(Self::PREFIX), &count); - } - - fn count() -> u32 { - get_or_default(&b"len".to_keyed_vec(Self::PREFIX)) - } + type Item: Default + Sized + Codec; + const PREFIX: &'static [u8]; + + /// Get the current set of items. + fn items() -> Vec { + (0..Self::count()).into_iter().map(Self::item).collect() + } + + /// Set the current set of items. + fn set_items(items: I) + where + I: IntoIterator, + T: Borrow, + { + let mut count: u32 = 0; + + for i in items.into_iter() { + put(&count.to_keyed_vec(Self::PREFIX), i.borrow()); + count = count + .checked_add(1) + .expect("exceeded runtime storage capacity"); + } + + Self::set_count(count); + } + + /// Push an item. + fn push(item: &Self::Item) { + let len = Self::count(); + put(&len.to_keyed_vec(Self::PREFIX), item); + Self::set_count(len + 1); + } + + fn set_item(index: u32, item: &Self::Item) { + if index < Self::count() { + put(&index.to_keyed_vec(Self::PREFIX), item); + } + } + + fn clear_item(index: u32) { + if index < Self::count() { + kill(&index.to_keyed_vec(Self::PREFIX)); + } + } + + fn item(index: u32) -> Self::Item { + get_or_default(&index.to_keyed_vec(Self::PREFIX)) + } + + fn set_count(count: u32) { + (count..Self::count()).for_each(Self::clear_item); + put(&b"len".to_keyed_vec(Self::PREFIX), &count); + } + + fn count() -> u32 { + get_or_default(&b"len".to_keyed_vec(Self::PREFIX)) + } } /// child storage NOTE could replace unhashed by having only one kind of storage (root being null storage /// key (storage_key can become Option<&[u8]>). 
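// Layout sketch for the `StorageVec` trait above: element `i` sits under
// `i.to_keyed_vec(PREFIX)` and the count under `b"len".to_keyed_vec(PREFIX)`;
// `set_count` clears any trailing items before shrinking. A hypothetical
// implementor:
//
//     struct Authorities;
//     impl StorageVec for Authorities {
//         type Item = u64;
//         const PREFIX: &'static [u8] = b":auth:";
//     }
//
//     Authorities::push(&7);
//     assert_eq!(Authorities::count(), 1);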
/// This module is a currently only a variant of unhashed with additional `storage_key`. -/// Note that `storage_key` must be unique and strong (strong in the sense of being long enough to +/// Note that `storage_key` must be unique and strong (strong in the sense of being long enough to /// avoid collision from a resistant hash function (which unique implies)). pub mod child { - use super::{runtime_io, Codec, Decode, Vec, IncrementalChildInput}; - - /// Return the value of the item in storage under `key`, or `None` if there is no explicit entry. - pub fn get(storage_key: &[u8], key: &[u8]) -> Option { - runtime_io::read_child_storage(storage_key, key, &mut [0; 0][..], 0).map(|_| { - let mut input = IncrementalChildInput { - storage_key, - key, - pos: 0, - }; - Decode::decode(&mut input).expect("storage is not null, therefore must be a valid type") - }) - } - - /// Return the value of the item in storage under `key`, or the type's default if there is no - /// explicit entry. - pub fn get_or_default(storage_key: &[u8], key: &[u8]) -> T { - get(storage_key, key).unwrap_or_else(Default::default) - } - - /// Return the value of the item in storage under `key`, or `default_value` if there is no - /// explicit entry. - pub fn get_or(storage_key: &[u8], key: &[u8], default_value: T) -> T { - get(storage_key, key).unwrap_or(default_value) - } - - /// Return the value of the item in storage under `key`, or `default_value()` if there is no - /// explicit entry. - pub fn get_or_else T>(storage_key: &[u8], key: &[u8], default_value: F) -> T { - get(storage_key, key).unwrap_or_else(default_value) - } - - /// Put `value` in storage under `key`. - pub fn put(storage_key: &[u8], key: &[u8], value: &T) { - value.using_encoded(|slice| runtime_io::set_child_storage(storage_key, key, slice)); - } - - /// Remove `key` from storage, returning its value if it had an explicit entry or `None` otherwise. - pub fn take(storage_key: &[u8], key: &[u8]) -> Option { - let r = get(storage_key, key); - if r.is_some() { - kill(storage_key, key); - } - r - } - - /// Remove `key` from storage, returning its value, or, if there was no explicit entry in storage, - /// the default for its type. - pub fn take_or_default(storage_key: &[u8], key: &[u8]) -> T { - take(storage_key, key).unwrap_or_else(Default::default) - } - - /// Return the value of the item in storage under `key`, or `default_value` if there is no - /// explicit entry. Ensure there is no explicit entry on return. - pub fn take_or(storage_key: &[u8],key: &[u8], default_value: T) -> T { - take(storage_key, key).unwrap_or(default_value) - } - - /// Return the value of the item in storage under `key`, or `default_value()` if there is no - /// explicit entry. Ensure there is no explicit entry on return. - pub fn take_or_else T>(storage_key: &[u8], key: &[u8], default_value: F) -> T { - take(storage_key, key).unwrap_or_else(default_value) - } - - /// Check to see if `key` has an explicit entry in storage. - pub fn exists(storage_key: &[u8], key: &[u8]) -> bool { - runtime_io::read_child_storage(storage_key, key, &mut [0;0][..], 0).is_some() - } - - /// Remove all `storage_key` key/values - pub fn kill_storage(storage_key: &[u8]) { - runtime_io::kill_child_storage(storage_key) - } - - /// Ensure `key` has no explicit entry in storage. - pub fn kill(storage_key: &[u8], key: &[u8]) { - runtime_io::clear_child_storage(storage_key, key); - } - - /// Get a Vec of bytes from storage. 
- pub fn get_raw(storage_key: &[u8], key: &[u8]) -> Option> { - runtime_io::child_storage(storage_key, key) - } - - /// Put a raw byte slice into storage. - pub fn put_raw(storage_key: &[u8], key: &[u8], value: &[u8]) { - runtime_io::set_child_storage(storage_key, key, value) - } - - pub use super::unhashed::StorageVec; + use super::{runtime_io, Codec, Decode, IncrementalChildInput, Vec}; + + /// Return the value of the item in storage under `key`, or `None` if there is no explicit entry. + pub fn get(storage_key: &[u8], key: &[u8]) -> Option { + runtime_io::read_child_storage(storage_key, key, &mut [0; 0][..], 0).map(|_| { + let mut input = IncrementalChildInput { + storage_key, + key, + pos: 0, + }; + Decode::decode(&mut input).expect("storage is not null, therefore must be a valid type") + }) + } + + /// Return the value of the item in storage under `key`, or the type's default if there is no + /// explicit entry. + pub fn get_or_default(storage_key: &[u8], key: &[u8]) -> T { + get(storage_key, key).unwrap_or_else(Default::default) + } + + /// Return the value of the item in storage under `key`, or `default_value` if there is no + /// explicit entry. + pub fn get_or(storage_key: &[u8], key: &[u8], default_value: T) -> T { + get(storage_key, key).unwrap_or(default_value) + } + + /// Return the value of the item in storage under `key`, or `default_value()` if there is no + /// explicit entry. + pub fn get_or_else T>( + storage_key: &[u8], + key: &[u8], + default_value: F, + ) -> T { + get(storage_key, key).unwrap_or_else(default_value) + } + + /// Put `value` in storage under `key`. + pub fn put(storage_key: &[u8], key: &[u8], value: &T) { + value.using_encoded(|slice| runtime_io::set_child_storage(storage_key, key, slice)); + } + + /// Remove `key` from storage, returning its value if it had an explicit entry or `None` otherwise. + pub fn take(storage_key: &[u8], key: &[u8]) -> Option { + let r = get(storage_key, key); + if r.is_some() { + kill(storage_key, key); + } + r + } + + /// Remove `key` from storage, returning its value, or, if there was no explicit entry in storage, + /// the default for its type. + pub fn take_or_default(storage_key: &[u8], key: &[u8]) -> T { + take(storage_key, key).unwrap_or_else(Default::default) + } + + /// Return the value of the item in storage under `key`, or `default_value` if there is no + /// explicit entry. Ensure there is no explicit entry on return. + pub fn take_or(storage_key: &[u8], key: &[u8], default_value: T) -> T { + take(storage_key, key).unwrap_or(default_value) + } + + /// Return the value of the item in storage under `key`, or `default_value()` if there is no + /// explicit entry. Ensure there is no explicit entry on return. + pub fn take_or_else T>( + storage_key: &[u8], + key: &[u8], + default_value: F, + ) -> T { + take(storage_key, key).unwrap_or_else(default_value) + } + + /// Check to see if `key` has an explicit entry in storage. + pub fn exists(storage_key: &[u8], key: &[u8]) -> bool { + runtime_io::read_child_storage(storage_key, key, &mut [0; 0][..], 0).is_some() + } + + /// Remove all `storage_key` key/values + pub fn kill_storage(storage_key: &[u8]) { + runtime_io::kill_child_storage(storage_key) + } + + /// Ensure `key` has no explicit entry in storage. + pub fn kill(storage_key: &[u8], key: &[u8]) { + runtime_io::clear_child_storage(storage_key, key); + } + + /// Get a Vec of bytes from storage. 
+ pub fn get_raw(storage_key: &[u8], key: &[u8]) -> Option> { + runtime_io::child_storage(storage_key, key) + } + + /// Put a raw byte slice into storage. + pub fn put_raw(storage_key: &[u8], key: &[u8], value: &[u8]) { + runtime_io::set_child_storage(storage_key, key, value) + } + + pub use super::unhashed::StorageVec; } #[cfg(test)] mod tests { - use super::*; - use runtime_io::{twox_128, TestExternalities, with_externalities}; - - #[test] - fn integers_can_be_stored() { - let mut t = TestExternalities::default(); - with_externalities(&mut t, || { - let x = 69u32; - put(b":test", &x); - let y: u32 = get(b":test").unwrap(); - assert_eq!(x, y); - }); - with_externalities(&mut t, || { - let x = 69426942i64; - put(b":test", &x); - let y: i64 = get(b":test").unwrap(); - assert_eq!(x, y); - }); - } - - #[test] - fn bools_can_be_stored() { - let mut t = TestExternalities::default(); - with_externalities(&mut t, || { - let x = true; - put(b":test", &x); - let y: bool = get(b":test").unwrap(); - assert_eq!(x, y); - }); - - with_externalities(&mut t, || { - let x = false; - put(b":test", &x); - let y: bool = get(b":test").unwrap(); - assert_eq!(x, y); - }); - } - - #[test] - fn vecs_can_be_retrieved() { - let mut t = TestExternalities::default(); - with_externalities(&mut t, || { - runtime_io::set_storage(&twox_128(b":test"), b"\x2cHello world"); - let x = b"Hello world".to_vec(); - let y = get::>(b":test").unwrap(); - assert_eq!(x, y); - - }); - } - - #[test] - fn vecs_can_be_stored() { - let mut t = TestExternalities::default(); - let x = b"Hello world".to_vec(); - - with_externalities(&mut t, || { - put(b":test", &x); - }); - - with_externalities(&mut t, || { - let y: Vec = get(b":test").unwrap(); - assert_eq!(x, y); - }); - } + use super::*; + use runtime_io::{twox_128, with_externalities, TestExternalities}; + + #[test] + fn integers_can_be_stored() { + let mut t = TestExternalities::default(); + with_externalities(&mut t, || { + let x = 69u32; + put(b":test", &x); + let y: u32 = get(b":test").unwrap(); + assert_eq!(x, y); + }); + with_externalities(&mut t, || { + let x = 69426942i64; + put(b":test", &x); + let y: i64 = get(b":test").unwrap(); + assert_eq!(x, y); + }); + } + + #[test] + fn bools_can_be_stored() { + let mut t = TestExternalities::default(); + with_externalities(&mut t, || { + let x = true; + put(b":test", &x); + let y: bool = get(b":test").unwrap(); + assert_eq!(x, y); + }); + + with_externalities(&mut t, || { + let x = false; + put(b":test", &x); + let y: bool = get(b":test").unwrap(); + assert_eq!(x, y); + }); + } + + #[test] + fn vecs_can_be_retrieved() { + let mut t = TestExternalities::default(); + with_externalities(&mut t, || { + runtime_io::set_storage(&twox_128(b":test"), b"\x2cHello world"); + let x = b"Hello world".to_vec(); + let y = get::>(b":test").unwrap(); + assert_eq!(x, y); + }); + } + + #[test] + fn vecs_can_be_stored() { + let mut t = TestExternalities::default(); + let x = b"Hello world".to_vec(); + + with_externalities(&mut t, || { + put(b":test", &x); + }); + + with_externalities(&mut t, || { + let y: Vec = get(b":test").unwrap(); + assert_eq!(x, y); + }); + } } diff --git a/srml/support/src/storage/unhashed/generator.rs b/srml/support/src/storage/unhashed/generator.rs index 2b046013bb..c0c84a7f3d 100644 --- a/srml/support/src/storage/unhashed/generator.rs +++ b/srml/support/src/storage/unhashed/generator.rs @@ -15,73 +15,89 @@ // along with Substrate. If not, see . 
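// A sketch (not part of the patch) exercising the `child` API above in the
// same style as the `tests` module, assuming the test externalities back
// child tries the way they back top-level storage:
#[cfg(test)]
mod child_usage_sketch {
    use super::*;
    use runtime_io::{with_externalities, TestExternalities};

    #[test]
    fn child_values_round_trip() {
        let mut t = TestExternalities::default();
        with_externalities(&mut t, || {
            // every child trie is namespaced by its `storage_key`
            child::put(b":child_storage:demo", b":k", &42u32);
            assert_eq!(child::get::<u32>(b":child_storage:demo", b":k"), Some(42));
            // `take` reads and clears in one step
            assert_eq!(child::take::<u32>(b":child_storage:demo", b":k"), Some(42));
            assert!(!child::exists(b":child_storage:demo", b":k"));
        });
    }
}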
use crate::codec;
-use runtime_io::twox_128;
use crate::rstd::vec::Vec;
+use runtime_io::twox_128;

/// Abstraction around storage with unhashed access.
pub trait UnhashedStorage {
-	/// true if the key exists in storage.
-	fn exists(&self, key: &[u8]) -> bool;
-
-	/// Load the bytes of a key from storage. Can panic if the type is incorrect.
-	fn get<T: codec::Decode>(&self, key: &[u8]) -> Option<T>;
-
-	/// Load the bytes of a key from storage. Can panic if the type is incorrect. Will panic if
-	/// it's not there.
-	fn require<T: codec::Decode>(&self, key: &[u8]) -> T { self.get(key).expect("Required values must be in storage") }
-
-	/// Load the bytes of a key from storage. Can panic if the type is incorrect. The type's
-	/// default is returned if it's not there.
-	fn get_or_default<T: codec::Decode + Default>(&self, key: &[u8]) -> T { self.get(key).unwrap_or_default() }
-
-	/// Put a value in under a key.
-	fn put<T: codec::Encode>(&self, key: &[u8], val: &T);
-
-	/// Remove the bytes of a key from storage.
-	fn kill(&self, key: &[u8]);
-
-	/// Remove the bytes of a key from storage.
-	fn kill_prefix(&self, prefix: &[u8]);
-
-	/// Take a value from storage, deleting it after reading.
-	fn take<T: codec::Decode>(&self, key: &[u8]) -> Option<T> {
-		let value = self.get(key);
-		self.kill(key);
-		value
-	}
-
-	/// Take a value from storage, deleting it after reading.
-	fn take_or_panic<T: codec::Decode>(&self, key: &[u8]) -> T { self.take(key).expect("Required values must be in storage") }
-
-	/// Take a value from storage, deleting it after reading.
-	fn take_or_default<T: codec::Decode + Default>(&self, key: &[u8]) -> T { self.take(key).unwrap_or_default() }
+    /// true if the key exists in storage.
+    fn exists(&self, key: &[u8]) -> bool;
+
+    /// Load the bytes of a key from storage. Can panic if the type is incorrect.
+    fn get<T: codec::Decode>(&self, key: &[u8]) -> Option<T>;
+
+    /// Load the bytes of a key from storage. Can panic if the type is incorrect. Will panic if
+    /// it's not there.
+    fn require<T: codec::Decode>(&self, key: &[u8]) -> T {
+        self.get(key).expect("Required values must be in storage")
+    }
+
+    /// Load the bytes of a key from storage. Can panic if the type is incorrect. The type's
+    /// default is returned if it's not there.
+    fn get_or_default<T: codec::Decode + Default>(&self, key: &[u8]) -> T {
+        self.get(key).unwrap_or_default()
+    }
+
+    /// Put a value in under a key.
+    fn put<T: codec::Encode>(&self, key: &[u8], val: &T);
+
+    /// Remove the bytes of a key from storage.
+    fn kill(&self, key: &[u8]);
+
+    /// Remove the bytes of a key from storage.
+    fn kill_prefix(&self, prefix: &[u8]);
+
+    /// Take a value from storage, deleting it after reading.
+    fn take<T: codec::Decode>(&self, key: &[u8]) -> Option<T> {
+        let value = self.get(key);
+        self.kill(key);
+        value
+    }
+
+    /// Take a value from storage, deleting it after reading.
+    fn take_or_panic<T: codec::Decode>(&self, key: &[u8]) -> T {
+        self.take(key).expect("Required values must be in storage")
+    }
+
+    /// Take a value from storage, deleting it after reading.
+    fn take_or_default<T: codec::Decode + Default>(&self, key: &[u8]) -> T {
+        self.take(key).unwrap_or_default()
+    }
}

// We use a construct like this while genesis storage is being built.
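// A minimal implementor of `UnhashedStorage` (a sketch, not part of the
// patch; assumes std): a `RefCell`'d `HashMap`, mirroring the genesis overlay
// implementation below minus the hasher type parameter.
use std::cell::RefCell;
use std::collections::HashMap;

struct MemStorage(RefCell<HashMap<Vec<u8>, Vec<u8>>>);

impl UnhashedStorage for MemStorage {
    fn exists(&self, key: &[u8]) -> bool {
        self.0.borrow().contains_key(key)
    }
    fn get<T: codec::Decode>(&self, key: &[u8]) -> Option<T> {
        self.0.borrow().get(key).map(|raw| {
            codec::Decode::decode(&mut raw.as_slice()).expect("stored bytes must decode")
        })
    }
    fn put<T: codec::Encode>(&self, key: &[u8], val: &T) {
        self.0.borrow_mut().insert(key.to_vec(), codec::Encode::encode(val));
    }
    fn kill(&self, key: &[u8]) {
        self.0.borrow_mut().remove(key);
    }
    fn kill_prefix(&self, prefix: &[u8]) {
        self.0.borrow_mut().retain(|key, _| !key.starts_with(prefix));
    }
}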
#[cfg(feature = "std")] -impl UnhashedStorage for (crate::rstd::cell::RefCell<&mut sr_primitives::StorageOverlay>, H) { - fn exists(&self, key: &[u8]) -> bool { - self.0.borrow().contains_key(key) - } - - fn get(&self, key: &[u8]) -> Option { - self.0.borrow().get(key) - .map(|x| codec::Decode::decode(&mut x.as_slice()).expect("Unable to decode expected type.")) - } - - fn put(&self, key: &[u8], val: &T) { - self.0.borrow_mut().insert(key.to_vec(), codec::Encode::encode(val)); - } - - fn kill(&self, key: &[u8]) { - self.0.borrow_mut().remove(key); - } - - fn kill_prefix(&self, prefix: &[u8]) { - self.0.borrow_mut().retain(|key, _| { - !key.starts_with(prefix) - }) - } +impl UnhashedStorage + for ( + crate::rstd::cell::RefCell<&mut sr_primitives::StorageOverlay>, + H, + ) +{ + fn exists(&self, key: &[u8]) -> bool { + self.0.borrow().contains_key(key) + } + + fn get(&self, key: &[u8]) -> Option { + self.0.borrow().get(key).map(|x| { + codec::Decode::decode(&mut x.as_slice()).expect("Unable to decode expected type.") + }) + } + + fn put(&self, key: &[u8], val: &T) { + self.0 + .borrow_mut() + .insert(key.to_vec(), codec::Encode::encode(val)); + } + + fn kill(&self, key: &[u8]) { + self.0.borrow_mut().remove(key); + } + + fn kill_prefix(&self, prefix: &[u8]) { + self.0 + .borrow_mut() + .retain(|key, _| !key.starts_with(prefix)) + } } /// An implementation of a map with a two keys. @@ -97,48 +113,53 @@ impl UnhashedStorage for (crate::rstd::cell::RefCell<&mut sr_primitives::Stor /// /// /!\ be careful while choosing the Hash, indeed malicious could craft second keys to lower the trie. pub trait StorageDoubleMap { - /// The type that get/take returns. - type Query; - - /// Get the prefix key in storage. - fn prefix() -> &'static [u8]; - - /// Get the storage key used to fetch a value corresponding to a specific key. - fn key_for(k1: &K1, k2: &K2) -> Vec; - - /// Get the storage prefix used to fetch keys corresponding to a specific key1. - fn prefix_for(k1: &K1) -> Vec { - let mut key = Self::prefix().to_vec(); - codec::Encode::encode_to(k1, &mut key); - twox_128(&key).to_vec() - } - - /// true if the value is defined in storage. - fn exists(k1: &K1, k2: &K2, storage: &S) -> bool { - storage.exists(&Self::key_for(k1, k2)) - } - - /// Load the value associated with the given key from the map. - fn get(k1: &K1, k2: &K2, storage: &S) -> Self::Query; - - /// Take the value under a key. - fn take(k1: &K1, k2: &K2, storage: &S) -> Self::Query; - - /// Store a value to be associated with the given key from the map. - fn insert(k1: &K1, k2: &K2, val: &V, storage: &S) { - storage.put(&Self::key_for(k1, k2), val); - } - - /// Remove the value under a key. - fn remove(k1: &K1, k2: &K2, storage: &S) { - storage.kill(&Self::key_for(k1, k2)); - } - - /// Removes all entries that shares the `k1` as the first key. - fn remove_prefix(k1: &K1, storage: &S) { - storage.kill_prefix(&Self::prefix_for(k1)); - } - - /// Mutate the value under a key. - fn mutate R, S: UnhashedStorage>(k1: &K1, k2: &K2, f: F, storage: &S) -> R; + /// The type that get/take returns. + type Query; + + /// Get the prefix key in storage. + fn prefix() -> &'static [u8]; + + /// Get the storage key used to fetch a value corresponding to a specific key. + fn key_for(k1: &K1, k2: &K2) -> Vec; + + /// Get the storage prefix used to fetch keys corresponding to a specific key1. 
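// A sketch (not part of the patch) of the full key a `decl_storage!`-style
// double map derives from the pieces above. The exact generated `key_for` is
// macro-defined; the assumption here is the documented shape: the twox_128'd
// first-key prefix with the hashed second key appended (blake2_256 chosen to
// match the `double_map u64, blake2_256(u64)` entry in the tests further down).
fn double_map_key_sketch<K1: codec::Encode, K2: codec::Encode>(
    prefix: &'static [u8],
    k1: &K1,
    k2: &K2,
) -> Vec<u8> {
    let mut first = prefix.to_vec();
    codec::Encode::encode_to(k1, &mut first);
    // everything below `twox_128(prefix ++ encode(k1))` shares one storage
    // prefix, which is what makes `remove_prefix(k1)` a single `kill_prefix`
    let mut key = twox_128(&first).to_vec();
    key.extend_from_slice(&runtime_io::blake2_256(&k2.encode()));
    key
}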
+ fn prefix_for(k1: &K1) -> Vec { + let mut key = Self::prefix().to_vec(); + codec::Encode::encode_to(k1, &mut key); + twox_128(&key).to_vec() + } + + /// true if the value is defined in storage. + fn exists(k1: &K1, k2: &K2, storage: &S) -> bool { + storage.exists(&Self::key_for(k1, k2)) + } + + /// Load the value associated with the given key from the map. + fn get(k1: &K1, k2: &K2, storage: &S) -> Self::Query; + + /// Take the value under a key. + fn take(k1: &K1, k2: &K2, storage: &S) -> Self::Query; + + /// Store a value to be associated with the given key from the map. + fn insert(k1: &K1, k2: &K2, val: &V, storage: &S) { + storage.put(&Self::key_for(k1, k2), val); + } + + /// Remove the value under a key. + fn remove(k1: &K1, k2: &K2, storage: &S) { + storage.kill(&Self::key_for(k1, k2)); + } + + /// Removes all entries that shares the `k1` as the first key. + fn remove_prefix(k1: &K1, storage: &S) { + storage.kill_prefix(&Self::prefix_for(k1)); + } + + /// Mutate the value under a key. + fn mutate R, S: UnhashedStorage>( + k1: &K1, + k2: &K2, + f: F, + storage: &S, + ) -> R; } diff --git a/srml/support/src/storage/unhashed/mod.rs b/srml/support/src/storage/unhashed/mod.rs index 225c6756b8..6be8bd7c46 100644 --- a/srml/support/src/storage/unhashed/mod.rs +++ b/srml/support/src/storage/unhashed/mod.rs @@ -16,145 +16,144 @@ //! Operation on unhashed runtime storage +use super::{runtime_io, Codec, Decode, Encode, IncrementalInput, KeyedVec, Vec}; use crate::rstd::borrow::Borrow; -use super::{runtime_io, Codec, Encode, Decode, KeyedVec, Vec, IncrementalInput}; pub mod generator; /// Return the value of the item in storage under `key`, or `None` if there is no explicit entry. pub fn get(key: &[u8]) -> Option { - runtime_io::read_storage(key, &mut [0; 0][..], 0).map(|_| { - let mut input = IncrementalInput { - key, - pos: 0, - }; - Decode::decode(&mut input).expect("storage is not null, therefore must be a valid type") - }) + runtime_io::read_storage(key, &mut [0; 0][..], 0).map(|_| { + let mut input = IncrementalInput { key, pos: 0 }; + Decode::decode(&mut input).expect("storage is not null, therefore must be a valid type") + }) } /// Return the value of the item in storage under `key`, or the type's default if there is no /// explicit entry. pub fn get_or_default(key: &[u8]) -> T { - get(key).unwrap_or_else(Default::default) + get(key).unwrap_or_else(Default::default) } /// Return the value of the item in storage under `key`, or `default_value` if there is no /// explicit entry. pub fn get_or(key: &[u8], default_value: T) -> T { - get(key).unwrap_or(default_value) + get(key).unwrap_or(default_value) } /// Return the value of the item in storage under `key`, or `default_value()` if there is no /// explicit entry. pub fn get_or_else T>(key: &[u8], default_value: F) -> T { - get(key).unwrap_or_else(default_value) + get(key).unwrap_or_else(default_value) } /// Put `value` in storage under `key`. pub fn put(key: &[u8], value: &T) { - value.using_encoded(|slice| runtime_io::set_storage(key, slice)); + value.using_encoded(|slice| runtime_io::set_storage(key, slice)); } /// Remove `key` from storage, returning its value if it had an explicit entry or `None` otherwise. pub fn take(key: &[u8]) -> Option { - let r = get(key); - if r.is_some() { - kill(key); - } - r + let r = get(key); + if r.is_some() { + kill(key); + } + r } /// Remove `key` from storage, returning its value, or, if there was no explicit entry in storage, /// the default for its type. 
pub fn take_or_default(key: &[u8]) -> T { - take(key).unwrap_or_else(Default::default) + take(key).unwrap_or_else(Default::default) } /// Return the value of the item in storage under `key`, or `default_value` if there is no /// explicit entry. Ensure there is no explicit entry on return. pub fn take_or(key: &[u8], default_value: T) -> T { - take(key).unwrap_or(default_value) + take(key).unwrap_or(default_value) } /// Return the value of the item in storage under `key`, or `default_value()` if there is no /// explicit entry. Ensure there is no explicit entry on return. pub fn take_or_else T>(key: &[u8], default_value: F) -> T { - take(key).unwrap_or_else(default_value) + take(key).unwrap_or_else(default_value) } /// Check to see if `key` has an explicit entry in storage. pub fn exists(key: &[u8]) -> bool { - runtime_io::read_storage(key, &mut [0;0][..], 0).is_some() + runtime_io::read_storage(key, &mut [0; 0][..], 0).is_some() } /// Ensure `key` has no explicit entry in storage. pub fn kill(key: &[u8]) { - runtime_io::clear_storage(key); + runtime_io::clear_storage(key); } /// Ensure keys with the given `prefix` have no entries in storage. pub fn kill_prefix(prefix: &[u8]) { - runtime_io::clear_prefix(prefix); + runtime_io::clear_prefix(prefix); } /// Get a Vec of bytes from storage. pub fn get_raw(key: &[u8]) -> Option> { - runtime_io::storage(key) + runtime_io::storage(key) } /// Put a raw byte slice into storage. pub fn put_raw(key: &[u8], value: &[u8]) { - runtime_io::set_storage(key, value) + runtime_io::set_storage(key, value) } /// A trait to conveniently store a vector of storable data. pub trait StorageVec { - type Item: Default + Sized + Codec; - const PREFIX: &'static [u8]; - - /// Get the current set of items. - fn items() -> Vec { - (0..Self::count()).into_iter().map(Self::item).collect() - } - - /// Set the current set of items. - fn set_items(items: I) - where - I: IntoIterator, - T: Borrow, - { - let mut count: u32 = 0; - - for i in items.into_iter() { - put(&count.to_keyed_vec(Self::PREFIX), i.borrow()); - count = count.checked_add(1).expect("exceeded runtime storage capacity"); - } - - Self::set_count(count); - } - - fn set_item(index: u32, item: &Self::Item) { - if index < Self::count() { - put(&index.to_keyed_vec(Self::PREFIX), item); - } - } - - fn clear_item(index: u32) { - if index < Self::count() { - kill(&index.to_keyed_vec(Self::PREFIX)); - } - } - - fn item(index: u32) -> Self::Item { - get_or_default(&index.to_keyed_vec(Self::PREFIX)) - } - - fn set_count(count: u32) { - (count..Self::count()).for_each(Self::clear_item); - put(&b"len".to_keyed_vec(Self::PREFIX), &count); - } - - fn count() -> u32 { - get_or_default(&b"len".to_keyed_vec(Self::PREFIX)) - } + type Item: Default + Sized + Codec; + const PREFIX: &'static [u8]; + + /// Get the current set of items. + fn items() -> Vec { + (0..Self::count()).into_iter().map(Self::item).collect() + } + + /// Set the current set of items. 
+ fn set_items(items: I) + where + I: IntoIterator, + T: Borrow, + { + let mut count: u32 = 0; + + for i in items.into_iter() { + put(&count.to_keyed_vec(Self::PREFIX), i.borrow()); + count = count + .checked_add(1) + .expect("exceeded runtime storage capacity"); + } + + Self::set_count(count); + } + + fn set_item(index: u32, item: &Self::Item) { + if index < Self::count() { + put(&index.to_keyed_vec(Self::PREFIX), item); + } + } + + fn clear_item(index: u32) { + if index < Self::count() { + kill(&index.to_keyed_vec(Self::PREFIX)); + } + } + + fn item(index: u32) -> Self::Item { + get_or_default(&index.to_keyed_vec(Self::PREFIX)) + } + + fn set_count(count: u32) { + (count..Self::count()).for_each(Self::clear_item); + put(&b"len".to_keyed_vec(Self::PREFIX), &count); + } + + fn count() -> u32 { + get_or_default(&b"len".to_keyed_vec(Self::PREFIX)) + } } diff --git a/srml/support/src/traits.rs b/srml/support/src/traits.rs index 6f0435a77a..e682e1d18d 100644 --- a/srml/support/src/traits.rs +++ b/srml/support/src/traits.rs @@ -16,62 +16,60 @@ //! Traits for SRML +use crate::codec::{Codec, Decode, Encode}; use crate::rstd::result; -use crate::codec::{Codec, Encode, Decode}; -use crate::runtime_primitives::traits::{ - MaybeSerializeDebug, SimpleArithmetic, As -}; +use crate::runtime_primitives::traits::{As, MaybeSerializeDebug, SimpleArithmetic}; /// The account with the given id was killed. pub trait OnFreeBalanceZero { - /// The account was the given id was killed. - fn on_free_balance_zero(who: &AccountId); + /// The account was the given id was killed. + fn on_free_balance_zero(who: &AccountId); } impl OnFreeBalanceZero for () { - fn on_free_balance_zero(_who: &AccountId) {} + fn on_free_balance_zero(_who: &AccountId) {} } -impl< - AccountId, - X: OnFreeBalanceZero, - Y: OnFreeBalanceZero, -> OnFreeBalanceZero for (X, Y) { - fn on_free_balance_zero(who: &AccountId) { - X::on_free_balance_zero(who); - Y::on_free_balance_zero(who); - } +impl, Y: OnFreeBalanceZero> + OnFreeBalanceZero for (X, Y) +{ + fn on_free_balance_zero(who: &AccountId) { + X::on_free_balance_zero(who); + Y::on_free_balance_zero(who); + } } /// Trait for a hook to get called when some balance has been minted, causing dilution. pub trait OnDilution { - /// Some `portion` of the total balance just "grew" by `minted`. `portion` is the pre-growth - /// amount (it doesn't take account of the recent growth). - fn on_dilution(minted: Balance, portion: Balance); + /// Some `portion` of the total balance just "grew" by `minted`. `portion` is the pre-growth + /// amount (it doesn't take account of the recent growth). + fn on_dilution(minted: Balance, portion: Balance); } impl OnDilution for () { - fn on_dilution(_minted: Balance, _portion: Balance) {} + fn on_dilution(_minted: Balance, _portion: Balance) {} } /// Outcome of a balance update. pub enum UpdateBalanceOutcome { - /// Account balance was simply updated. - Updated, - /// The update led to killing the account. - AccountKilled, + /// Account balance was simply updated. + Updated, + /// The update led to killing the account. + AccountKilled, } /// Simple trait designed for hooking into a transaction payment. /// /// It operates over a single generic `AccountId` type. pub trait MakePayment { - /// Make transaction payment from `who` for an extrinsic of encoded length - /// `encoded_len` bytes. Return `Ok` iff the payment was successful. 
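// A sketch (not part of the patch) of why the `(X, Y)` impl above is useful:
// a runtime can chain independent clean-up hooks without glue code. The two
// types below are toy stand-ins, not real modules:
struct ClearNonce;
struct ClearLocks;

impl OnFreeBalanceZero<u64> for ClearNonce {
    fn on_free_balance_zero(_who: &u64) { /* e.g. remove the account nonce */ }
}
impl OnFreeBalanceZero<u64> for ClearLocks {
    fn on_free_balance_zero(_who: &u64) { /* e.g. drop any balance locks */ }
}

// `(ClearNonce, ClearLocks)` satisfies the trait too; both hooks run, left to
// right, exactly as the tuple impl spells out:
fn reap_account(who: &u64) {
    <(ClearNonce, ClearLocks) as OnFreeBalanceZero<u64>>::on_free_balance_zero(who);
}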
- fn make_payment(who: &AccountId, encoded_len: usize) -> Result<(), &'static str>; + /// Make transaction payment from `who` for an extrinsic of encoded length + /// `encoded_len` bytes. Return `Ok` iff the payment was successful. + fn make_payment(who: &AccountId, encoded_len: usize) -> Result<(), &'static str>; } impl MakePayment for () { - fn make_payment(_: &T, _: usize) -> Result<(), &'static str> { Ok(()) } + fn make_payment(_: &T, _: usize) -> Result<(), &'static str> { + Ok(()) + } } /// Handler for when some currency "account" decreased in balance for @@ -85,21 +83,23 @@ impl MakePayment for () { /// - Someone got slashed. /// - Someone paid for a transaction to be included. pub trait OnUnbalanced { - /// Handler for some imbalance. Infallible. - fn on_unbalanced(amount: Imbalance); + /// Handler for some imbalance. Infallible. + fn on_unbalanced(amount: Imbalance); } impl OnUnbalanced for () { - fn on_unbalanced(amount: Imbalance) { drop(amount); } + fn on_unbalanced(amount: Imbalance) { + drop(amount); + } } /// Simple boolean for whether an account needs to be kept in existence. #[derive(Copy, Clone, Eq, PartialEq)] pub enum ExistenceRequirement { - /// Operation must not result in the account going out of existence. - KeepAlive, - /// Operation may result in account going out of existence. - AllowDeath, + /// Operation must not result in the account going out of existence. + KeepAlive, + /// Operation may result in account going out of existence. + AllowDeath, } /// A trait for a not-quite Linear Type that tracks an imbalance. @@ -132,278 +132,281 @@ pub enum ExistenceRequirement { /// You can always retrieve the raw balance value using `peek`. #[must_use] pub trait Imbalance: Sized { - /// The oppositely imbalanced type. They come in pairs. - type Opposite: Imbalance; - - /// The zero imbalance. Can be destroyed with `drop_zero`. - fn zero() -> Self; - - /// Drop an instance cleanly. Only works if its `value()` is zero. - fn drop_zero(self) -> Result<(), Self>; - - /// Consume `self` and return two independent instances; the first - /// is guaranteed to be at most `amount` and the second will be the remainder. - fn split(self, amount: Balance) -> (Self, Self); - - /// Consume `self` and an `other` to return a new instance that combines - /// both. - fn merge(self, other: Self) -> Self; - - /// Consume `self` and maybe an `other` to return a new instance that combines - /// both. - fn maybe_merge(self, other: Option) -> Self { - if let Some(o) = other { - self.merge(o) - } else { - self - } - } - - /// Consume an `other` to mutate `self` into a new instance that combines - /// both. - fn subsume(&mut self, other: Self); - - /// Maybe consume an `other` to mutate `self` into a new instance that combines - /// both. - fn maybe_subsume(&mut self, other: Option) { - if let Some(o) = other { - self.subsume(o) - } - } - - /// Consume self and along with an opposite counterpart to return - /// a combined result. - /// - /// Returns `Ok` along with a new instance of `Self` if this instance has a - /// greater value than the `other`. Otherwise returns `Err` with an instance of - /// the `Opposite`. In both cases the value represents the combination of `self` - /// and `other`. - fn offset(self, other: Self::Opposite) -> Result; - - /// The raw value of self. - fn peek(&self) -> Balance; + /// The oppositely imbalanced type. They come in pairs. + type Opposite: Imbalance; + + /// The zero imbalance. Can be destroyed with `drop_zero`. 
+ fn zero() -> Self; + + /// Drop an instance cleanly. Only works if its `value()` is zero. + fn drop_zero(self) -> Result<(), Self>; + + /// Consume `self` and return two independent instances; the first + /// is guaranteed to be at most `amount` and the second will be the remainder. + fn split(self, amount: Balance) -> (Self, Self); + + /// Consume `self` and an `other` to return a new instance that combines + /// both. + fn merge(self, other: Self) -> Self; + + /// Consume `self` and maybe an `other` to return a new instance that combines + /// both. + fn maybe_merge(self, other: Option) -> Self { + if let Some(o) = other { + self.merge(o) + } else { + self + } + } + + /// Consume an `other` to mutate `self` into a new instance that combines + /// both. + fn subsume(&mut self, other: Self); + + /// Maybe consume an `other` to mutate `self` into a new instance that combines + /// both. + fn maybe_subsume(&mut self, other: Option) { + if let Some(o) = other { + self.subsume(o) + } + } + + /// Consume self and along with an opposite counterpart to return + /// a combined result. + /// + /// Returns `Ok` along with a new instance of `Self` if this instance has a + /// greater value than the `other`. Otherwise returns `Err` with an instance of + /// the `Opposite`. In both cases the value represents the combination of `self` + /// and `other`. + fn offset(self, other: Self::Opposite) -> Result; + + /// The raw value of self. + fn peek(&self) -> Balance; } /// Either a positive or a negative imbalance. -pub enum SignedImbalance>{ - /// A positive imbalance (funds have been created but none destroyed). - Positive(P), - /// A negative imbalance (funds have been destroyed but none created). - Negative(P::Opposite), +pub enum SignedImbalance> { + /// A positive imbalance (funds have been created but none destroyed). + Positive(P), + /// A negative imbalance (funds have been destroyed but none created). + Negative(P::Opposite), } impl< - P: Imbalance, - N: Imbalance, - B: SimpleArithmetic + As + As + Codec + Copy + MaybeSerializeDebug + Default, -> SignedImbalance { - pub fn zero() -> Self { - SignedImbalance::Positive(P::zero()) - } - - pub fn drop_zero(self) -> Result<(), Self> { - match self { - SignedImbalance::Positive(x) => x.drop_zero().map_err(SignedImbalance::Positive), - SignedImbalance::Negative(x) => x.drop_zero().map_err(SignedImbalance::Negative), - } - } - - /// Consume `self` and an `other` to return a new instance that combines - /// both. 
- pub fn merge(self, other: Self) -> Self { - match (self, other) { - (SignedImbalance::Positive(one), SignedImbalance::Positive(other)) => - SignedImbalance::Positive(one.merge(other)), - (SignedImbalance::Negative(one), SignedImbalance::Negative(other)) => - SignedImbalance::Negative(one.merge(other)), - (SignedImbalance::Positive(one), SignedImbalance::Negative(other)) => - if one.peek() > other.peek() { - SignedImbalance::Positive(one.offset(other).ok().unwrap_or_else(P::zero)) - } else { - SignedImbalance::Negative(other.offset(one).ok().unwrap_or_else(N::zero)) - }, - (one, other) => other.merge(one), - } - } + P: Imbalance, + N: Imbalance, + B: SimpleArithmetic + As + As + Codec + Copy + MaybeSerializeDebug + Default, + > SignedImbalance +{ + pub fn zero() -> Self { + SignedImbalance::Positive(P::zero()) + } + + pub fn drop_zero(self) -> Result<(), Self> { + match self { + SignedImbalance::Positive(x) => x.drop_zero().map_err(SignedImbalance::Positive), + SignedImbalance::Negative(x) => x.drop_zero().map_err(SignedImbalance::Negative), + } + } + + /// Consume `self` and an `other` to return a new instance that combines + /// both. + pub fn merge(self, other: Self) -> Self { + match (self, other) { + (SignedImbalance::Positive(one), SignedImbalance::Positive(other)) => { + SignedImbalance::Positive(one.merge(other)) + } + (SignedImbalance::Negative(one), SignedImbalance::Negative(other)) => { + SignedImbalance::Negative(one.merge(other)) + } + (SignedImbalance::Positive(one), SignedImbalance::Negative(other)) => { + if one.peek() > other.peek() { + SignedImbalance::Positive(one.offset(other).ok().unwrap_or_else(P::zero)) + } else { + SignedImbalance::Negative(other.offset(one).ok().unwrap_or_else(N::zero)) + } + } + (one, other) => other.merge(one), + } + } } /// Abstraction over a fungible assets system. pub trait Currency { - /// The balance of an account. - type Balance: SimpleArithmetic + As + As + Codec + Copy + MaybeSerializeDebug + Default; - - /// The opaque token type for an imbalance. This is returned by unbalanced operations - /// and must be dealt with. It may be dropped but cannot be cloned. - type PositiveImbalance: Imbalance; - - /// The opaque token type for an imbalance. This is returned by unbalanced operations - /// and must be dealt with. It may be dropped but cannot be cloned. - type NegativeImbalance: Imbalance; - - // PUBLIC IMMUTABLES - - /// The combined balance of `who`. - fn total_balance(who: &AccountId) -> Self::Balance; - - /// Same result as `slash(who, value)` (but without the side-effects) assuming there are no - /// balance changes in the meantime and only the reserved balance is not taken into account. - fn can_slash(who: &AccountId, value: Self::Balance) -> bool; - - /// The total amount of issuance in the system. - fn total_issuance() -> Self::Balance; - - /// The minimum balance any single account may have. This is equivalent to the `Balances` module's - /// `ExistentialDeposit`. - fn minimum_balance() -> Self::Balance; - - /// The 'free' balance of a given account. - /// - /// This is the only balance that matters in terms of most operations on tokens. It alone - /// is used to determine the balance when in the contract execution environment. When this - /// balance falls below the value of `ExistentialDeposit`, then the 'current account' is - /// deleted: specifically `FreeBalance`. Further, the `OnFreeBalanceZero` callback - /// is invoked, giving a chance to external modules to clean up data associated with - /// the deleted account. 
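// The `offset`/`merge` arithmetic used by `SignedImbalance::merge` above, on
// plain numbers (a sketch, not part of the patch; `Pos`/`Neg` are toy
// stand-ins for the real imbalance pair):
struct Pos(u64);
struct Neg(u64);

impl Pos {
    // offsetting against the opposite kind: the larger magnitude wins
    fn offset(self, other: Neg) -> Result<Pos, Neg> {
        if self.0 >= other.0 {
            Ok(Pos(self.0 - other.0))
        } else {
            Err(Neg(other.0 - self.0))
        }
    }
}

fn offset_demo() {
    // minting 10 then burning 4 nets out to a positive imbalance of 6
    match Pos(10).offset(Neg(4)) {
        Ok(Pos(n)) => assert_eq!(n, 6),
        Err(_) => unreachable!(),
    }
}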
- /// - /// `system::AccountNonce` is also deleted if `ReservedBalance` is also zero (it also gets - /// collapsed to zero if it ever becomes less than `ExistentialDeposit`. - fn free_balance(who: &AccountId) -> Self::Balance; - - /// Returns `Ok` iff the account is able to make a withdrawal of the given amount - /// for the given reason. Basically, it's just a dry-run of `withdraw`. - /// - /// `Err(...)` with the reason why not otherwise. - fn ensure_can_withdraw( - who: &AccountId, - _amount: Self::Balance, - reason: WithdrawReason, - new_balance: Self::Balance, - ) -> result::Result<(), &'static str>; - - // PUBLIC MUTABLES (DANGEROUS) - - /// Transfer some liquid free balance to another staker. - /// - /// This is a very high-level function. It will ensure all appropriate fees are paid - /// and no imbalance in the system remains. - fn transfer( - source: &AccountId, - dest: &AccountId, - value: Self::Balance, - ) -> result::Result<(), &'static str>; - - /// Deducts up to `value` from the combined balance of `who`, preferring to deduct from the - /// free balance. This function cannot fail. - /// - /// The resulting imbalance is the first item of the tuple returned. - /// - /// As much funds up to `value` will be deducted as possible. If this is less than `value`, - /// then a non-zero second item will be returned. - fn slash( - who: &AccountId, - value: Self::Balance - ) -> (Self::NegativeImbalance, Self::Balance); - - /// Mints `value` to the free balance of `who`. - /// - /// If `who` doesn't exist, nothing is done and an Err returned. - fn deposit_into_existing( - who: &AccountId, - value: Self::Balance - ) -> result::Result; - - /// Removes some free balance from `who` account for `reason` if possible. If `liveness` is `KeepAlive`, - /// then no less than `ExistentialDeposit` must be left remaining. - /// - /// This checks any locks, vesting, and liquidity requirements. If the removal is not possible, then it - /// returns `Err`. - fn withdraw( - who: &AccountId, - value: Self::Balance, - reason: WithdrawReason, - liveness: ExistenceRequirement, - ) -> result::Result; - - /// Adds up to `value` to the free balance of `who`. If `who` doesn't exist, it is created. - /// - /// Infallible. - fn deposit_creating( - who: &AccountId, - value: Self::Balance, - ) -> Self::PositiveImbalance; - - /// Ensure an account's free balance equals some value; this will create the account - /// if needed. - /// - /// Returns a signed imbalance and status to indicate if the account was successfully updated or update - /// has led to killing of the account. - fn make_free_balance_be( - who: &AccountId, - balance: Self::Balance, - ) -> ( - SignedImbalance, - UpdateBalanceOutcome, - ); + /// The balance of an account. + type Balance: SimpleArithmetic + + As + + As + + Codec + + Copy + + MaybeSerializeDebug + + Default; + + /// The opaque token type for an imbalance. This is returned by unbalanced operations + /// and must be dealt with. It may be dropped but cannot be cloned. + type PositiveImbalance: Imbalance; + + /// The opaque token type for an imbalance. This is returned by unbalanced operations + /// and must be dealt with. It may be dropped but cannot be cloned. + type NegativeImbalance: Imbalance; + + // PUBLIC IMMUTABLES + + /// The combined balance of `who`. 
+ fn total_balance(who: &AccountId) -> Self::Balance; + + /// Same result as `slash(who, value)` (but without the side-effects) assuming there are no + /// balance changes in the meantime and only the reserved balance is not taken into account. + fn can_slash(who: &AccountId, value: Self::Balance) -> bool; + + /// The total amount of issuance in the system. + fn total_issuance() -> Self::Balance; + + /// The minimum balance any single account may have. This is equivalent to the `Balances` module's + /// `ExistentialDeposit`. + fn minimum_balance() -> Self::Balance; + + /// The 'free' balance of a given account. + /// + /// This is the only balance that matters in terms of most operations on tokens. It alone + /// is used to determine the balance when in the contract execution environment. When this + /// balance falls below the value of `ExistentialDeposit`, then the 'current account' is + /// deleted: specifically `FreeBalance`. Further, the `OnFreeBalanceZero` callback + /// is invoked, giving a chance to external modules to clean up data associated with + /// the deleted account. + /// + /// `system::AccountNonce` is also deleted if `ReservedBalance` is also zero (it also gets + /// collapsed to zero if it ever becomes less than `ExistentialDeposit`. + fn free_balance(who: &AccountId) -> Self::Balance; + + /// Returns `Ok` iff the account is able to make a withdrawal of the given amount + /// for the given reason. Basically, it's just a dry-run of `withdraw`. + /// + /// `Err(...)` with the reason why not otherwise. + fn ensure_can_withdraw( + who: &AccountId, + _amount: Self::Balance, + reason: WithdrawReason, + new_balance: Self::Balance, + ) -> result::Result<(), &'static str>; + + // PUBLIC MUTABLES (DANGEROUS) + + /// Transfer some liquid free balance to another staker. + /// + /// This is a very high-level function. It will ensure all appropriate fees are paid + /// and no imbalance in the system remains. + fn transfer( + source: &AccountId, + dest: &AccountId, + value: Self::Balance, + ) -> result::Result<(), &'static str>; + + /// Deducts up to `value` from the combined balance of `who`, preferring to deduct from the + /// free balance. This function cannot fail. + /// + /// The resulting imbalance is the first item of the tuple returned. + /// + /// As much funds up to `value` will be deducted as possible. If this is less than `value`, + /// then a non-zero second item will be returned. + fn slash(who: &AccountId, value: Self::Balance) -> (Self::NegativeImbalance, Self::Balance); + + /// Mints `value` to the free balance of `who`. + /// + /// If `who` doesn't exist, nothing is done and an Err returned. + fn deposit_into_existing( + who: &AccountId, + value: Self::Balance, + ) -> result::Result; + + /// Removes some free balance from `who` account for `reason` if possible. If `liveness` is `KeepAlive`, + /// then no less than `ExistentialDeposit` must be left remaining. + /// + /// This checks any locks, vesting, and liquidity requirements. If the removal is not possible, then it + /// returns `Err`. + fn withdraw( + who: &AccountId, + value: Self::Balance, + reason: WithdrawReason, + liveness: ExistenceRequirement, + ) -> result::Result; + + /// Adds up to `value` to the free balance of `who`. If `who` doesn't exist, it is created. + /// + /// Infallible. + fn deposit_creating(who: &AccountId, value: Self::Balance) -> Self::PositiveImbalance; + + /// Ensure an account's free balance equals some value; this will create the account + /// if needed. 
+ /// + /// Returns a signed imbalance and status to indicate if the account was successfully updated or update + /// has led to killing of the account. + fn make_free_balance_be( + who: &AccountId, + balance: Self::Balance, + ) -> ( + SignedImbalance, + UpdateBalanceOutcome, + ); } /// A currency where funds can be reserved from the user. pub trait ReservableCurrency: Currency { - /// Same result as `reserve(who, value)` (but without the side-effects) assuming there - /// are no balance changes in the meantime. - fn can_reserve(who: &AccountId, value: Self::Balance) -> bool; - - /// Deducts up to `value` from reserved balance of `who`. This function cannot fail. - /// - /// As much funds up to `value` will be deducted as possible. If the reserve balance of `who` - /// is less than `value`, then a non-zero second item will be returned. - fn slash_reserved( - who: &AccountId, - value: Self::Balance - ) -> (Self::NegativeImbalance, Self::Balance); - - /// The amount of the balance of a given account that is externally reserved; this can still get - /// slashed, but gets slashed last of all. - /// - /// This balance is a 'reserve' balance that other subsystems use in order to set aside tokens - /// that are still 'owned' by the account holder, but which are suspendable. - /// - /// When this balance falls below the value of `ExistentialDeposit`, then this 'reserve account' - /// is deleted: specifically, `ReservedBalance`. - /// - /// `system::AccountNonce` is also deleted if `FreeBalance` is also zero (it also gets - /// collapsed to zero if it ever becomes less than `ExistentialDeposit`. - fn reserved_balance(who: &AccountId) -> Self::Balance; - - - /// Moves `value` from balance to reserved balance. - /// - /// If the free balance is lower than `value`, then no funds will be moved and an `Err` will - /// be returned to notify of this. This is different behavior than `unreserve`. - fn reserve(who: &AccountId, value: Self::Balance) -> result::Result<(), &'static str>; - - /// Moves up to `value` from reserved balance to free balance. This function cannot fail. - /// - /// As much funds up to `value` will be moved as possible. If the reserve balance of `who` - /// is less than `value`, then the remaining amount will be returned. - /// - /// # NOTES - /// - /// - This is different from `reserve`. - /// - If the remaining reserved balance is less than `ExistentialDeposit`, it will - /// invoke `on_reserved_too_low` and could reap the account. - fn unreserve(who: &AccountId, value: Self::Balance) -> Self::Balance; - - /// Moves up to `value` from reserved balance of account `slashed` to free balance of account - /// `beneficiary`. `beneficiary` must exist for this to succeed. If it does not, `Err` will be - /// returned. - /// - /// As much funds up to `value` will be deducted as possible. If this is less than `value`, - /// then `Ok(non_zero)` will be returned. - fn repatriate_reserved( - slashed: &AccountId, - beneficiary: &AccountId, - value: Self::Balance - ) -> result::Result; + /// Same result as `reserve(who, value)` (but without the side-effects) assuming there + /// are no balance changes in the meantime. + fn can_reserve(who: &AccountId, value: Self::Balance) -> bool; + + /// Deducts up to `value` from reserved balance of `who`. This function cannot fail. + /// + /// As much funds up to `value` will be deducted as possible. If the reserve balance of `who` + /// is less than `value`, then a non-zero second item will be returned. 
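// A sketch (not part of the patch) of how a module generic over `Currency`
// settles a fee; `FeeSink` and the 10-unit fee are hypothetical. The point is
// that the `NegativeImbalance` returned by `withdraw` must go somewhere, here
// into an `OnUnbalanced` handler:
fn charge_fee<AccountId, C, FeeSink>(who: &AccountId) -> result::Result<(), &'static str>
where
    C: Currency<AccountId>,
    FeeSink: OnUnbalanced<C::NegativeImbalance>,
{
    let fee = <C::Balance as As<u64>>::sa(10);
    let imbalance = C::withdraw(
        who,
        fee,
        WithdrawReason::Fee,
        ExistenceRequirement::KeepAlive,
    )?;
    FeeSink::on_unbalanced(imbalance); // funds left `who`; the sink decides their fate
    Ok(())
}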
+ fn slash_reserved( + who: &AccountId, + value: Self::Balance, + ) -> (Self::NegativeImbalance, Self::Balance); + + /// The amount of the balance of a given account that is externally reserved; this can still get + /// slashed, but gets slashed last of all. + /// + /// This balance is a 'reserve' balance that other subsystems use in order to set aside tokens + /// that are still 'owned' by the account holder, but which are suspendable. + /// + /// When this balance falls below the value of `ExistentialDeposit`, then this 'reserve account' + /// is deleted: specifically, `ReservedBalance`. + /// + /// `system::AccountNonce` is also deleted if `FreeBalance` is also zero (it also gets + /// collapsed to zero if it ever becomes less than `ExistentialDeposit`. + fn reserved_balance(who: &AccountId) -> Self::Balance; + + /// Moves `value` from balance to reserved balance. + /// + /// If the free balance is lower than `value`, then no funds will be moved and an `Err` will + /// be returned to notify of this. This is different behavior than `unreserve`. + fn reserve(who: &AccountId, value: Self::Balance) -> result::Result<(), &'static str>; + + /// Moves up to `value` from reserved balance to free balance. This function cannot fail. + /// + /// As much funds up to `value` will be moved as possible. If the reserve balance of `who` + /// is less than `value`, then the remaining amount will be returned. + /// + /// # NOTES + /// + /// - This is different from `reserve`. + /// - If the remaining reserved balance is less than `ExistentialDeposit`, it will + /// invoke `on_reserved_too_low` and could reap the account. + fn unreserve(who: &AccountId, value: Self::Balance) -> Self::Balance; + + /// Moves up to `value` from reserved balance of account `slashed` to free balance of account + /// `beneficiary`. `beneficiary` must exist for this to succeed. If it does not, `Err` will be + /// returned. + /// + /// As much funds up to `value` will be deducted as possible. If this is less than `value`, + /// then `Ok(non_zero)` will be returned. + fn repatriate_reserved( + slashed: &AccountId, + beneficiary: &AccountId, + value: Self::Balance, + ) -> result::Result; } /// An identifier for a lock. Used for disambiguating different locks so that @@ -412,63 +415,59 @@ pub type LockIdentifier = [u8; 8]; /// A currency whose accounts can have liquidity restrictions. pub trait LockableCurrency: Currency { - /// The quantity used to denote time; usually just a `BlockNumber`. - type Moment; - - /// Create a new balance lock on account `who`. - /// - /// If the new lock is valid (i.e. not already expired), it will push the struct to - /// the `Locks` vec in storage. Note that you can lock more funds than a user has. - /// - /// If the lock `id` already exists, this will update it. - fn set_lock( - id: LockIdentifier, - who: &AccountId, - amount: Self::Balance, - until: Self::Moment, - reasons: WithdrawReasons, - ); - - /// Changes a balance lock (selected by `id`) so that it becomes less liquid in all - /// parameters or creates a new one if it does not exist. - /// - /// Calling `extend_lock` on an existing lock `id` differs from `set_lock` in that it - /// applies the most severe constraints of the two, while `set_lock` replaces the lock - /// with the new parameters. 
As in, `extend_lock` will set: - /// - maximum `amount` - /// - farthest duration (`until`) - /// - bitwise mask of all `reasons` - fn extend_lock( - id: LockIdentifier, - who: &AccountId, - amount: Self::Balance, - until: Self::Moment, - reasons: WithdrawReasons, - ); - - /// Remove an existing lock. - fn remove_lock( - id: LockIdentifier, - who: &AccountId, - ); + /// The quantity used to denote time; usually just a `BlockNumber`. + type Moment; + + /// Create a new balance lock on account `who`. + /// + /// If the new lock is valid (i.e. not already expired), it will push the struct to + /// the `Locks` vec in storage. Note that you can lock more funds than a user has. + /// + /// If the lock `id` already exists, this will update it. + fn set_lock( + id: LockIdentifier, + who: &AccountId, + amount: Self::Balance, + until: Self::Moment, + reasons: WithdrawReasons, + ); + + /// Changes a balance lock (selected by `id`) so that it becomes less liquid in all + /// parameters or creates a new one if it does not exist. + /// + /// Calling `extend_lock` on an existing lock `id` differs from `set_lock` in that it + /// applies the most severe constraints of the two, while `set_lock` replaces the lock + /// with the new parameters. As in, `extend_lock` will set: + /// - maximum `amount` + /// - farthest duration (`until`) + /// - bitwise mask of all `reasons` + fn extend_lock( + id: LockIdentifier, + who: &AccountId, + amount: Self::Balance, + until: Self::Moment, + reasons: WithdrawReasons, + ); + + /// Remove an existing lock. + fn remove_lock(id: LockIdentifier, who: &AccountId); } bitmask! { - /// Reasons for moving funds out of an account. - #[derive(Encode, Decode)] - pub mask WithdrawReasons: i8 where - - /// Reason for moving funds out of an account. - #[derive(Encode, Decode)] - flags WithdrawReason { - /// In order to pay for (system) transaction costs. - TransactionPayment = 0b00000001, - /// In order to transfer ownership. - Transfer = 0b00000010, - /// In order to reserve some funds for a later return or repatriation - Reserve = 0b00000100, - /// In order to pay some other (higher-level) fees. - Fee = 0b00001000, - } + /// Reasons for moving funds out of an account. + #[derive(Encode, Decode)] + pub mask WithdrawReasons: i8 where + + /// Reason for moving funds out of an account. + #[derive(Encode, Decode)] + flags WithdrawReason { + /// In order to pay for (system) transaction costs. + TransactionPayment = 0b00000001, + /// In order to transfer ownership. + Transfer = 0b00000010, + /// In order to reserve some funds for a later return or repatriation + Reserve = 0b00000100, + /// In order to pay some other (higher-level) fees. + Fee = 0b00001000, + } } - diff --git a/srml/support/test/src/lib.rs b/srml/support/test/src/lib.rs index e69de29bb2..8b13789179 100644 --- a/srml/support/test/src/lib.rs +++ b/srml/support/test/src/lib.rs @@ -0,0 +1 @@ + diff --git a/srml/support/test/tests/instance.rs b/srml/support/test/tests/instance.rs index 28ad3ee006..d7b32edbb9 100644 --- a/srml/support/test/tests/instance.rs +++ b/srml/support/test/tests/instance.rs @@ -14,99 +14,95 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
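// A sketch (not part of the patch) of the flag algebra `bitmask!` generates
// for `WithdrawReasons`; raw `i8` bit ops are used here to stay self-contained,
// while the generated type exposes the equivalent set operators:
fn withdraw_reasons_demo() {
    let transfer = 0b00000010i8; // WithdrawReason::Transfer
    let fee = 0b00001000i8; // WithdrawReason::Fee
    let lock_covers = transfer | fee; // a lock forbidding transfers and fees

    // a fee withdrawal trips this lock; reserving funds does not
    assert_ne!(lock_covers & fee, 0);
    assert_eq!(lock_covers & 0b00000100i8, 0); // WithdrawReason::Reserve
}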
-#![recursion_limit="128"] +#![recursion_limit = "128"] +use inherents::{InherentData, InherentIdentifier, MakeFatalError, ProvideInherent, RuntimeString}; +use primitives::{sr25519, H256}; +use runtime_io::{with_externalities, Blake2Hasher}; #[cfg(feature = "std")] use serde_derive::Serialize; -use runtime_io::{with_externalities, Blake2Hasher}; +use srml_support::codec::{Decode, Encode}; +use srml_support::rstd; use srml_support::rstd::prelude::*; -use srml_support::rstd as rstd; -use srml_support::codec::{Encode, Decode}; +use srml_support::runtime_primitives::traits::{BlakeTwo256, Block as _, Digest, Verify}; use srml_support::runtime_primitives::{generic, BuildStorage}; -use srml_support::runtime_primitives::traits::{BlakeTwo256, Block as _, Verify, Digest}; use srml_support::Parameter; -use inherents::{ - ProvideInherent, InherentData, InherentIdentifier, RuntimeString, MakeFatalError -}; -use srml_support::{StorageValue, StorageMap, StorageDoubleMap}; -use primitives::{H256, sr25519}; +use srml_support::{StorageDoubleMap, StorageMap, StorageValue}; -pub trait Currency { -} +pub trait Currency {} // Mock mod system { - use super::*; - - pub trait Trait: 'static + Eq + Clone { - type Origin: Into>> + From>; - type BlockNumber; - type Digest: Digest; - type Hash; - type AccountId; - type Event: From; - type Log: From> + Into>; - } - - pub type DigestItemOf = <::Digest as Digest>::Item; - - srml_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin { - pub fn deposit_event(_event: T::Event) { - } - } - } - impl Module { - pub fn deposit_log(_item: ::Item) { - unimplemented!(); - } - } - - srml_support::decl_event!( - pub enum Event { - ExtrinsicSuccess, - ExtrinsicFailed, - } - ); - - /// Origin for the system module. - #[derive(PartialEq, Eq, Clone)] - #[cfg_attr(feature = "std", derive(Debug))] - pub enum RawOrigin { - Root, - Signed(AccountId), - Inherent, - } - - impl From> for RawOrigin { - fn from(s: Option) -> RawOrigin { - match s { - Some(who) => RawOrigin::Signed(who), - None => RawOrigin::Inherent, - } - } - } - - pub type Origin = RawOrigin<::AccountId>; - - pub type Log = RawLog< - ::Hash, - >; - - #[cfg_attr(feature = "std", derive(Serialize, Debug))] - #[derive(Encode, Decode, PartialEq, Eq, Clone)] - pub enum RawLog { - ChangesTrieRoot(H), - } - - pub fn ensure_root(o: OuterOrigin) -> Result<(), &'static str> - where OuterOrigin: Into>> - { - match o.into() { - Some(RawOrigin::Root) => Ok(()), - _ => Err("bad origin: expected to be a root origin"), - } - } + use super::*; + + pub trait Trait: 'static + Eq + Clone { + type Origin: Into>> + From>; + type BlockNumber; + type Digest: Digest; + type Hash; + type AccountId; + type Event: From; + type Log: From> + Into>; + } + + pub type DigestItemOf = <::Digest as Digest>::Item; + + srml_support::decl_module! { + pub struct Module for enum Call where origin: T::Origin { + pub fn deposit_event(_event: T::Event) { + } + } + } + impl Module { + pub fn deposit_log(_item: ::Item) { + unimplemented!(); + } + } + + srml_support::decl_event!( + pub enum Event { + ExtrinsicSuccess, + ExtrinsicFailed, + } + ); + + /// Origin for the system module. 
+ #[derive(PartialEq, Eq, Clone)] + #[cfg_attr(feature = "std", derive(Debug))] + pub enum RawOrigin { + Root, + Signed(AccountId), + Inherent, + } + + impl From> for RawOrigin { + fn from(s: Option) -> RawOrigin { + match s { + Some(who) => RawOrigin::Signed(who), + None => RawOrigin::Inherent, + } + } + } + + pub type Origin = RawOrigin<::AccountId>; + + pub type Log = RawLog<::Hash>; + + #[cfg_attr(feature = "std", derive(Serialize, Debug))] + #[derive(Encode, Decode, PartialEq, Eq, Clone)] + pub enum RawLog { + ChangesTrieRoot(H), + } + + pub fn ensure_root(o: OuterOrigin) -> Result<(), &'static str> + where + OuterOrigin: Into>>, + { + match o.into() { + Some(RawOrigin::Root) => Ok(()), + _ => Err("bad origin: expected to be a root origin"), + } + } } // Test for: @@ -114,211 +110,211 @@ mod system { // * Custom InstantiableTrait // * Origin, Inherent, Log, Event mod module1 { - use super::*; - - pub trait Trait: system::Trait { - type Event: From> + Into<::Event>; - type Origin: From>; - type Log: From> + Into>; - } - - srml_support::decl_module! { - pub struct Module, I: InstantiableThing> for enum Call where origin: ::Origin { - fn deposit_event() = default; - - fn one() { - Self::deposit_event(RawEvent::AnotherVariant(3)); - Self::deposit_log(RawLog::AmountChange(3)); - } - } - } - - impl, I: InstantiableThing> Module { - /// Deposit one of this module's logs. - fn deposit_log(log: Log) { - >::deposit_log(>::Log::from(log).into()); - } - } - - srml_support::decl_storage! { - trait Store for Module, I: InstantiableThing> as Module1 { - pub Value config(value): u64; - pub Map: map u32 => u64; - pub LinkedMap: linked_map u32 => u64; - } - } - - srml_support::decl_event! { - pub enum Event where Phantom = rstd::marker::PhantomData { - _Phantom(Phantom), - AnotherVariant(u32), - } - } - - #[derive(PartialEq, Eq, Clone)] - #[cfg_attr(feature = "std", derive(Debug))] - pub enum Origin, I> { - Members(u32), - _Phantom(rstd::marker::PhantomData<(T, I)>), - } - - pub type Log = RawLog< - T, - I, - >; - - /// A logs in this module. - #[cfg_attr(feature = "std", derive(serde_derive::Serialize, Debug))] - #[derive(parity_codec::Encode, parity_codec::Decode, PartialEq, Eq, Clone)] - pub enum RawLog { - _Phantom(rstd::marker::PhantomData<(T, I)>), - AmountChange(u32), - } - - pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"12345678"; - - impl, I: InstantiableThing> ProvideInherent for Module { - type Call = Call; - type Error = MakeFatalError; - const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; - - fn create_inherent(_data: &InherentData) -> Option { - unimplemented!(); - } - - fn check_inherent(_call: &Self::Call, _data: &InherentData) -> rstd::result::Result<(), Self::Error> { - unimplemented!(); - } - } + use super::*; + + pub trait Trait: system::Trait { + type Event: From> + Into<::Event>; + type Origin: From>; + type Log: From> + Into>; + } + + srml_support::decl_module! { + pub struct Module, I: InstantiableThing> for enum Call where origin: ::Origin { + fn deposit_event() = default; + + fn one() { + Self::deposit_event(RawEvent::AnotherVariant(3)); + Self::deposit_log(RawLog::AmountChange(3)); + } + } + } + + impl, I: InstantiableThing> Module { + /// Deposit one of this module's logs. + fn deposit_log(log: Log) { + >::deposit_log(>::Log::from(log).into()); + } + } + + srml_support::decl_storage! 
{
+ trait Store for Module, I: InstantiableThing> as Module1 {
+ pub Value config(value): u64;
+ pub Map: map u32 => u64;
+ pub LinkedMap: linked_map u32 => u64;
+ }
+ }
+
+ srml_support::decl_event! {
+ pub enum Event where Phantom = rstd::marker::PhantomData {
+ _Phantom(Phantom),
+ AnotherVariant(u32),
+ }
+ }
+
+ #[derive(PartialEq, Eq, Clone)]
+ #[cfg_attr(feature = "std", derive(Debug))]
+ pub enum Origin, I> {
+ Members(u32),
+ _Phantom(rstd::marker::PhantomData<(T, I)>),
+ }
+
+ pub type Log = RawLog;
+
+ /// A log in this module.
+ #[cfg_attr(feature = "std", derive(serde_derive::Serialize, Debug))]
+ #[derive(parity_codec::Encode, parity_codec::Decode, PartialEq, Eq, Clone)]
+ pub enum RawLog {
+ _Phantom(rstd::marker::PhantomData<(T, I)>),
+ AmountChange(u32),
+ }
+
+ pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"12345678";
+
+ impl, I: InstantiableThing> ProvideInherent for Module {
+ type Call = Call;
+ type Error = MakeFatalError;
+ const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER;
+
+ fn create_inherent(_data: &InherentData) -> Option {
+ unimplemented!();
+ }
+
+ fn check_inherent(
+ _call: &Self::Call,
+ _data: &InherentData,
+ ) -> rstd::result::Result<(), Self::Error> {
+ unimplemented!();
+ }
+ }
}

// Test for:
// * default instance
// * use of no_genesis_config_phantom_data
mod module2 {
- use super::*;
-
- pub trait Trait: system::Trait {
- type Amount: Parameter + Default;
- type Event: From> + Into<::Event>;
- type Origin: From>;
- type Log: From> + Into>;
- }
-
- impl, I: Instance> Currency for Module {}
-
- srml_support::decl_module! {
- pub struct Module, I: Instance=DefaultInstance> for enum Call where origin: ::Origin {
- fn deposit_event() = default;
- }
- }
-
- srml_support::decl_storage! {
- trait Store for Module, I: Instance=DefaultInstance> as Module2 {
- pub Value config(value): T::Amount;
- pub Map config(map): map u64 => u64;
- pub LinkedMap config(linked_map): linked_map u64 => u64;
- pub DoubleMap config(double_map): double_map u64, blake2_256(u64) => u64;
- }
- extra_genesis_skip_phantom_data_field;
- }
-
- srml_support::decl_event! {
- pub enum Event where Amount = >::Amount {
- Variant(Amount),
- }
- }
-
- #[derive(PartialEq, Eq, Clone)]
- #[cfg_attr(feature = "std", derive(Debug))]
- pub enum Origin, I=DefaultInstance> {
- Members(u32),
- _Phantom(rstd::marker::PhantomData<(T, I)>),
- }
-
- pub type Log = RawLog<
- T,
- I,
- >;
-
- /// A logs in this module.
- #[cfg_attr(feature = "std", derive(serde_derive::Serialize, Debug))]
- #[derive(parity_codec::Encode, parity_codec::Decode, PartialEq, Eq, Clone)]
- pub enum RawLog {
- _Phantom(rstd::marker::PhantomData<(T, I)>),
- AmountChange(u32),
- }
-
- pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"12345678";
-
- impl, I: Instance> ProvideInherent for Module {
- type Call = Call;
- type Error = MakeFatalError;
- const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER;
-
- fn create_inherent(_data: &InherentData) -> Option {
- unimplemented!();
- }
-
- fn check_inherent(_call: &Self::Call, _data: &InherentData) -> rstd::result::Result<(), Self::Error> {
- unimplemented!();
- }
- }
+ use super::*;
+
+ pub trait Trait: system::Trait {
+ type Amount: Parameter + Default;
+ type Event: From> + Into<::Event>;
+ type Origin: From>;
+ type Log: From> + Into>;
+ }
+
+ impl, I: Instance> Currency for Module {}
+
+ srml_support::decl_module! {
+ pub struct Module, I: Instance=DefaultInstance> for enum Call where origin: ::Origin {
+ fn deposit_event() = default;
+ }
+ }
+
+ srml_support::decl_storage! {
+ trait Store for Module, I: Instance=DefaultInstance> as Module2 {
+ pub Value config(value): T::Amount;
+ pub Map config(map): map u64 => u64;
+ pub LinkedMap config(linked_map): linked_map u64 => u64;
+ pub DoubleMap config(double_map): double_map u64, blake2_256(u64) => u64;
+ }
+ extra_genesis_skip_phantom_data_field;
+ }
+
+ srml_support::decl_event! {
+ pub enum Event where Amount = >::Amount {
+ Variant(Amount),
+ }
+ }
+
+ #[derive(PartialEq, Eq, Clone)]
+ #[cfg_attr(feature = "std", derive(Debug))]
+ pub enum Origin, I = DefaultInstance> {
+ Members(u32),
+ _Phantom(rstd::marker::PhantomData<(T, I)>),
+ }
+
+ pub type Log = RawLog;
+
+ /// A log in this module.
+ #[cfg_attr(feature = "std", derive(serde_derive::Serialize, Debug))]
+ #[derive(parity_codec::Encode, parity_codec::Decode, PartialEq, Eq, Clone)]
+ pub enum RawLog {
+ _Phantom(rstd::marker::PhantomData<(T, I)>),
+ AmountChange(u32),
+ }
+
+ pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"12345678";
+
+ impl, I: Instance> ProvideInherent for Module {
+ type Call = Call;
+ type Error = MakeFatalError;
+ const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER;
+
+ fn create_inherent(_data: &InherentData) -> Option {
+ unimplemented!();
+ }
+
+ fn check_inherent(
+ _call: &Self::Call,
+ _data: &InherentData,
+ ) -> rstd::result::Result<(), Self::Error> {
+ unimplemented!();
+ }
+ }
}

// Test for:
// * Depends on multiple instances of a module with instances
mod module3 {
- use super::*;
+ use super::*;

- pub trait Trait: module2::Trait + module2::Trait + system::Trait {
- type Currency: Currency;
- type Currency2: Currency;
- }
+ pub trait Trait: module2::Trait + module2::Trait + system::Trait {
+ type Currency: Currency;
+ type Currency2: Currency;
+ }

- srml_support::decl_module! {
- pub struct Module for enum Call where origin: ::Origin {
- }
- }
+ srml_support::decl_module!
{ + pub struct Module for enum Call where origin: ::Origin { + } + } } impl module1::Trait for Runtime { - type Event = Event; - type Origin = Origin; - type Log = Log; + type Event = Event; + type Origin = Origin; + type Log = Log; } impl module1::Trait for Runtime { - type Event = Event; - type Origin = Origin; - type Log = Log; + type Event = Event; + type Origin = Origin; + type Log = Log; } impl module2::Trait for Runtime { - type Amount = u16; - type Event = Event; - type Origin = Origin; - type Log = Log; + type Amount = u16; + type Event = Event; + type Origin = Origin; + type Log = Log; } impl module2::Trait for Runtime { - type Amount = u32; - type Event = Event; - type Origin = Origin; - type Log = Log; + type Amount = u32; + type Event = Event; + type Origin = Origin; + type Log = Log; } impl module2::Trait for Runtime { - type Amount = u32; - type Event = Event; - type Origin = Origin; - type Log = Log; + type Amount = u32; + type Event = Event; + type Origin = Origin; + type Log = Log; } impl module2::Trait for Runtime { - type Amount = u64; - type Event = Event; - type Origin = Origin; - type Log = Log; + type Amount = u64; + type Event = Event; + type Origin = Origin; + type Log = Log; } impl module3::Trait for Runtime { - type Currency = Module2_2; - type Currency2 = Module2_3; + type Currency = Module2_2; + type Currency2 = Module2_3; } pub type Signature = sr25519::Signature; @@ -327,13 +323,13 @@ pub type BlockNumber = u64; pub type Index = u64; impl system::Trait for Runtime { - type Hash = H256; - type Origin = Origin; - type BlockNumber = BlockNumber; - type Digest = generic::Digest; - type AccountId = AccountId; - type Event = Event; - type Log = Log; + type Hash = H256; + type Origin = Origin; + type BlockNumber = BlockNumber; + type Digest = generic::Digest; + type AccountId = AccountId; + type Event = Event; + type Log = Log; } srml_support::construct_runtime!( @@ -358,141 +354,147 @@ pub type Block = generic::Block; pub type UncheckedExtrinsic = generic::UncheckedMortalCompactExtrinsic; fn new_test_ext() -> runtime_io::TestExternalities { - GenesisConfig{ - module1_Instance1: Some(module1::GenesisConfig { - value: 3, - .. 
Default::default() - }), - module1_Instance2: Some(module1::GenesisConfig { - value: 4, - _genesis_phantom_data: Default::default(), - }), - module2: Some(module2::GenesisConfig { - value: 4, - map: vec![(0, 0)], - linked_map: vec![(0, 0)], - double_map: vec![(0, 0, 0)], - }), - module2_Instance1: Some(module2::GenesisConfig { - value: 4, - map: vec![(0, 0)], - linked_map: vec![(0, 0)], - double_map: vec![(0, 0, 0)], - }), - module2_Instance2: None, - module2_Instance3: None, - }.build_storage().unwrap().0.into() + GenesisConfig { + module1_Instance1: Some(module1::GenesisConfig { + value: 3, + ..Default::default() + }), + module1_Instance2: Some(module1::GenesisConfig { + value: 4, + _genesis_phantom_data: Default::default(), + }), + module2: Some(module2::GenesisConfig { + value: 4, + map: vec![(0, 0)], + linked_map: vec![(0, 0)], + double_map: vec![(0, 0, 0)], + }), + module2_Instance1: Some(module2::GenesisConfig { + value: 4, + map: vec![(0, 0)], + linked_map: vec![(0, 0)], + double_map: vec![(0, 0, 0)], + }), + module2_Instance2: None, + module2_Instance3: None, + } + .build_storage() + .unwrap() + .0 + .into() } #[test] fn storage_instance_independance() { - with_externalities(&mut new_test_ext(), || { - let mut map = rstd::collections::btree_map::BTreeMap::new(); - for key in [ - module2::Value::::key().to_vec(), - module2::Value::::key().to_vec(), - module2::Value::::key().to_vec(), - module2::Value::::key().to_vec(), - module2::Map::::prefix().to_vec(), - module2::Map::::prefix().to_vec(), - module2::Map::::prefix().to_vec(), - module2::Map::::prefix().to_vec(), - module2::LinkedMap::::prefix().to_vec(), - module2::LinkedMap::::prefix().to_vec(), - module2::LinkedMap::::prefix().to_vec(), - module2::LinkedMap::::prefix().to_vec(), - module2::DoubleMap::::prefix().to_vec(), - module2::DoubleMap::::prefix().to_vec(), - module2::DoubleMap::::prefix().to_vec(), - module2::DoubleMap::::prefix().to_vec(), - module2::Map::::key_for(0), - module2::Map::::key_for(0).to_vec(), - module2::Map::::key_for(0).to_vec(), - module2::Map::::key_for(0).to_vec(), - module2::LinkedMap::::key_for(0), - module2::LinkedMap::::key_for(0).to_vec(), - module2::LinkedMap::::key_for(0).to_vec(), - module2::LinkedMap::::key_for(0).to_vec(), - module2::Map::::key_for(1), - module2::Map::::key_for(1).to_vec(), - module2::Map::::key_for(1).to_vec(), - module2::Map::::key_for(1).to_vec(), - module2::LinkedMap::::key_for(1), - module2::LinkedMap::::key_for(1).to_vec(), - module2::LinkedMap::::key_for(1).to_vec(), - module2::LinkedMap::::key_for(1).to_vec(), - module2::DoubleMap::::prefix_for(1), - module2::DoubleMap::::prefix_for(1).to_vec(), - module2::DoubleMap::::prefix_for(1).to_vec(), - module2::DoubleMap::::prefix_for(1).to_vec(), - module2::DoubleMap::::key_for(1, 1), - module2::DoubleMap::::key_for(1, 1).to_vec(), - module2::DoubleMap::::key_for(1, 1).to_vec(), - module2::DoubleMap::::key_for(1, 1).to_vec(), - ].iter() { - assert!(map.insert(key, ()).is_none()) - } - }); + with_externalities(&mut new_test_ext(), || { + let mut map = rstd::collections::btree_map::BTreeMap::new(); + for key in [ + module2::Value::::key().to_vec(), + module2::Value::::key().to_vec(), + module2::Value::::key().to_vec(), + module2::Value::::key().to_vec(), + module2::Map::::prefix().to_vec(), + module2::Map::::prefix().to_vec(), + module2::Map::::prefix().to_vec(), + module2::Map::::prefix().to_vec(), + module2::LinkedMap::::prefix().to_vec(), + module2::LinkedMap::::prefix().to_vec(), + module2::LinkedMap::::prefix().to_vec(), 
+ module2::LinkedMap::::prefix().to_vec(),
+ module2::DoubleMap::::prefix().to_vec(),
+ module2::DoubleMap::::prefix().to_vec(),
+ module2::DoubleMap::::prefix().to_vec(),
+ module2::DoubleMap::::prefix().to_vec(),
+ module2::Map::::key_for(0),
+ module2::Map::::key_for(0).to_vec(),
+ module2::Map::::key_for(0).to_vec(),
+ module2::Map::::key_for(0).to_vec(),
+ module2::LinkedMap::::key_for(0),
+ module2::LinkedMap::::key_for(0).to_vec(),
+ module2::LinkedMap::::key_for(0).to_vec(),
+ module2::LinkedMap::::key_for(0).to_vec(),
+ module2::Map::::key_for(1),
+ module2::Map::::key_for(1).to_vec(),
+ module2::Map::::key_for(1).to_vec(),
+ module2::Map::::key_for(1).to_vec(),
+ module2::LinkedMap::::key_for(1),
+ module2::LinkedMap::::key_for(1).to_vec(),
+ module2::LinkedMap::::key_for(1).to_vec(),
+ module2::LinkedMap::::key_for(1).to_vec(),
+ module2::DoubleMap::::prefix_for(1),
+ module2::DoubleMap::::prefix_for(1).to_vec(),
+ module2::DoubleMap::::prefix_for(1).to_vec(),
+ module2::DoubleMap::::prefix_for(1).to_vec(),
+ module2::DoubleMap::::key_for(1, 1),
+ module2::DoubleMap::::key_for(1, 1).to_vec(),
+ module2::DoubleMap::::key_for(1, 1).to_vec(),
+ module2::DoubleMap::::key_for(1, 1).to_vec(),
+ ]
+ .iter()
+ {
+ assert!(map.insert(key, ()).is_none())
+ }
+ });
}

// TODO: check the configuration of double map storage in instances
#[test]
fn storage_with_instance_basic_operation() {
- with_externalities(&mut new_test_ext(), || {
- type Value = module2::Value;
- type Map = module2::Map;
- type LinkedMap = module2::LinkedMap;
- type DoubleMap = module2::DoubleMap;
-
- assert_eq!(Value::exists(), true);
- assert_eq!(Value::get(), 4);
- Value::put(1);
- assert_eq!(Value::get(), 1);
- assert_eq!(Value::take(), 1);
- assert_eq!(Value::get(), 0);
- Value::mutate(|a| *a=2);
- assert_eq!(Value::get(), 2);
- Value::kill();
- assert_eq!(Value::exists(), false);
- assert_eq!(Value::get(), 0);
-
- let key = 1;
- assert_eq!(Map::exists(0), true);
- assert_eq!(Map::exists(key), false);
- Map::insert(key, 1);
- assert_eq!(Map::get(key), 1);
- assert_eq!(Map::take(key), 1);
- assert_eq!(Map::get(key), 0);
- Map::mutate(key, |a| *a=2);
- assert_eq!(Map::get(key), 2);
- Map::remove(key);
- assert_eq!(Map::exists(key), false);
- assert_eq!(Map::get(key), 0);
-
- assert_eq!(LinkedMap::exists(0), true);
- assert_eq!(LinkedMap::exists(key), false);
- LinkedMap::insert(key, 1);
- assert_eq!(LinkedMap::get(key), 1);
- assert_eq!(LinkedMap::take(key), 1);
- assert_eq!(LinkedMap::get(key), 0);
- LinkedMap::mutate(key, |a| *a=2);
- assert_eq!(LinkedMap::get(key), 2);
- LinkedMap::remove(key);
- assert_eq!(LinkedMap::exists(key), false);
- assert_eq!(LinkedMap::get(key), 0);
-
- let key1 = 1;
- let key2 = 1;
- assert_eq!(DoubleMap::exists(0, 0), true);
- assert_eq!(DoubleMap::exists(key1, key2), false);
- DoubleMap::insert(key1, key2, 1);
- assert_eq!(DoubleMap::get(key1, key2), 1);
- assert_eq!(DoubleMap::take(key1, key2), 1);
- assert_eq!(DoubleMap::get(key1, key2), 0);
- DoubleMap::mutate(key1, key2, |a| *a=2);
- assert_eq!(DoubleMap::get(key1, key2), 2);
- DoubleMap::remove(key1, key2);
- assert_eq!(DoubleMap::get(key1, key2), 0);
- });
+ with_externalities(&mut new_test_ext(), || {
+ type Value = module2::Value;
+ type Map = module2::Map;
+ type LinkedMap = module2::LinkedMap;
+ type DoubleMap = module2::DoubleMap;
+
+ assert_eq!(Value::exists(), true);
+ assert_eq!(Value::get(), 4);
+ Value::put(1);
+ assert_eq!(Value::get(), 1);
+ assert_eq!(Value::take(), 1);
+ assert_eq!(Value::get(), 0);
+ Value::mutate(|a|
*a = 2); + assert_eq!(Value::get(), 2); + Value::kill(); + assert_eq!(Value::exists(), false); + assert_eq!(Value::get(), 0); + + let key = 1; + assert_eq!(Map::exists(0), true); + assert_eq!(Map::exists(key), false); + Map::insert(key, 1); + assert_eq!(Map::get(key), 1); + assert_eq!(Map::take(key), 1); + assert_eq!(Map::get(key), 0); + Map::mutate(key, |a| *a = 2); + assert_eq!(Map::get(key), 2); + Map::remove(key); + assert_eq!(Map::exists(key), false); + assert_eq!(Map::get(key), 0); + + assert_eq!(LinkedMap::exists(0), true); + assert_eq!(LinkedMap::exists(key), false); + LinkedMap::insert(key, 1); + assert_eq!(LinkedMap::get(key), 1); + assert_eq!(LinkedMap::take(key), 1); + assert_eq!(LinkedMap::get(key), 0); + LinkedMap::mutate(key, |a| *a = 2); + assert_eq!(LinkedMap::get(key), 2); + LinkedMap::remove(key); + assert_eq!(LinkedMap::exists(key), false); + assert_eq!(LinkedMap::get(key), 0); + + let key1 = 1; + let key2 = 1; + assert_eq!(DoubleMap::exists(0, 0), true); + assert_eq!(DoubleMap::exists(key1, key2), false); + DoubleMap::insert(key1, key2, 1); + assert_eq!(DoubleMap::get(key1, key2), 1); + assert_eq!(DoubleMap::take(key1, key2), 1); + assert_eq!(DoubleMap::get(key1, key2), 0); + DoubleMap::mutate(key1, key2, |a| *a = 2); + assert_eq!(DoubleMap::get(key1, key2), 2); + DoubleMap::remove(key1, key2); + assert_eq!(DoubleMap::get(key1, key2), 0); + }); } diff --git a/srml/system/src/lib.rs b/srml/system/src/lib.rs index f226fcb7d1..ec456efa6d 100644 --- a/srml/system/src/lib.rs +++ b/srml/system/src/lib.rs @@ -15,46 +15,46 @@ // along with Substrate. If not, see . //! # System module -//! +//! //! The system module provides low-level access to core types and cross-cutting utilities. //! It acts as the base layer for other SRML modules to interact with the Substrate framework components. //! To use it in your module, you should ensure your module's trait implies the system [`Trait`]. -//! +//! //! ## Overview -//! +//! //! The system module defines the core data types used in a Substrate runtime. //! It also provides several utility functions (see [`Module`]) for other runtime modules. -//! -//! In addition, it manages the storage items for extrinsics data, indexes, event record and digest items, +//! +//! In addition, it manages the storage items for extrinsics data, indexes, event record and digest items, //! among other things that support the execution of the current block. -//! +//! //! It also handles low level tasks like depositing logs, basic set up and take down of //! temporary storage entries and access to previous block hashes. -//! +//! //! ## Interface -//! +//! //! ### Dispatchable functions -//! +//! //! The system module does not implement any dispatchable functions. -//! +//! //! ### Public functions -//! +//! //! All public functions are available as part of the [`Module`] type. -//! +//! //! ## Usage -//! +//! //! ### Prerequisites -//! +//! //! Import the system module and derive your module's configuration trait from the system trait. -//! +//! //! ### Example - Get random seed and extrinsic count for the current block -//! +//! //! ``` //! use srml_support::{decl_module, dispatch::Result}; //! use srml_system::{self as system, ensure_signed}; -//! +//! //! pub trait Trait: system::Trait {} -//! +//! //! decl_module! { //! pub struct Module for enum Call where origin: T::Origin { //! 
pub fn system_module_example(origin) -> Result { @@ -70,550 +70,626 @@ #![cfg_attr(not(feature = "std"), no_std)] -#[cfg(feature = "std")] -use serde_derive::Serialize; -use rstd::prelude::*; +use parity_codec::{Decode, Encode}; +use primitives::traits::{ + self, As, BlockNumberToHash, Bounded, CheckEqual, CurrentHeight, Digest as DigestT, + EnsureOrigin, Hash, Lookup, MaybeDisplay, MaybeSerializeDebug, + MaybeSerializeDebugButNotDeserialize, Member, One, SimpleArithmetic, SimpleBitOps, + StaticLookup, Zero, +}; #[cfg(any(feature = "std", test))] use rstd::map; -use primitives::traits::{self, CheckEqual, SimpleArithmetic, SimpleBitOps, Zero, One, Bounded, Lookup, - Hash, Member, MaybeDisplay, EnsureOrigin, Digest as DigestT, As, CurrentHeight, BlockNumberToHash, - MaybeSerializeDebugButNotDeserialize, MaybeSerializeDebug, StaticLookup}; -use substrate_primitives::storage::well_known_keys; -use srml_support::{storage, StorageValue, StorageMap, Parameter, decl_module, decl_event, decl_storage}; +use rstd::prelude::*; use safe_mix::TripletMix; -use parity_codec::{Encode, Decode}; +#[cfg(feature = "std")] +use serde_derive::Serialize; +use srml_support::{ + decl_event, decl_module, decl_storage, storage, Parameter, StorageMap, StorageValue, +}; +use substrate_primitives::storage::well_known_keys; #[cfg(any(feature = "std", test))] -use runtime_io::{twox_128, TestExternalities, Blake2Hasher}; +use runtime_io::{twox_128, Blake2Hasher, TestExternalities}; #[cfg(any(feature = "std", test))] use substrate_primitives::ChangesTrieConfiguration; /// Handler for when a new account has been created. pub trait OnNewAccount { - /// A new account `who` has been registered. - fn on_new_account(who: &AccountId); + /// A new account `who` has been registered. + fn on_new_account(who: &AccountId); } impl OnNewAccount for () { - fn on_new_account(_who: &AccountId) {} + fn on_new_account(_who: &AccountId) {} } /// Determinator to say whether a given account is unused. pub trait IsDeadAccount { - /// Is the given account dead? - fn is_dead_account(who: &AccountId) -> bool; + /// Is the given account dead? + fn is_dead_account(who: &AccountId) -> bool; } impl IsDeadAccount for () { - fn is_dead_account(_who: &AccountId) -> bool { - true - } + fn is_dead_account(_who: &AccountId) -> bool { + true + } } /// Compute the trie root of a list of extrinsics. pub fn extrinsics_root(extrinsics: &[E]) -> H::Output { - extrinsics_data_root::(extrinsics.iter().map(parity_codec::Encode::encode).collect()) + extrinsics_data_root::( + extrinsics + .iter() + .map(parity_codec::Encode::encode) + .collect(), + ) } /// Compute the trie root of a list of extrinsics. pub fn extrinsics_data_root(xts: Vec>) -> H::Output { - let xts = xts.iter().map(Vec::as_slice).collect::>(); - H::enumerated_trie_root(&xts) + let xts = xts.iter().map(Vec::as_slice).collect::>(); + H::enumerated_trie_root(&xts) } pub trait Trait: 'static + Eq + Clone { - /// The aggregated `Origin` type used by dispatchable calls. - type Origin: Into>> + From>; - - /// Account index (aka nonce) type. This stores the number of previous transactions associated with a sender - /// account. - type Index: - Parameter + Member + MaybeSerializeDebugButNotDeserialize + Default + MaybeDisplay + SimpleArithmetic + Copy; - - /// The block number type used by the runtime. - type BlockNumber: - Parameter + Member + MaybeSerializeDebug + MaybeDisplay + SimpleArithmetic + Default + Bounded + Copy - + rstd::hash::Hash; - - /// The output of the `Hashing` function. 
- type Hash: - Parameter + Member + MaybeSerializeDebug + MaybeDisplay + SimpleBitOps + Default + Copy + CheckEqual - + rstd::hash::Hash + AsRef<[u8]> + AsMut<[u8]>; - - /// The hashing system (algorithm) being used in the runtime (e.g. Blake2). - type Hashing: Hash; - - /// Collection of (light-client-relevant) logs for a block to be included verbatim in the block header. - type Digest: - Parameter + Member + MaybeSerializeDebugButNotDeserialize + Default + traits::Digest; - - /// The user account identifier type for the runtime. - type AccountId: Parameter + Member + MaybeSerializeDebug + MaybeDisplay + Ord + Default; - - /// Converting trait to take a source type and convert to `AccountId`. - /// - /// Used to define the type and conversion mechanism for referencing accounts in transactions. It's perfectly - /// reasonable for this to be an identity conversion (with the source type being `AccountId`), but other modules - /// (e.g. Indices module) may provide more functional/efficient alternatives. - type Lookup: StaticLookup; - - /// The block header. - type Header: Parameter + traits::Header< - Number = Self::BlockNumber, - Hash = Self::Hash, - Digest = Self::Digest - >; - - /// The aggregated event type of the runtime. - type Event: Parameter + Member + From; - - /// A piece of information which can be part of the digest (as a digest item). - type Log: From> + Into>; + /// The aggregated `Origin` type used by dispatchable calls. + type Origin: Into>> + From>; + + /// Account index (aka nonce) type. This stores the number of previous transactions associated with a sender + /// account. + type Index: Parameter + + Member + + MaybeSerializeDebugButNotDeserialize + + Default + + MaybeDisplay + + SimpleArithmetic + + Copy; + + /// The block number type used by the runtime. + type BlockNumber: Parameter + + Member + + MaybeSerializeDebug + + MaybeDisplay + + SimpleArithmetic + + Default + + Bounded + + Copy + + rstd::hash::Hash; + + /// The output of the `Hashing` function. + type Hash: Parameter + + Member + + MaybeSerializeDebug + + MaybeDisplay + + SimpleBitOps + + Default + + Copy + + CheckEqual + + rstd::hash::Hash + + AsRef<[u8]> + + AsMut<[u8]>; + + /// The hashing system (algorithm) being used in the runtime (e.g. Blake2). + type Hashing: Hash; + + /// Collection of (light-client-relevant) logs for a block to be included verbatim in the block header. + type Digest: Parameter + + Member + + MaybeSerializeDebugButNotDeserialize + + Default + + traits::Digest; + + /// The user account identifier type for the runtime. + type AccountId: Parameter + Member + MaybeSerializeDebug + MaybeDisplay + Ord + Default; + + /// Converting trait to take a source type and convert to `AccountId`. + /// + /// Used to define the type and conversion mechanism for referencing accounts in transactions. It's perfectly + /// reasonable for this to be an identity conversion (with the source type being `AccountId`), but other modules + /// (e.g. Indices module) may provide more functional/efficient alternatives. + type Lookup: StaticLookup; + + /// The block header. + type Header: Parameter + + traits::Header< + Number = Self::BlockNumber, + Hash = Self::Hash, + Digest = Self::Digest, + >; + + /// The aggregated event type of the runtime. + type Event: Parameter + Member + From; + + /// A piece of information which can be part of the digest (as a digest item). + type Log: From> + Into>; } pub type DigestItemOf = <::Digest as traits::Digest>::Item; decl_module! 
{
- pub struct Module for enum Call where origin: T::Origin {
- /// Deposits an event onto this block's event record.
- pub fn deposit_event(event: T::Event) {
- let extrinsic_index = Self::extrinsic_index();
- let phase = extrinsic_index.map_or(Phase::Finalization, |c| Phase::ApplyExtrinsic(c));
- let mut events = Self::events();
- events.push(EventRecord { phase, event });
- >::put(events);
- }
- }
+ pub struct Module for enum Call where origin: T::Origin {
+ /// Deposits an event onto this block's event record.
+ pub fn deposit_event(event: T::Event) {
+ let extrinsic_index = Self::extrinsic_index();
+ let phase = extrinsic_index.map_or(Phase::Finalization, |c| Phase::ApplyExtrinsic(c));
+ let mut events = Self::events();
+ events.push(EventRecord { phase, event });
+ >::put(events);
+ }
+ }
}

/// A phase of a block's execution.
#[derive(Encode, Decode)]
#[cfg_attr(feature = "std", derive(Serialize, PartialEq, Eq, Clone, Debug))]
pub enum Phase {
- /// Applying an extrinsic.
- ApplyExtrinsic(u32),
- /// The end.
- Finalization,
+ /// Applying an extrinsic.
+ ApplyExtrinsic(u32),
+ /// The end.
+ Finalization,
}

/// Record of an event happening.
#[derive(Encode, Decode)]
#[cfg_attr(feature = "std", derive(Serialize, PartialEq, Eq, Clone, Debug))]
pub struct EventRecord {
- /// The phase of the block it happened in.
- pub phase: Phase,
- /// The event itself.
- pub event: E,
+ /// The phase of the block it happened in.
+ pub phase: Phase,
+ /// The event itself.
+ pub event: E,
}

decl_event!(
- /// Event for the system module.
- pub enum Event {
- /// An extrinsic completed successfully.
- ExtrinsicSuccess,
- /// An extrinsic failed.
- ExtrinsicFailed,
- }
+ /// Event for the system module.
+ pub enum Event {
+ /// An extrinsic completed successfully.
+ ExtrinsicSuccess,
+ /// An extrinsic failed.
+ ExtrinsicFailed,
+ }
);

/// Origin for the system module.
#[derive(PartialEq, Eq, Clone)]
#[cfg_attr(feature = "std", derive(Debug))]
pub enum RawOrigin {
- /// The system itself ordained this dispatch to happen: this is the highest privilege level.
- Root,
- /// It is signed by some public key and we provide the AccountId.
- Signed(AccountId),
- /// It is signed by nobody but included and agreed upon by the validators anyway: it's "inherently" true.
- Inherent,
+ /// The system itself ordained this dispatch to happen: this is the highest privilege level.
+ Root,
+ /// It is signed by some public key and we provide the AccountId.
+ Signed(AccountId),
+ /// It is signed by nobody but included and agreed upon by the validators anyway: it's "inherently" true.
+ Inherent,
}

impl From> for RawOrigin {
- fn from(s: Option) -> RawOrigin {
- match s {
- Some(who) => RawOrigin::Signed(who),
- None => RawOrigin::Inherent,
- }
- }
+ fn from(s: Option) -> RawOrigin {
+ match s {
+ Some(who) => RawOrigin::Signed(who),
+ None => RawOrigin::Inherent,
+ }
+ }
}

/// Exposed trait-generic origin type.
pub type Origin = RawOrigin<::AccountId>;

-pub type Log = RawLog<
- ::Hash,
->;
+pub type Log = RawLog<::Hash>;

/// A log in this module.
#[cfg_attr(feature = "std", derive(Serialize, Debug))]
#[derive(Encode, Decode, PartialEq, Eq, Clone)]
pub enum RawLog {
- /// Changes trie has been computed for this block. Contains the root of
- /// changes trie.
- ChangesTrieRoot(Hash),
+ /// Changes trie has been computed for this block. Contains the root of
+ /// changes trie.
+ ChangesTrieRoot(Hash),
}

impl RawLog {
- /// Try to cast the log entry as ChangesTrieRoot log entry.
- pub fn as_changes_trie_root(&self) -> Option<&Hash> { - match *self { - RawLog::ChangesTrieRoot(ref item) => Some(item), - } - } + /// Try to cast the log entry as ChangesTrieRoot log entry. + pub fn as_changes_trie_root(&self) -> Option<&Hash> { + match *self { + RawLog::ChangesTrieRoot(ref item) => Some(item), + } + } } // Implementation for tests outside of this crate. #[cfg(any(feature = "std", test))] impl From> for primitives::testing::DigestItem { - fn from(log: RawLog) -> primitives::testing::DigestItem { - match log { - RawLog::ChangesTrieRoot(root) => primitives::generic::DigestItem::ChangesTrieRoot(root), - } - } + fn from(log: RawLog) -> primitives::testing::DigestItem { + match log { + RawLog::ChangesTrieRoot(root) => primitives::generic::DigestItem::ChangesTrieRoot(root), + } + } } // Create a Hash with 69 for each byte, // only used to build genesis config. #[cfg(feature = "std")] fn hash69 + Default>() -> T { - let mut h = T::default(); - h.as_mut().iter_mut().for_each(|byte| *byte = 69); - h + let mut h = T::default(); + h.as_mut().iter_mut().for_each(|byte| *byte = 69); + h } decl_storage! { - trait Store for Module as System { - /// Extrinsics nonce for accounts. - pub AccountNonce get(account_nonce): map T::AccountId => T::Index; - /// Total extrinsics count for the current block. - ExtrinsicCount: Option; - /// Total length in bytes for all extrinsics put together, for the current block. - AllExtrinsicsLen: Option; - /// Map of block numbers to block hashes. - pub BlockHash get(block_hash) build(|_| vec![(T::BlockNumber::zero(), hash69())]): map T::BlockNumber => T::Hash; - /// Extrinsics data for the current block (maps extrinsic's index to its data). - ExtrinsicData get(extrinsic_data): map u32 => Vec; - /// Random seed of the current block. - RandomSeed get(random_seed) build(|_| T::Hash::default()): T::Hash; - /// The current block number being processed. Set by `execute_block`. - Number get(block_number) build(|_| T::BlockNumber::sa(1u64)): T::BlockNumber; - /// Hash of the previous block. - ParentHash get(parent_hash) build(|_| hash69()): T::Hash; - /// Extrinsics root of the current block, also part of the block header. - ExtrinsicsRoot get(extrinsics_root): T::Hash; - /// Digest of the current block, also part of the block header. - Digest get(digest): T::Digest; - /// Events deposited for the current block. - Events get(events): Vec>; - } - add_extra_genesis { - config(changes_trie_config): Option; - - build(|storage: &mut primitives::StorageOverlay, _: &mut primitives::ChildrenStorageOverlay, config: &GenesisConfig| { - use parity_codec::Encode; - - storage.insert(well_known_keys::EXTRINSIC_INDEX.to_vec(), 0u32.encode()); - - if let Some(ref changes_trie_config) = config.changes_trie_config { - storage.insert( - well_known_keys::CHANGES_TRIE_CONFIG.to_vec(), - changes_trie_config.encode()); - } - }); - } + trait Store for Module as System { + /// Extrinsics nonce for accounts. + pub AccountNonce get(account_nonce): map T::AccountId => T::Index; + /// Total extrinsics count for the current block. + ExtrinsicCount: Option; + /// Total length in bytes for all extrinsics put together, for the current block. + AllExtrinsicsLen: Option; + /// Map of block numbers to block hashes. + pub BlockHash get(block_hash) build(|_| vec![(T::BlockNumber::zero(), hash69())]): map T::BlockNumber => T::Hash; + /// Extrinsics data for the current block (maps extrinsic's index to its data). 
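+ ///
+ /// Each entry holds one extrinsic's parity-codec-encoded bytes, keyed by its
+ /// index in the block. As a hedged sketch (for some concrete runtime `T`
+ /// implementing this module's `Trait`), the first extrinsic's raw bytes could
+ /// be read back through the generated getter:
+ ///
+ /// ```ignore
+ /// // index 0 is the first extrinsic noted for the current block; an unused
+ /// // index simply decodes to the default (empty) byte vector.
+ /// let raw: Vec<u8> = Module::<T>::extrinsic_data(0);
+ /// ```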
+ ExtrinsicData get(extrinsic_data): map u32 => Vec; + /// Random seed of the current block. + RandomSeed get(random_seed) build(|_| T::Hash::default()): T::Hash; + /// The current block number being processed. Set by `execute_block`. + Number get(block_number) build(|_| T::BlockNumber::sa(1u64)): T::BlockNumber; + /// Hash of the previous block. + ParentHash get(parent_hash) build(|_| hash69()): T::Hash; + /// Extrinsics root of the current block, also part of the block header. + ExtrinsicsRoot get(extrinsics_root): T::Hash; + /// Digest of the current block, also part of the block header. + Digest get(digest): T::Digest; + /// Events deposited for the current block. + Events get(events): Vec>; + } + add_extra_genesis { + config(changes_trie_config): Option; + + build(|storage: &mut primitives::StorageOverlay, _: &mut primitives::ChildrenStorageOverlay, config: &GenesisConfig| { + use parity_codec::Encode; + + storage.insert(well_known_keys::EXTRINSIC_INDEX.to_vec(), 0u32.encode()); + + if let Some(ref changes_trie_config) = config.changes_trie_config { + storage.insert( + well_known_keys::CHANGES_TRIE_CONFIG.to_vec(), + changes_trie_config.encode()); + } + }); + } } pub struct EnsureRoot(::rstd::marker::PhantomData); impl>>, AccountId> EnsureOrigin for EnsureRoot { - type Success = (); - fn ensure_origin(o: O) -> Result { - ensure_root(o) - } + type Success = (); + fn ensure_origin(o: O) -> Result { + ensure_root(o) + } } /// Ensure that the origin `o` represents a signed extrinsic (i.e. transaction). /// Returns `Ok` with the account that signed the extrinsic or an `Err` otherwise. pub fn ensure_signed(o: OuterOrigin) -> Result - where OuterOrigin: Into>> +where + OuterOrigin: Into>>, { - match o.into() { - Some(RawOrigin::Signed(t)) => Ok(t), - _ => Err("bad origin: expected to be a signed origin"), - } + match o.into() { + Some(RawOrigin::Signed(t)) => Ok(t), + _ => Err("bad origin: expected to be a signed origin"), + } } /// Ensure that the origin `o` represents the root. Returns `Ok` or an `Err` otherwise. pub fn ensure_root(o: OuterOrigin) -> Result<(), &'static str> - where OuterOrigin: Into>> +where + OuterOrigin: Into>>, { - match o.into() { - Some(RawOrigin::Root) => Ok(()), - _ => Err("bad origin: expected to be a root origin"), - } + match o.into() { + Some(RawOrigin::Root) => Ok(()), + _ => Err("bad origin: expected to be a root origin"), + } } /// Ensure that the origin `o` represents an unsigned extrinsic. Returns `Ok` or an `Err` otherwise. pub fn ensure_inherent(o: OuterOrigin) -> Result<(), &'static str> - where OuterOrigin: Into>> +where + OuterOrigin: Into>>, { - match o.into() { - Some(RawOrigin::Inherent) => Ok(()), - _ => Err("bad origin: expected to be an inherent origin"), - } + match o.into() { + Some(RawOrigin::Inherent) => Ok(()), + _ => Err("bad origin: expected to be an inherent origin"), + } } impl Module { - /// Gets the index of extrinsic that is currenty executing. - pub fn extrinsic_index() -> Option { - storage::unhashed::get(well_known_keys::EXTRINSIC_INDEX) - } - - /// Gets extrinsics count. - pub fn extrinsic_count() -> u32 { - >::get().unwrap_or_default() - } - - /// Gets a total length of all executed extrinsics. - pub fn all_extrinsics_len() -> u32 { - >::get().unwrap_or_default() - } - - /// Start the execution of a particular block. - pub fn initialize(number: &T::BlockNumber, parent_hash: &T::Hash, txs_root: &T::Hash) { - // populate environment. 
- storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &0u32); - >::put(number); - >::put(parent_hash); - >::insert(*number - One::one(), parent_hash); - >::put(txs_root); - >::put(Self::calculate_random()); - >::kill(); - } - - /// Remove temporary "environment" entries in storage. - pub fn finalize() -> T::Header { - >::kill(); - >::kill(); - >::kill(); - - let number = >::take(); - let parent_hash = >::take(); - let mut digest = >::take(); - let extrinsics_root = >::take(); - let storage_root = T::Hashing::storage_root(); - let storage_changes_root = T::Hashing::storage_changes_root(parent_hash, number.as_() - 1); - - // we can't compute changes trie root earlier && put it to the Digest - // because it will include all currently existing temporaries - if let Some(storage_changes_root) = storage_changes_root { - let item = RawLog::ChangesTrieRoot(storage_changes_root); - let item = ::Log::from(item).into(); - digest.push(item); - } - - // > stays to be inspected by the client. - - ::new(number, extrinsics_root, storage_root, parent_hash, digest) - } - - /// Deposits a log and ensures it matches the blocks log data. - pub fn deposit_log(item: ::Item) { - let mut l = >::get(); - traits::Digest::push(&mut l, item); - >::put(l); - } - - /// Calculate the current block's random seed. - fn calculate_random() -> T::Hash { - assert!(Self::block_number() > Zero::zero(), "Block number may never be zero"); - (0..81) - .scan( - Self::block_number() - One::one(), - |c, _| { if *c > Zero::zero() { *c -= One::one() }; Some(*c) - }) - .map(Self::block_hash) - .triplet_mix() - } - - /// Get the basic externalities for this module, useful for tests. - #[cfg(any(feature = "std", test))] - pub fn externalities() -> TestExternalities { - TestExternalities::new(map![ - twox_128(&>::key_for(T::BlockNumber::zero())).to_vec() => [69u8; 32].encode(), - twox_128(>::key()).to_vec() => T::BlockNumber::one().encode(), - twox_128(>::key()).to_vec() => [69u8; 32].encode(), - twox_128(>::key()).to_vec() => T::Hash::default().encode() - ]) - } - - /// Set the block number to something in particular. Can be used as an alternative to - /// `initialize` for tests that don't need to bother with the other environment entries. - #[cfg(any(feature = "std", test))] - pub fn set_block_number(n: T::BlockNumber) { - >::put(n); - } - - /// Sets the index of extrinsic that is currenty executing. - #[cfg(any(feature = "std", test))] - pub fn set_extrinsic_index(extrinsic_index: u32) { - storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &extrinsic_index) - } - - /// Set the parent hash number to something in particular. Can be used as an alternative to - /// `initialize` for tests that don't need to bother with the other environment entries. - #[cfg(any(feature = "std", test))] - pub fn set_parent_hash(n: T::Hash) { - >::put(n); - } - - /// Set the random seed to something in particular. Can be used as an alternative to - /// `initialize` for tests that don't need to bother with the other environment entries. - #[cfg(any(feature = "std", test))] - pub fn set_random_seed(seed: T::Hash) { - >::put(seed); - } - - /// Increment a particular account's nonce by 1. - pub fn inc_account_nonce(who: &T::AccountId) { - >::insert(who, Self::account_nonce(who) + T::Index::one()); - } - - /// Note what the extrinsic data of the current extrinsic index is. If this is called, then - /// ensure `derive_extrinsics` is also called before block-building is completed. 
- ///
- /// NOTE this function is called only when the block is being constructed locally.
- /// `execute_block` doesn't note any extrinsics.
- pub fn note_extrinsic(encoded_xt: Vec) {
- >::insert(Self::extrinsic_index().unwrap_or_default(), encoded_xt);
- }
-
- /// To be called immediately after an extrinsic has been applied.
- pub fn note_applied_extrinsic(r: &Result<(), &'static str>, encoded_len: u32) {
- Self::deposit_event(match r {
- Ok(_) => Event::ExtrinsicSuccess,
- Err(_) => Event::ExtrinsicFailed,
- }.into());
-
- let next_extrinsic_index = Self::extrinsic_index().unwrap_or_default() + 1u32;
- let total_length = encoded_len.saturating_add(Self::all_extrinsics_len());
-
- storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &next_extrinsic_index);
- >::put(&total_length);
- }
-
- /// To be called immediately after `note_applied_extrinsic` of the last extrinsic of the block
- /// has been called.
- pub fn note_finished_extrinsics() {
- let extrinsic_index: u32 = storage::unhashed::take(well_known_keys::EXTRINSIC_INDEX).unwrap_or_default();
- >::put(extrinsic_index);
- }
-
- /// Remove all extrinsics data and save the extrinsics trie root.
- pub fn derive_extrinsics() {
- let extrinsics = (0..>::get().unwrap_or_default()).map(>::take).collect();
- let xts_root = extrinsics_data_root::(extrinsics);
- >::put(xts_root);
- }
+ /// Gets the index of the extrinsic that is currently executing.
+ pub fn extrinsic_index() -> Option {
+ storage::unhashed::get(well_known_keys::EXTRINSIC_INDEX)
+ }
+
+ /// Gets extrinsics count.
+ pub fn extrinsic_count() -> u32 {
+ >::get().unwrap_or_default()
+ }
+
+ /// Gets a total length of all executed extrinsics.
+ pub fn all_extrinsics_len() -> u32 {
+ >::get().unwrap_or_default()
+ }
+
+ /// Start the execution of a particular block.
+ pub fn initialize(number: &T::BlockNumber, parent_hash: &T::Hash, txs_root: &T::Hash) {
+ // populate environment.
+ storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &0u32);
+ >::put(number);
+ >::put(parent_hash);
+ >::insert(*number - One::one(), parent_hash);
+ >::put(txs_root);
+ >::put(Self::calculate_random());
+ >::kill();
+ }
+
+ /// Remove temporary "environment" entries in storage.
+ pub fn finalize() -> T::Header {
+ >::kill();
+ >::kill();
+ >::kill();
+
+ let number = >::take();
+ let parent_hash = >::take();
+ let mut digest = >::take();
+ let extrinsics_root = >::take();
+ let storage_root = T::Hashing::storage_root();
+ let storage_changes_root = T::Hashing::storage_changes_root(parent_hash, number.as_() - 1);
+
+ // we can't compute changes trie root earlier && put it to the Digest
+ // because it will include all currently existing temporaries
+ if let Some(storage_changes_root) = storage_changes_root {
+ let item = RawLog::ChangesTrieRoot(storage_changes_root);
+ let item = ::Log::from(item).into();
+ digest.push(item);
+ }
+
+ // > stays to be inspected by the client.
+
+ ::new(
+ number,
+ extrinsics_root,
+ storage_root,
+ parent_hash,
+ digest,
+ )
+ }
+
+ /// Deposits a log and ensures it matches the block's log data.
+ pub fn deposit_log(item: ::Item) {
+ let mut l = >::get();
+ traits::Digest::push(&mut l, item);
+ >::put(l);
+ }
+
+ /// Calculate the current block's random seed.
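+ ///
+ /// The seed mixes the hashes of up to 81 ancestor blocks via `TripletMix`;
+ /// on a young chain the backwards walk clamps at block 0, so block 0's hash
+ /// repeats. A hedged sketch of the window with plain integers (an
+ /// illustration only, not the generic code below):
+ ///
+ /// ```ignore
+ /// let n = 3u64; // current block number
+ /// let window: Vec<u64> = (0..81)
+ ///     .scan(n - 1, |c, _| {
+ ///         if *c > 0 { *c -= 1 };
+ ///         Some(*c)
+ ///     })
+ ///     .collect();
+ /// // yields block numbers [1, 0, 0, ...]; their hashes are what gets mixed.
+ /// assert!(window.starts_with(&[1, 0, 0]));
+ /// ```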
+ fn calculate_random() -> T::Hash {
+ assert!(
+ Self::block_number() > Zero::zero(),
+ "Block number may never be zero"
+ );
+ (0..81)
+ .scan(Self::block_number() - One::one(), |c, _| {
+ if *c > Zero::zero() {
+ *c -= One::one()
+ };
+ Some(*c)
+ })
+ .map(Self::block_hash)
+ .triplet_mix()
+ }
+
+ /// Get the basic externalities for this module, useful for tests.
+ #[cfg(any(feature = "std", test))]
+ pub fn externalities() -> TestExternalities {
+ TestExternalities::new(map![
+ twox_128(&>::key_for(T::BlockNumber::zero())).to_vec() => [69u8; 32].encode(),
+ twox_128(>::key()).to_vec() => T::BlockNumber::one().encode(),
+ twox_128(>::key()).to_vec() => [69u8; 32].encode(),
+ twox_128(>::key()).to_vec() => T::Hash::default().encode()
+ ])
+ }
+
+ /// Set the block number to something in particular. Can be used as an alternative to
+ /// `initialize` for tests that don't need to bother with the other environment entries.
+ #[cfg(any(feature = "std", test))]
+ pub fn set_block_number(n: T::BlockNumber) {
+ >::put(n);
+ }
+
+ /// Sets the index of the extrinsic that is currently executing.
+ #[cfg(any(feature = "std", test))]
+ pub fn set_extrinsic_index(extrinsic_index: u32) {
+ storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &extrinsic_index)
+ }
+
+ /// Set the parent hash number to something in particular. Can be used as an alternative to
+ /// `initialize` for tests that don't need to bother with the other environment entries.
+ #[cfg(any(feature = "std", test))]
+ pub fn set_parent_hash(n: T::Hash) {
+ >::put(n);
+ }
+
+ /// Set the random seed to something in particular. Can be used as an alternative to
+ /// `initialize` for tests that don't need to bother with the other environment entries.
+ #[cfg(any(feature = "std", test))]
+ pub fn set_random_seed(seed: T::Hash) {
+ >::put(seed);
+ }
+
+ /// Increment a particular account's nonce by 1.
+ pub fn inc_account_nonce(who: &T::AccountId) {
+ >::insert(who, Self::account_nonce(who) + T::Index::one());
+ }
+
+ /// Note what the extrinsic data of the current extrinsic index is. If this is called, then
+ /// ensure `derive_extrinsics` is also called before block-building is completed.
+ ///
+ /// NOTE this function is called only when the block is being constructed locally.
+ /// `execute_block` doesn't note any extrinsics.
+ pub fn note_extrinsic(encoded_xt: Vec) {
+ >::insert(Self::extrinsic_index().unwrap_or_default(), encoded_xt);
+ }
+
+ /// To be called immediately after an extrinsic has been applied.
+ pub fn note_applied_extrinsic(r: &Result<(), &'static str>, encoded_len: u32) {
+ Self::deposit_event(
+ match r {
+ Ok(_) => Event::ExtrinsicSuccess,
+ Err(_) => Event::ExtrinsicFailed,
+ }
+ .into(),
+ );
+
+ let next_extrinsic_index = Self::extrinsic_index().unwrap_or_default() + 1u32;
+ let total_length = encoded_len.saturating_add(Self::all_extrinsics_len());
+
+ storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &next_extrinsic_index);
+ >::put(&total_length);
+ }
+
+ /// To be called immediately after `note_applied_extrinsic` of the last extrinsic of the block
+ /// has been called.
+ pub fn note_finished_extrinsics() {
+ let extrinsic_index: u32 =
+ storage::unhashed::take(well_known_keys::EXTRINSIC_INDEX).unwrap_or_default();
+ >::put(extrinsic_index);
+ }
+
+ /// Remove all extrinsics data and save the extrinsics trie root.
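+ ///
+ /// Conceptually (a hedged sketch with hypothetical helper names standing in
+ /// for the storage calls in the body below): drain every noted extrinsic in
+ /// index order, then store the enumerated trie root over the encoded bytes.
+ ///
+ /// ```ignore
+ /// let xts: Vec<Vec<u8>> = (0..extrinsic_count()).map(take_extrinsic_data).collect();
+ /// put_extrinsics_root(extrinsics_data_root::<T::Hashing>(xts));
+ /// ```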
+ pub fn derive_extrinsics() { + let extrinsics = (0..>::get().unwrap_or_default()) + .map(>::take) + .collect(); + let xts_root = extrinsics_data_root::(extrinsics); + >::put(xts_root); + } } pub struct ChainContext(::rstd::marker::PhantomData); impl Default for ChainContext { - fn default() -> Self { - ChainContext(::rstd::marker::PhantomData) - } + fn default() -> Self { + ChainContext(::rstd::marker::PhantomData) + } } impl Lookup for ChainContext { - type Source = ::Source; - type Target = ::Target; - fn lookup(&self, s: Self::Source) -> rstd::result::Result { - ::lookup(s) - } + type Source = ::Source; + type Target = ::Target; + fn lookup(&self, s: Self::Source) -> rstd::result::Result { + ::lookup(s) + } } impl CurrentHeight for ChainContext { - type BlockNumber = T::BlockNumber; - fn current_height(&self) -> Self::BlockNumber { - >::block_number() - } + type BlockNumber = T::BlockNumber; + fn current_height(&self) -> Self::BlockNumber { + >::block_number() + } } impl BlockNumberToHash for ChainContext { - type BlockNumber = T::BlockNumber; - type Hash = T::Hash; - fn block_number_to_hash(&self, n: Self::BlockNumber) -> Option { - Some(>::block_hash(n)) - } + type BlockNumber = T::BlockNumber; + type Hash = T::Hash; + fn block_number_to_hash(&self, n: Self::BlockNumber) -> Option { + Some(>::block_hash(n)) + } } #[cfg(test)] mod tests { - use super::*; - use runtime_io::with_externalities; - use substrate_primitives::H256; - use primitives::BuildStorage; - use primitives::traits::{BlakeTwo256, IdentityLookup}; - use primitives::testing::{Digest, DigestItem, Header}; - use srml_support::impl_outer_origin; - - impl_outer_origin!{ - pub enum Origin for Test where system = super {} - } - - #[derive(Clone, Eq, PartialEq)] - pub struct Test; - impl Trait for Test { - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type Digest = Digest; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = u16; - type Log = DigestItem; - } - - impl From for u16 { - fn from(e: Event) -> u16 { - match e { - Event::ExtrinsicSuccess => 100, - Event::ExtrinsicFailed => 101, - } - } - } - - type System = Module; - - fn new_test_ext() -> runtime_io::TestExternalities { - GenesisConfig::::default().build_storage().unwrap().0.into() - } - - #[test] - fn deposit_event_should_work() { - with_externalities(&mut new_test_ext(), || { - System::initialize(&1, &[0u8; 32].into(), &[0u8; 32].into()); - System::note_finished_extrinsics(); - System::deposit_event(1u16); - System::finalize(); - assert_eq!(System::events(), vec![EventRecord { phase: Phase::Finalization, event: 1u16 }]); - - System::initialize(&2, &[0u8; 32].into(), &[0u8; 32].into()); - System::deposit_event(42u16); - System::note_applied_extrinsic(&Ok(()), 0); - System::note_applied_extrinsic(&Err(""), 0); - System::note_finished_extrinsics(); - System::deposit_event(3u16); - System::finalize(); - assert_eq!(System::events(), vec![ - EventRecord { phase: Phase::ApplyExtrinsic(0), event: 42u16 }, - EventRecord { phase: Phase::ApplyExtrinsic(0), event: 100u16 }, - EventRecord { phase: Phase::ApplyExtrinsic(1), event: 101u16 }, - EventRecord { phase: Phase::Finalization, event: 3u16 } - ]); - }); - } + use super::*; + use primitives::testing::{Digest, DigestItem, Header}; + use primitives::traits::{BlakeTwo256, IdentityLookup}; + use primitives::BuildStorage; + use runtime_io::with_externalities; + use srml_support::impl_outer_origin; + use 
substrate_primitives::H256;
+
+ impl_outer_origin! {
+ pub enum Origin for Test where system = super {}
+ }
+
+ #[derive(Clone, Eq, PartialEq)]
+ pub struct Test;
+ impl Trait for Test {
+ type Origin = Origin;
+ type Index = u64;
+ type BlockNumber = u64;
+ type Hash = H256;
+ type Hashing = BlakeTwo256;
+ type Digest = Digest;
+ type AccountId = u64;
+ type Lookup = IdentityLookup;
+ type Header = Header;
+ type Event = u16;
+ type Log = DigestItem;
+ }
+
+ impl From for u16 {
+ fn from(e: Event) -> u16 {
+ match e {
+ Event::ExtrinsicSuccess => 100,
+ Event::ExtrinsicFailed => 101,
+ }
+ }
+ }
+
+ type System = Module;
+
+ fn new_test_ext() -> runtime_io::TestExternalities {
+ GenesisConfig::::default()
+ .build_storage()
+ .unwrap()
+ .0
+ .into()
+ }
+
+ #[test]
+ fn deposit_event_should_work() {
+ with_externalities(&mut new_test_ext(), || {
+ System::initialize(&1, &[0u8; 32].into(), &[0u8; 32].into());
+ System::note_finished_extrinsics();
+ System::deposit_event(1u16);
+ System::finalize();
+ assert_eq!(
+ System::events(),
+ vec![EventRecord {
+ phase: Phase::Finalization,
+ event: 1u16
+ }]
+ );
+
+ System::initialize(&2, &[0u8; 32].into(), &[0u8; 32].into());
+ System::deposit_event(42u16);
+ System::note_applied_extrinsic(&Ok(()), 0);
+ System::note_applied_extrinsic(&Err(""), 0);
+ System::note_finished_extrinsics();
+ System::deposit_event(3u16);
+ System::finalize();
+ assert_eq!(
+ System::events(),
+ vec![
+ EventRecord {
+ phase: Phase::ApplyExtrinsic(0),
+ event: 42u16
+ },
+ EventRecord {
+ phase: Phase::ApplyExtrinsic(0),
+ event: 100u16
+ },
+ EventRecord {
+ phase: Phase::ApplyExtrinsic(1),
+ event: 101u16
+ },
+ EventRecord {
+ phase: Phase::Finalization,
+ event: 3u16
+ }
+ ]
+ );
+ });
+ }
}
diff --git a/srml/timestamp/src/lib.rs b/srml/timestamp/src/lib.rs
index 43cf669e7e..d51e0d06e7 100644
--- a/srml/timestamp/src/lib.rs
+++ b/srml/timestamp/src/lib.rs
@@ -15,49 +15,49 @@
// along with Substrate. If not, see .

//! # Timestamp Module
-//!
-//! The timestamp module provides functionality to get and set the on-chain time.
-//! To use it in your module, you need to implement the timestamp [`Trait`].
+//!
+//! The timestamp module provides functionality to get and set the on-chain time.
+//! To use it in your module, you need to implement the timestamp [`Trait`].
//! The supported dispatchable functions are documented as part of the [`Call`] enum.
-//!
+//!
//! ## Overview
-//!
-//! The timestamp module allows the validators to set and validate a timestamp with each block.
//!
+//! The timestamp module allows the validators to set and validate a timestamp with each block.
+//!
+//! It uses inherents for timestamp data, which is provided by the block author and validated/verified by other validators.
//! The timestamp can be set only once per block and must be set each block. There could be a constraint on how much time must pass before setting the new timestamp.
-//!
-//! **NOTE:** The timestamp module is the recommended way to query the on-chain time instead of using an approach based on block numbers.
+//!
+//! **NOTE:** The timestamp module is the recommended way to query the on-chain time instead of using an approach based on block numbers.
//! Time measurement based on block numbers can cause issues because of cumulative calculation errors and hence should be avoided.
-//!
+//!
//! ## Interface
-//!
+//!
//! ### Dispatchable functions ([`Call`])
-//!
+//!
//! * `set` - Sets the current time.
-//!
+//!
//! ### Public functions ([`Module`])
-//!
+//!
//! * `get` - Gets the current time for the current block. If this function is called prior to setting the timestamp, it will return the timestamp of the previous block.
-//!
+//!
//! * `minimum_period` - Gets the minimum (and advised) period between blocks for the chain.
-//!
+//!
//! ## Usage
-//!
+//!
//! The following example shows how to use the timestamp module in your custom module to query the current timestamp.
-//!
+//!
//! ### Prerequisites
-//!
+//!
//! Import the `timestamp` module in your custom module and derive the module configuration trait from the `timestamp` trait.
-//!
+//!
//! ### Get current timestamp
-//!
+//!
//! ```ignore
//! use support::{decl_module, dispatch::Result};
//! use system::ensure_signed;
-//!
+//!
//! pub trait Trait: timestamp::Trait {}
-//!
+//!
//! decl_module! {
//! pub struct Module for enum Call where origin: T::Origin {
//! pub fn get_time(origin) -> Result {
@@ -68,30 +68,34 @@
//! }
//! }
//! ```
-//!
+//!
//! ### Example from SRML
-//!
+//!
//! The [`Session` module](https://github.com/paritytech/substrate/blob/master/srml/session/src/lib.rs) uses the `timestamp` module for session management.
-//!
+//!
//! ## Related Modules
-//!
+//!
//! * [`System`](https://crates.parity.io/srml_system/index.html)
//! * [`Session`](https://crates.parity.io/srml_session/index.html)
//!

#![cfg_attr(not(feature = "std"), no_std)]

-use parity_codec::Encode;
-#[cfg(feature = "std")]
-use parity_codec::Decode;
#[cfg(feature = "std")]
use inherents::ProvideInherentData;
-use srml_support::{StorageValue, Parameter, decl_storage, decl_module};
-use srml_support::for_each_tuple;
+use inherents::{InherentData, InherentIdentifier, IsFatalError, ProvideInherent, RuntimeString};
+#[cfg(feature = "std")]
+use parity_codec::Decode;
+use parity_codec::Encode;
+use rstd::{
+ cmp,
+ ops::{Div, Mul},
+ result,
+};
use runtime_primitives::traits::{As, SimpleArithmetic, Zero};
+use srml_support::for_each_tuple;
+use srml_support::{decl_module, decl_storage, Parameter, StorageValue};
use system::ensure_inherent;
-use rstd::{result, ops::{Mul, Div}, cmp};
-use inherents::{RuntimeString, InherentIdentifier, ProvideInherent, IsFatalError, InherentData};

/// The identifier for the `timestamp` inherent.
pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"timstap0";

@@ -102,45 +106,45 @@ pub type InherentType = u64;
#[derive(Encode)]
#[cfg_attr(feature = "std", derive(Debug, Decode))]
pub enum InherentError {
- /// The timestamp is valid in the future.
- /// This is a non-fatal-error and will not stop checking the inherents.
- ValidAtTimestamp(InherentType),
- /// Some other error.
- Other(RuntimeString),
+ /// The timestamp is valid in the future.
+ /// This is a non-fatal-error and will not stop checking the inherents.
+ ValidAtTimestamp(InherentType),
+ /// Some other error.
+ Other(RuntimeString),
}

impl IsFatalError for InherentError {
- fn is_fatal_error(&self) -> bool {
- match self {
- InherentError::ValidAtTimestamp(_) => false,
- InherentError::Other(_) => true,
- }
- }
+ fn is_fatal_error(&self) -> bool {
+ match self {
+ InherentError::ValidAtTimestamp(_) => false,
+ InherentError::Other(_) => true,
+ }
+ }
}

impl InherentError {
- /// Try to create an instance ouf of the given identifier and data.
- #[cfg(feature = "std")]
- pub fn try_from(id: &InherentIdentifier, data: &[u8]) -> Option {
- if id == &INHERENT_IDENTIFIER {
- ::decode(&mut &data[..])
- } else {
- None
- }
- }
+ /// Try to create an instance out of the given identifier and data.
+ #[cfg(feature = "std")]
+ pub fn try_from(id: &InherentIdentifier, data: &[u8]) -> Option {
+ if id == &INHERENT_IDENTIFIER {
+ ::decode(&mut &data[..])
+ } else {
+ None
+ }
+ }
}

/// Auxiliary trait to extract timestamp inherent data.
pub trait TimestampInherentData {
- /// Get timestamp inherent data.
- fn timestamp_inherent_data(&self) -> Result;
+ /// Get timestamp inherent data.
+ fn timestamp_inherent_data(&self) -> Result;
}

impl TimestampInherentData for InherentData {
- fn timestamp_inherent_data(&self) -> Result {
- self.get_data(&INHERENT_IDENTIFIER)
- .and_then(|r| r.ok_or_else(|| "Timestamp inherent data not found".into()))
- }
+ fn timestamp_inherent_data(&self) -> Result {
+ self.get_data(&INHERENT_IDENTIFIER)
+ .and_then(|r| r.ok_or_else(|| "Timestamp inherent data not found".into()))
+ }
}

#[cfg(feature = "std")]
@@ -148,31 +152,30 @@ pub struct InherentDataProvider;

#[cfg(feature = "std")]
impl ProvideInherentData for InherentDataProvider {
- fn inherent_identifier(&self) -> &'static InherentIdentifier {
- &INHERENT_IDENTIFIER
- }
-
- fn provide_inherent_data(&self, inherent_data: &mut InherentData) -> Result<(), RuntimeString> {
- use std::time::SystemTime;
-
- let now = SystemTime::now();
- now.duration_since(SystemTime::UNIX_EPOCH)
- .map_err(|_| {
- "Current time is before unix epoch".into()
- }).and_then(|d| {
- let duration: InherentType = d.as_secs();
- inherent_data.put_data(INHERENT_IDENTIFIER, &duration)
- })
- }
-
- fn error_to_string(&self, error: &[u8]) -> Option {
- InherentError::try_from(&INHERENT_IDENTIFIER, error).map(|e| format!("{:?}", e))
- }
+ fn inherent_identifier(&self) -> &'static InherentIdentifier {
+ &INHERENT_IDENTIFIER
+ }
+
+ fn provide_inherent_data(&self, inherent_data: &mut InherentData) -> Result<(), RuntimeString> {
+ use std::time::SystemTime;
+
+ let now = SystemTime::now();
+ now.duration_since(SystemTime::UNIX_EPOCH)
+ .map_err(|_| "Current time is before unix epoch".into())
+ .and_then(|d| {
+ let duration: InherentType = d.as_secs();
+ inherent_data.put_data(INHERENT_IDENTIFIER, &duration)
+ })
+ }
+
+ fn error_to_string(&self, error: &[u8]) -> Option {
+ InherentError::try_from(&INHERENT_IDENTIFIER, error).map(|e| format!("{:?}", e))
+ }
}

/// A trait which is called when the timestamp is set.
pub trait OnTimestampSet {
- fn on_timestamp_set(moment: Moment);
+ fn on_timestamp_set(moment: Moment);
}

macro_rules! impl_timestamp_set {
@@ -195,205 +198,230 @@ for_each_tuple!(impl_timestamp_set);

/// The module configuration trait
pub trait Trait: system::Trait {
- /// Type used for expressing timestamp.
- type Moment: Parameter + Default + SimpleArithmetic
- + Mul
- + Div;
-
- /// Something which can be notified when the timestamp is set. Set this to `()` if not needed.
- type OnTimestampSet: OnTimestampSet;
+ /// Type used for expressing timestamp.
+ type Moment: Parameter
+ + Default
+ + SimpleArithmetic
+ + Mul
+ + Div;
+
+ /// Something which can be notified when the timestamp is set. Set this to `()` if not needed.
+ type OnTimestampSet: OnTimestampSet;
}

decl_module! {
- pub struct Module for enum Call where origin: T::Origin {
- /// Set the current time.
- ///
- /// This call should be invoked exactly once per block. It will panic at the finalization phase,
- /// if this call hasn't been invoked by that time.
- ///
- /// The timestamp should be greater than the previous one by the amount specified by `minimum_period`.
- ///
- /// The dispatch origin for this call must be `Inherent`.
- fn set(origin, #[compact] now: T::Moment) {
- ensure_inherent(origin)?;
- assert!(!::DidUpdate::exists(), "Timestamp must be updated only once in the block");
- assert!(
- Self::now().is_zero() || now >= Self::now() + >::get(),
- "Timestamp must increment by at least between sequential blocks"
- );
- ::Now::put(now.clone());
- ::DidUpdate::put(true);
-
- >::on_timestamp_set(now);
- }
-
- // Manage upgrade. Remove after all networks upgraded.
- // TODO: #2133
- fn on_initialize() {
- if let Some(period) = >::take() {
- if !>::exists() {
- >::put(period)
- }
- }
- }
-
- fn on_finalize() {
- assert!(::DidUpdate::take(), "Timestamp must be updated once in the block");
- }
- }
+ pub struct Module for enum Call where origin: T::Origin {
+ /// Set the current time.
+ ///
+ /// This call should be invoked exactly once per block. It will panic at the finalization phase,
+ /// if this call hasn't been invoked by that time.
+ ///
+ /// The timestamp should be greater than the previous one by the amount specified by `minimum_period`.
+ ///
+ /// The dispatch origin for this call must be `Inherent`.
+ fn set(origin, #[compact] now: T::Moment) {
+ ensure_inherent(origin)?;
+ assert!(!::DidUpdate::exists(), "Timestamp must be updated only once in the block");
+ assert!(
+ Self::now().is_zero() || now >= Self::now() + >::get(),
+ "Timestamp must increment by at least `MinimumPeriod` between sequential blocks"
+ );
+ ::Now::put(now.clone());
+ ::DidUpdate::put(true);
+
+ >::on_timestamp_set(now);
+ }
+
+ // Manage upgrade. Remove after all networks upgraded.
+ // TODO: #2133
+ fn on_initialize() {
+ if let Some(period) = >::take() {
+ if !>::exists() {
+ >::put(period)
+ }
+ }
+ }
+
+ fn on_finalize() {
+ assert!(::DidUpdate::take(), "Timestamp must be updated once in the block");
+ }
+ }
}

decl_storage! {
- trait Store for Module as Timestamp {
- /// Current time for the current block.
- pub Now get(now) build(|_| T::Moment::sa(0)): T::Moment;
-
- /// Old storage item provided for compatibility. Remove after all networks upgraded.
- // TODO: #2133
- pub BlockPeriod: Option;
-
- /// The minimum period between blocks. Beware that this is different to the *expected* period
- /// that the block production apparatus provides. Your chosen consensus system will generally
- /// work with this to determine a sensible block time. e.g. For Aura, it will be double this
- /// period on default settings.
- pub MinimumPeriod get(minimum_period) config(): T::Moment = T::Moment::sa(3);
-
- /// Did the timestamp get updated in this block?
- DidUpdate: bool;
- }
+ trait Store for Module as Timestamp {
+ /// Current time for the current block.
+ pub Now get(now) build(|_| T::Moment::sa(0)): T::Moment;
+
+ /// Old storage item provided for compatibility. Remove after all networks upgraded.
+ // TODO: #2133
+ pub BlockPeriod: Option;
+
+ /// The minimum period between blocks. Beware that this is different to the *expected* period
+ /// that the block production apparatus provides. Your chosen consensus system will generally
+ /// work with this to determine a sensible block time. e.g. For Aura, it will be double this
+ /// period on default settings.
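+ ///
+ /// For example, with the default value of `3` and a seconds-based `Moment`
+ /// (a hedged reading, since the unit is whatever the runtime assigns to
+ /// `Moment`), default Aura settings give:
+ ///
+ /// ```ignore
+ /// let minimum_period = 3;                       // this storage item
+ /// let expected_block_time = 2 * minimum_period; // ~6s between blocks
+ /// ```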
+ pub MinimumPeriod get(minimum_period) config(): T::Moment = T::Moment::sa(3); + + /// Did the timestamp get updated in this block? + DidUpdate: bool; + } } impl Module { - /// Get the current time for the current block. - /// - /// NOTE: if this function is called prior to setting the timestamp, - /// it will return the timestamp of the previous block. - pub fn get() -> T::Moment { - Self::now() - } - - /// Set the timestamp to something in particular. Only used for tests. - #[cfg(feature = "std")] - pub fn set_timestamp(now: T::Moment) { - ::Now::put(now); - } + /// Get the current time for the current block. + /// + /// NOTE: if this function is called prior to setting the timestamp, + /// it will return the timestamp of the previous block. + pub fn get() -> T::Moment { + Self::now() + } + + /// Set the timestamp to something in particular. Only used for tests. + #[cfg(feature = "std")] + pub fn set_timestamp(now: T::Moment) { + ::Now::put(now); + } } fn extract_inherent_data(data: &InherentData) -> Result { - data.get_data::(&INHERENT_IDENTIFIER) - .map_err(|_| RuntimeString::from("Invalid timestamp inherent data encoding."))? - .ok_or_else(|| "Timestamp inherent data is not provided.".into()) + data.get_data::(&INHERENT_IDENTIFIER) + .map_err(|_| RuntimeString::from("Invalid timestamp inherent data encoding."))? + .ok_or_else(|| "Timestamp inherent data is not provided.".into()) } impl ProvideInherent for Module { - type Call = Call; - type Error = InherentError; - const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; - - fn create_inherent(data: &InherentData) -> Option { - let data = extract_inherent_data(data).expect("Gets and decodes timestamp inherent data"); - - let next_time = cmp::max(As::sa(data), Self::now() + >::get()); - Some(Call::set(next_time.into())) - } - - fn check_inherent(call: &Self::Call, data: &InherentData) -> result::Result<(), Self::Error> { - const MAX_TIMESTAMP_DRIFT: u64 = 60; - - let t = match call { - Call::set(ref t) => t.clone(), - _ => return Ok(()), - }.as_(); - - let data = extract_inherent_data(data).map_err(|e| InherentError::Other(e))?; - - let minimum = (Self::now() + >::get()).as_(); - if t > data + MAX_TIMESTAMP_DRIFT { - Err(InherentError::Other("Timestamp too far in future to accept".into())) - } else if t < minimum { - Err(InherentError::ValidAtTimestamp(minimum)) - } else { - Ok(()) - } - } + type Call = Call; + type Error = InherentError; + const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; + + fn create_inherent(data: &InherentData) -> Option { + let data = extract_inherent_data(data).expect("Gets and decodes timestamp inherent data"); + + let next_time = cmp::max(As::sa(data), Self::now() + >::get()); + Some(Call::set(next_time.into())) + } + + fn check_inherent(call: &Self::Call, data: &InherentData) -> result::Result<(), Self::Error> { + const MAX_TIMESTAMP_DRIFT: u64 = 60; + + let t = match call { + Call::set(ref t) => t.clone(), + _ => return Ok(()), + } + .as_(); + + let data = extract_inherent_data(data).map_err(|e| InherentError::Other(e))?; + + let minimum = (Self::now() + >::get()).as_(); + if t > data + MAX_TIMESTAMP_DRIFT { + Err(InherentError::Other( + "Timestamp too far in future to accept".into(), + )) + } else if t < minimum { + Err(InherentError::ValidAtTimestamp(minimum)) + } else { + Ok(()) + } + } } #[cfg(test)] mod tests { - use super::*; - - use srml_support::{impl_outer_origin, assert_ok}; - use runtime_io::{with_externalities, TestExternalities}; - use substrate_primitives::H256; 
- use runtime_primitives::BuildStorage; - use runtime_primitives::traits::{BlakeTwo256, IdentityLookup}; - use runtime_primitives::testing::{Digest, DigestItem, Header}; - - impl_outer_origin! { - pub enum Origin for Test {} - } - - #[derive(Clone, Eq, PartialEq)] - pub struct Test; - impl system::Trait for Test { - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type Digest = Digest; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type Log = DigestItem; - } - impl Trait for Test { - type Moment = u64; - type OnTimestampSet = (); - } - type Timestamp = Module; - - #[test] - fn timestamp_works() { - let mut t = system::GenesisConfig::::default().build_storage().unwrap().0; - t.extend(GenesisConfig:: { - minimum_period: 5, - }.build_storage().unwrap().0); - - with_externalities(&mut TestExternalities::new(t), || { - Timestamp::set_timestamp(42); - assert_ok!(Timestamp::dispatch(Call::set(69), Origin::INHERENT)); - assert_eq!(Timestamp::now(), 69); - }); - } - - #[test] - #[should_panic(expected = "Timestamp must be updated only once in the block")] - fn double_timestamp_should_fail() { - let mut t = system::GenesisConfig::::default().build_storage().unwrap().0; - t.extend(GenesisConfig:: { - minimum_period: 5, - }.build_storage().unwrap().0); - - with_externalities(&mut TestExternalities::new(t), || { - Timestamp::set_timestamp(42); - assert_ok!(Timestamp::dispatch(Call::set(69), Origin::INHERENT)); - let _ = Timestamp::dispatch(Call::set(70), Origin::INHERENT); - }); - } - - #[test] - #[should_panic(expected = "Timestamp must increment by at least between sequential blocks")] - fn block_period_minimum_enforced() { - let mut t = system::GenesisConfig::::default().build_storage().unwrap().0; - t.extend(GenesisConfig:: { - minimum_period: 5, - }.build_storage().unwrap().0); - - with_externalities(&mut TestExternalities::new(t), || { - Timestamp::set_timestamp(42); - let _ = Timestamp::dispatch(Call::set(46), Origin::INHERENT); - }); - } + use super::*; + + use runtime_io::{with_externalities, TestExternalities}; + use runtime_primitives::testing::{Digest, DigestItem, Header}; + use runtime_primitives::traits::{BlakeTwo256, IdentityLookup}; + use runtime_primitives::BuildStorage; + use srml_support::{assert_ok, impl_outer_origin}; + use substrate_primitives::H256; + + impl_outer_origin! 
{ + pub enum Origin for Test {} + } + + #[derive(Clone, Eq, PartialEq)] + pub struct Test; + impl system::Trait for Test { + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type Digest = Digest; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = (); + type Log = DigestItem; + } + impl Trait for Test { + type Moment = u64; + type OnTimestampSet = (); + } + type Timestamp = Module; + + #[test] + fn timestamp_works() { + let mut t = system::GenesisConfig::::default() + .build_storage() + .unwrap() + .0; + t.extend( + GenesisConfig:: { minimum_period: 5 } + .build_storage() + .unwrap() + .0, + ); + + with_externalities(&mut TestExternalities::new(t), || { + Timestamp::set_timestamp(42); + assert_ok!(Timestamp::dispatch(Call::set(69), Origin::INHERENT)); + assert_eq!(Timestamp::now(), 69); + }); + } + + #[test] + #[should_panic(expected = "Timestamp must be updated only once in the block")] + fn double_timestamp_should_fail() { + let mut t = system::GenesisConfig::::default() + .build_storage() + .unwrap() + .0; + t.extend( + GenesisConfig:: { minimum_period: 5 } + .build_storage() + .unwrap() + .0, + ); + + with_externalities(&mut TestExternalities::new(t), || { + Timestamp::set_timestamp(42); + assert_ok!(Timestamp::dispatch(Call::set(69), Origin::INHERENT)); + let _ = Timestamp::dispatch(Call::set(70), Origin::INHERENT); + }); + } + + #[test] + #[should_panic( + expected = "Timestamp must increment by at least between sequential blocks" + )] + fn block_period_minimum_enforced() { + let mut t = system::GenesisConfig::::default() + .build_storage() + .unwrap() + .0; + t.extend( + GenesisConfig:: { minimum_period: 5 } + .build_storage() + .unwrap() + .0, + ); + + with_externalities(&mut TestExternalities::new(t), || { + Timestamp::set_timestamp(42); + let _ = Timestamp::dispatch(Call::set(46), Origin::INHERENT); + }); + } } diff --git a/srml/treasury/src/lib.rs b/srml/treasury/src/lib.rs index b96928d7f8..9b468f989e 100644 --- a/srml/treasury/src/lib.rs +++ b/srml/treasury/src/lib.rs @@ -18,155 +18,160 @@ #![cfg_attr(not(feature = "std"), no_std)] -#[cfg(feature = "std")] -use serde_derive::{Serialize, Deserialize}; +use parity_codec::{Decode, Encode}; use rstd::prelude::*; -use srml_support::{StorageValue, StorageMap, decl_module, decl_storage, decl_event, ensure}; -use srml_support::traits::{Currency, ReservableCurrency, OnDilution, OnUnbalanced, Imbalance}; -use runtime_primitives::{Permill, traits::{Zero, EnsureOrigin, StaticLookup}}; -use parity_codec::{Encode, Decode}; +use runtime_primitives::{ + traits::{EnsureOrigin, StaticLookup, Zero}, + Permill, +}; +#[cfg(feature = "std")] +use serde_derive::{Deserialize, Serialize}; +use srml_support::traits::{Currency, Imbalance, OnDilution, OnUnbalanced, ReservableCurrency}; +use srml_support::{decl_event, decl_module, decl_storage, ensure, StorageMap, StorageValue}; use system::ensure_signed; type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; -type PositiveImbalanceOf = <::Currency as Currency<::AccountId>>::PositiveImbalance; -type NegativeImbalanceOf = <::Currency as Currency<::AccountId>>::NegativeImbalance; +type PositiveImbalanceOf = + <::Currency as Currency<::AccountId>>::PositiveImbalance; +type NegativeImbalanceOf = + <::Currency as Currency<::AccountId>>::NegativeImbalance; pub trait Trait: system::Trait { - /// The staking balance. - type Currency: Currency + ReservableCurrency; + /// The staking balance. 
+ type Currency: Currency + ReservableCurrency; - /// Origin from which approvals must come. - type ApproveOrigin: EnsureOrigin; + /// Origin from which approvals must come. + type ApproveOrigin: EnsureOrigin; - /// Origin from which rejections must come. - type RejectOrigin: EnsureOrigin; + /// Origin from which rejections must come. + type RejectOrigin: EnsureOrigin; - /// The overarching event type. - type Event: From> + Into<::Event>; + /// The overarching event type. + type Event: From> + Into<::Event>; - /// Handler for the unbalanced increase when minting cash from the "Pot". - type MintedForSpending: OnUnbalanced>; + /// Handler for the unbalanced increase when minting cash from the "Pot". + type MintedForSpending: OnUnbalanced>; - /// Handler for the unbalanced decrease when slashing for a rejected proposal. - type ProposalRejection: OnUnbalanced>; + /// Handler for the unbalanced decrease when slashing for a rejected proposal. + type ProposalRejection: OnUnbalanced>; } type ProposalIndex = u32; decl_module! { - pub struct Module for enum Call where origin: T::Origin { - fn deposit_event() = default; - /// Put forward a suggestion for spending. A deposit proportional to the value - /// is reserved and slashed if the proposal is rejected. It is returned once the - /// proposal is awarded. - fn propose_spend( - origin, - #[compact] value: BalanceOf, - beneficiary: ::Source - ) { - let proposer = ensure_signed(origin)?; - let beneficiary = T::Lookup::lookup(beneficiary)?; - - let bond = Self::calculate_bond(value); - T::Currency::reserve(&proposer, bond) - .map_err(|_| "Proposer's balance too low")?; - - let c = Self::proposal_count(); - >::put(c + 1); - >::insert(c, Proposal { proposer, value, beneficiary, bond }); - - Self::deposit_event(RawEvent::Proposed(c)); - } - - /// Set the balance of funds available to spend. - fn set_pot(#[compact] new_pot: BalanceOf) { - // Put the new value into storage. - >::put(new_pot); - } - - /// (Re-)configure this module. - fn configure( - #[compact] proposal_bond: Permill, - #[compact] proposal_bond_minimum: BalanceOf, - #[compact] spend_period: T::BlockNumber, - #[compact] burn: Permill - ) { - >::put(proposal_bond); - >::put(proposal_bond_minimum); - >::put(spend_period); - >::put(burn); - } - - /// Reject a proposed spend. The original deposit will be slashed. - fn reject_proposal(origin, #[compact] proposal_id: ProposalIndex) { - T::RejectOrigin::ensure_origin(origin)?; - let proposal = >::take(proposal_id).ok_or("No proposal at that index")?; - - let value = proposal.bond; - let imbalance = T::Currency::slash_reserved(&proposal.proposer, value).0; - T::ProposalRejection::on_unbalanced(imbalance); - } - - /// Approve a proposal. At a later time, the proposal will be allocated to the beneficiary - /// and the original deposit will be returned. - fn approve_proposal(origin, #[compact] proposal_id: ProposalIndex) { - T::ApproveOrigin::ensure_origin(origin)?; - - ensure!(>::exists(proposal_id), "No proposal at that index"); - - >::mutate(|v| v.push(proposal_id)); - } - - fn on_finalize(n: T::BlockNumber) { - // Check to see if we should spend some funds! - if (n % Self::spend_period()).is_zero() { - Self::spend_funds(); - } - } - } + pub struct Module for enum Call where origin: T::Origin { + fn deposit_event() = default; + /// Put forward a suggestion for spending. A deposit proportional to the value + /// is reserved and slashed if the proposal is rejected. It is returned once the + /// proposal is awarded. 
+ fn propose_spend( + origin, + #[compact] value: BalanceOf, + beneficiary: ::Source + ) { + let proposer = ensure_signed(origin)?; + let beneficiary = T::Lookup::lookup(beneficiary)?; + + let bond = Self::calculate_bond(value); + T::Currency::reserve(&proposer, bond) + .map_err(|_| "Proposer's balance too low")?; + + let c = Self::proposal_count(); + >::put(c + 1); + >::insert(c, Proposal { proposer, value, beneficiary, bond }); + + Self::deposit_event(RawEvent::Proposed(c)); + } + + /// Set the balance of funds available to spend. + fn set_pot(#[compact] new_pot: BalanceOf) { + // Put the new value into storage. + >::put(new_pot); + } + + /// (Re-)configure this module. + fn configure( + #[compact] proposal_bond: Permill, + #[compact] proposal_bond_minimum: BalanceOf, + #[compact] spend_period: T::BlockNumber, + #[compact] burn: Permill + ) { + >::put(proposal_bond); + >::put(proposal_bond_minimum); + >::put(spend_period); + >::put(burn); + } + + /// Reject a proposed spend. The original deposit will be slashed. + fn reject_proposal(origin, #[compact] proposal_id: ProposalIndex) { + T::RejectOrigin::ensure_origin(origin)?; + let proposal = >::take(proposal_id).ok_or("No proposal at that index")?; + + let value = proposal.bond; + let imbalance = T::Currency::slash_reserved(&proposal.proposer, value).0; + T::ProposalRejection::on_unbalanced(imbalance); + } + + /// Approve a proposal. At a later time, the proposal will be allocated to the beneficiary + /// and the original deposit will be returned. + fn approve_proposal(origin, #[compact] proposal_id: ProposalIndex) { + T::ApproveOrigin::ensure_origin(origin)?; + + ensure!(>::exists(proposal_id), "No proposal at that index"); + + >::mutate(|v| v.push(proposal_id)); + } + + fn on_finalize(n: T::BlockNumber) { + // Check to see if we should spend some funds! + if (n % Self::spend_period()).is_zero() { + Self::spend_funds(); + } + } + } } /// A spending proposal. #[cfg_attr(feature = "std", derive(Serialize, Deserialize, Debug))] #[derive(Encode, Decode, Clone, PartialEq, Eq)] pub struct Proposal { - proposer: AccountId, - value: Balance, - beneficiary: AccountId, - bond: Balance, + proposer: AccountId, + value: Balance, + beneficiary: AccountId, + bond: Balance, } decl_storage! { - trait Store for Module as Treasury { - // Config... + trait Store for Module as Treasury { + // Config... - /// Proportion of funds that should be bonded in order to place a proposal. An accepted - /// proposal gets these back. A rejected proposal doesn't. - ProposalBond get(proposal_bond) config(): Permill; + /// Proportion of funds that should be bonded in order to place a proposal. An accepted + /// proposal gets these back. A rejected proposal doesn't. + ProposalBond get(proposal_bond) config(): Permill; - /// Minimum amount of funds that should be placed in a deposit for making a proposal. - ProposalBondMinimum get(proposal_bond_minimum) config(): BalanceOf; + /// Minimum amount of funds that should be placed in a deposit for making a proposal. + ProposalBondMinimum get(proposal_bond_minimum) config(): BalanceOf; - /// Period between successive spends. - SpendPeriod get(spend_period) config(): T::BlockNumber = runtime_primitives::traits::One::one(); + /// Period between successive spends. + SpendPeriod get(spend_period) config(): T::BlockNumber = runtime_primitives::traits::One::one(); - /// Percentage of spare funds (if any) that are burnt per spend period. 
- Burn get(burn) config(): Permill; + /// Percentage of spare funds (if any) that are burnt per spend period. + Burn get(burn) config(): Permill; - // State... + // State... - /// Total funds available to this module for spending. - Pot get(pot): BalanceOf; + /// Total funds available to this module for spending. + Pot get(pot): BalanceOf; - /// Number of proposals that have been made. - ProposalCount get(proposal_count): ProposalIndex; + /// Number of proposals that have been made. + ProposalCount get(proposal_count): ProposalIndex; - /// Proposals that have been made. - Proposals get(proposals): map ProposalIndex => Option>>; + /// Proposals that have been made. + Proposals get(proposals): map ProposalIndex => Option>>; - /// Proposal indices that have been approved but not yet awarded. - Approvals get(approvals): Vec; - } + /// Proposal indices that have been approved but not yet awarded. + Approvals get(approvals): Vec; + } } decl_event!( @@ -189,292 +194,320 @@ decl_event!( ); impl Module { - // Add public immutables and private mutables. - - /// The needed bond for a proposal whose spend is `value`. - fn calculate_bond(value: BalanceOf) -> BalanceOf { - Self::proposal_bond_minimum().max(Self::proposal_bond() * value) - } - - // Spend some money! - fn spend_funds() { - let mut budget_remaining = Self::pot(); - Self::deposit_event(RawEvent::Spending(budget_remaining)); - - let mut missed_any = false; - let mut imbalance = >::zero(); - >::mutate(|v| { - v.retain(|&index| { - // Should always be true, but shouldn't panic if false or we're screwed. - if let Some(p) = Self::proposals(index) { - if p.value <= budget_remaining { - budget_remaining -= p.value; - >::remove(index); - - // return their deposit. - let _ = T::Currency::unreserve(&p.proposer, p.bond); - - // provide the allocation. - imbalance.subsume(T::Currency::deposit_creating(&p.beneficiary, p.value)); - - Self::deposit_event(RawEvent::Awarded(index, p.value, p.beneficiary)); - false - } else { - missed_any = true; - true - } - } else { - false - } - }); - }); - - T::MintedForSpending::on_unbalanced(imbalance); - - if !missed_any { - // burn some proportion of the remaining budget if we run a surplus. - let burn = (Self::burn() * budget_remaining).min(budget_remaining); - budget_remaining -= burn; - Self::deposit_event(RawEvent::Burnt(burn)) - } - - Self::deposit_event(RawEvent::Rollover(budget_remaining)); - - >::put(budget_remaining); - } + // Add public immutables and private mutables. + + /// The needed bond for a proposal whose spend is `value`. + fn calculate_bond(value: BalanceOf) -> BalanceOf { + Self::proposal_bond_minimum().max(Self::proposal_bond() * value) + } + + // Spend some money! + fn spend_funds() { + let mut budget_remaining = Self::pot(); + Self::deposit_event(RawEvent::Spending(budget_remaining)); + + let mut missed_any = false; + let mut imbalance = >::zero(); + >::mutate(|v| { + v.retain(|&index| { + // Should always be true, but shouldn't panic if false or we're screwed. + if let Some(p) = Self::proposals(index) { + if p.value <= budget_remaining { + budget_remaining -= p.value; + >::remove(index); + + // return their deposit. + let _ = T::Currency::unreserve(&p.proposer, p.bond); + + // provide the allocation. 
+ imbalance.subsume(T::Currency::deposit_creating(&p.beneficiary, p.value)); + + Self::deposit_event(RawEvent::Awarded(index, p.value, p.beneficiary)); + false + } else { + missed_any = true; + true + } + } else { + false + } + }); + }); + + T::MintedForSpending::on_unbalanced(imbalance); + + if !missed_any { + // burn some proportion of the remaining budget if we run a surplus. + let burn = (Self::burn() * budget_remaining).min(budget_remaining); + budget_remaining -= burn; + Self::deposit_event(RawEvent::Burnt(burn)) + } + + Self::deposit_event(RawEvent::Rollover(budget_remaining)); + + >::put(budget_remaining); + } } impl OnDilution> for Module { - fn on_dilution(minted: BalanceOf, portion: BalanceOf) { - // Mint extra funds for the treasury to keep the ratio of portion to total_issuance equal - // pre dilution and post-dilution. - if !minted.is_zero() && !portion.is_zero() { - let total_issuance = T::Currency::total_issuance(); - let funding = (total_issuance - portion) / portion * minted; - >::mutate(|x| *x += funding); - } - } + fn on_dilution(minted: BalanceOf, portion: BalanceOf) { + // Mint extra funds for the treasury to keep the ratio of portion to total_issuance equal + // pre dilution and post-dilution. + if !minted.is_zero() && !portion.is_zero() { + let total_issuance = T::Currency::total_issuance(); + let funding = (total_issuance - portion) / portion * minted; + >::mutate(|x| *x += funding); + } + } } #[cfg(test)] mod tests { - use super::*; - - use runtime_io::with_externalities; - use srml_support::{impl_outer_origin, assert_ok, assert_noop}; - use substrate_primitives::{H256, Blake2Hasher}; - use runtime_primitives::BuildStorage; - use runtime_primitives::traits::{BlakeTwo256, OnFinalize, IdentityLookup}; - use runtime_primitives::testing::{Digest, DigestItem, Header}; - - impl_outer_origin! 
{ - pub enum Origin for Test {} - } - - #[derive(Clone, Eq, PartialEq)] - pub struct Test; - impl system::Trait for Test { - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type Digest = Digest; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type Log = DigestItem; - } - impl balances::Trait for Test { - type Balance = u64; - type OnNewAccount = (); - type OnFreeBalanceZero = (); - type Event = (); - type TransactionPayment = (); - type TransferPayment = (); - type DustRemoval = (); - } - impl Trait for Test { - type Currency = balances::Module; - type ApproveOrigin = system::EnsureRoot; - type RejectOrigin = system::EnsureRoot; - type Event = (); - type MintedForSpending = (); - type ProposalRejection = (); - } - type Balances = balances::Module; - type Treasury = Module; - - fn new_test_ext() -> runtime_io::TestExternalities { - let mut t = system::GenesisConfig::::default().build_storage().unwrap().0; - t.extend(balances::GenesisConfig::{ - balances: vec![(0, 100), (1, 99), (2, 1)], - transaction_base_fee: 0, - transaction_byte_fee: 0, - transfer_fee: 0, - creation_fee: 0, - existential_deposit: 0, - vesting: vec![], - }.build_storage().unwrap().0); - t.extend(GenesisConfig::{ - proposal_bond: Permill::from_percent(5), - proposal_bond_minimum: 1, - spend_period: 2, - burn: Permill::from_percent(50), - }.build_storage().unwrap().0); - t.into() - } - - #[test] - fn genesis_config_works() { - with_externalities(&mut new_test_ext(), || { - assert_eq!(Treasury::proposal_bond(), Permill::from_percent(5)); - assert_eq!(Treasury::proposal_bond_minimum(), 1); - assert_eq!(Treasury::spend_period(), 2); - assert_eq!(Treasury::burn(), Permill::from_percent(50)); - assert_eq!(Treasury::pot(), 0); - assert_eq!(Treasury::proposal_count(), 0); - }); - } - - #[test] - fn minting_works() { - with_externalities(&mut new_test_ext(), || { - // Check that accumulate works when we have Some value in Dummy already. 
- Treasury::on_dilution(100, 100); - assert_eq!(Treasury::pot(), 100); - }); - } - - #[test] - fn spend_proposal_takes_min_deposit() { - with_externalities(&mut new_test_ext(), || { - assert_ok!(Treasury::propose_spend(Origin::signed(0), 1, 3)); - assert_eq!(Balances::free_balance(&0), 99); - assert_eq!(Balances::reserved_balance(&0), 1); - }); - } - - #[test] - fn spend_proposal_takes_proportional_deposit() { - with_externalities(&mut new_test_ext(), || { - assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); - assert_eq!(Balances::free_balance(&0), 95); - assert_eq!(Balances::reserved_balance(&0), 5); - }); - } - - #[test] - fn spend_proposal_fails_when_proposer_poor() { - with_externalities(&mut new_test_ext(), || { - assert_noop!(Treasury::propose_spend(Origin::signed(2), 100, 3), "Proposer's balance too low"); - }); - } - - #[test] - fn accepted_spend_proposal_ignored_outside_spend_period() { - with_externalities(&mut new_test_ext(), || { - Treasury::on_dilution(100, 100); - - assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); - assert_ok!(Treasury::approve_proposal(Origin::ROOT, 0)); - - >::on_finalize(1); - assert_eq!(Balances::free_balance(&3), 0); - assert_eq!(Treasury::pot(), 100); - }); - } - - #[test] - fn unused_pot_should_diminish() { - with_externalities(&mut new_test_ext(), || { - Treasury::on_dilution(100, 100); - - >::on_finalize(2); - assert_eq!(Treasury::pot(), 50); - }); - } - - #[test] - fn rejected_spend_proposal_ignored_on_spend_period() { - with_externalities(&mut new_test_ext(), || { - Treasury::on_dilution(100, 100); - - assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); - assert_ok!(Treasury::reject_proposal(Origin::ROOT, 0)); - - >::on_finalize(2); - assert_eq!(Balances::free_balance(&3), 0); - assert_eq!(Treasury::pot(), 50); - }); - } - - #[test] - fn reject_already_rejected_spend_proposal_fails() { - with_externalities(&mut new_test_ext(), || { - Treasury::on_dilution(100, 100); - - assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); - assert_ok!(Treasury::reject_proposal(Origin::ROOT, 0)); - assert_noop!(Treasury::reject_proposal(Origin::ROOT, 0), "No proposal at that index"); - }); - } - - #[test] - fn reject_non_existant_spend_proposal_fails() { - with_externalities(&mut new_test_ext(), || { - assert_noop!(Treasury::reject_proposal(Origin::ROOT, 0), "No proposal at that index"); - }); - } - - #[test] - fn accept_non_existant_spend_proposal_fails() { - with_externalities(&mut new_test_ext(), || { - assert_noop!(Treasury::approve_proposal(Origin::ROOT, 0), "No proposal at that index"); - }); - } - - #[test] - fn accept_already_rejected_spend_proposal_fails() { - with_externalities(&mut new_test_ext(), || { - Treasury::on_dilution(100, 100); - - assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); - assert_ok!(Treasury::reject_proposal(Origin::ROOT, 0)); - assert_noop!(Treasury::approve_proposal(Origin::ROOT, 0), "No proposal at that index"); - }); - } - - #[test] - fn accepted_spend_proposal_enacted_on_spend_period() { - with_externalities(&mut new_test_ext(), || { - Treasury::on_dilution(100, 100); - - assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); - assert_ok!(Treasury::approve_proposal(Origin::ROOT, 0)); - - >::on_finalize(2); - assert_eq!(Balances::free_balance(&3), 100); - assert_eq!(Treasury::pot(), 0); - }); - } - - #[test] - fn pot_underflow_should_not_diminish() { - with_externalities(&mut new_test_ext(), || { - Treasury::on_dilution(100, 100); - - 
assert_ok!(Treasury::propose_spend(Origin::signed(0), 150, 3)); - assert_ok!(Treasury::approve_proposal(Origin::ROOT, 0)); - - >::on_finalize(2); - assert_eq!(Treasury::pot(), 100); - - Treasury::on_dilution(100, 100); - >::on_finalize(4); - assert_eq!(Balances::free_balance(&3), 150); - assert_eq!(Treasury::pot(), 25); - }); - } + use super::*; + + use runtime_io::with_externalities; + use runtime_primitives::testing::{Digest, DigestItem, Header}; + use runtime_primitives::traits::{BlakeTwo256, IdentityLookup, OnFinalize}; + use runtime_primitives::BuildStorage; + use srml_support::{assert_noop, assert_ok, impl_outer_origin}; + use substrate_primitives::{Blake2Hasher, H256}; + + impl_outer_origin! { + pub enum Origin for Test {} + } + + #[derive(Clone, Eq, PartialEq)] + pub struct Test; + impl system::Trait for Test { + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type Digest = Digest; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = (); + type Log = DigestItem; + } + impl balances::Trait for Test { + type Balance = u64; + type OnNewAccount = (); + type OnFreeBalanceZero = (); + type Event = (); + type TransactionPayment = (); + type TransferPayment = (); + type DustRemoval = (); + } + impl Trait for Test { + type Currency = balances::Module; + type ApproveOrigin = system::EnsureRoot; + type RejectOrigin = system::EnsureRoot; + type Event = (); + type MintedForSpending = (); + type ProposalRejection = (); + } + type Balances = balances::Module; + type Treasury = Module; + + fn new_test_ext() -> runtime_io::TestExternalities { + let mut t = system::GenesisConfig::::default() + .build_storage() + .unwrap() + .0; + t.extend( + balances::GenesisConfig:: { + balances: vec![(0, 100), (1, 99), (2, 1)], + transaction_base_fee: 0, + transaction_byte_fee: 0, + transfer_fee: 0, + creation_fee: 0, + existential_deposit: 0, + vesting: vec![], + } + .build_storage() + .unwrap() + .0, + ); + t.extend( + GenesisConfig:: { + proposal_bond: Permill::from_percent(5), + proposal_bond_minimum: 1, + spend_period: 2, + burn: Permill::from_percent(50), + } + .build_storage() + .unwrap() + .0, + ); + t.into() + } + + #[test] + fn genesis_config_works() { + with_externalities(&mut new_test_ext(), || { + assert_eq!(Treasury::proposal_bond(), Permill::from_percent(5)); + assert_eq!(Treasury::proposal_bond_minimum(), 1); + assert_eq!(Treasury::spend_period(), 2); + assert_eq!(Treasury::burn(), Permill::from_percent(50)); + assert_eq!(Treasury::pot(), 0); + assert_eq!(Treasury::proposal_count(), 0); + }); + } + + #[test] + fn minting_works() { + with_externalities(&mut new_test_ext(), || { + // Check that accumulate works when we have Some value in Dummy already. 
+ Treasury::on_dilution(100, 100); + assert_eq!(Treasury::pot(), 100); + }); + } + + #[test] + fn spend_proposal_takes_min_deposit() { + with_externalities(&mut new_test_ext(), || { + assert_ok!(Treasury::propose_spend(Origin::signed(0), 1, 3)); + assert_eq!(Balances::free_balance(&0), 99); + assert_eq!(Balances::reserved_balance(&0), 1); + }); + } + + #[test] + fn spend_proposal_takes_proportional_deposit() { + with_externalities(&mut new_test_ext(), || { + assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); + assert_eq!(Balances::free_balance(&0), 95); + assert_eq!(Balances::reserved_balance(&0), 5); + }); + } + + #[test] + fn spend_proposal_fails_when_proposer_poor() { + with_externalities(&mut new_test_ext(), || { + assert_noop!( + Treasury::propose_spend(Origin::signed(2), 100, 3), + "Proposer's balance too low" + ); + }); + } + + #[test] + fn accepted_spend_proposal_ignored_outside_spend_period() { + with_externalities(&mut new_test_ext(), || { + Treasury::on_dilution(100, 100); + + assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); + assert_ok!(Treasury::approve_proposal(Origin::ROOT, 0)); + + >::on_finalize(1); + assert_eq!(Balances::free_balance(&3), 0); + assert_eq!(Treasury::pot(), 100); + }); + } + + #[test] + fn unused_pot_should_diminish() { + with_externalities(&mut new_test_ext(), || { + Treasury::on_dilution(100, 100); + + >::on_finalize(2); + assert_eq!(Treasury::pot(), 50); + }); + } + + #[test] + fn rejected_spend_proposal_ignored_on_spend_period() { + with_externalities(&mut new_test_ext(), || { + Treasury::on_dilution(100, 100); + + assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); + assert_ok!(Treasury::reject_proposal(Origin::ROOT, 0)); + + >::on_finalize(2); + assert_eq!(Balances::free_balance(&3), 0); + assert_eq!(Treasury::pot(), 50); + }); + } + + #[test] + fn reject_already_rejected_spend_proposal_fails() { + with_externalities(&mut new_test_ext(), || { + Treasury::on_dilution(100, 100); + + assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); + assert_ok!(Treasury::reject_proposal(Origin::ROOT, 0)); + assert_noop!( + Treasury::reject_proposal(Origin::ROOT, 0), + "No proposal at that index" + ); + }); + } + + #[test] + fn reject_non_existant_spend_proposal_fails() { + with_externalities(&mut new_test_ext(), || { + assert_noop!( + Treasury::reject_proposal(Origin::ROOT, 0), + "No proposal at that index" + ); + }); + } + + #[test] + fn accept_non_existant_spend_proposal_fails() { + with_externalities(&mut new_test_ext(), || { + assert_noop!( + Treasury::approve_proposal(Origin::ROOT, 0), + "No proposal at that index" + ); + }); + } + + #[test] + fn accept_already_rejected_spend_proposal_fails() { + with_externalities(&mut new_test_ext(), || { + Treasury::on_dilution(100, 100); + + assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); + assert_ok!(Treasury::reject_proposal(Origin::ROOT, 0)); + assert_noop!( + Treasury::approve_proposal(Origin::ROOT, 0), + "No proposal at that index" + ); + }); + } + + #[test] + fn accepted_spend_proposal_enacted_on_spend_period() { + with_externalities(&mut new_test_ext(), || { + Treasury::on_dilution(100, 100); + + assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); + assert_ok!(Treasury::approve_proposal(Origin::ROOT, 0)); + + >::on_finalize(2); + assert_eq!(Balances::free_balance(&3), 100); + assert_eq!(Treasury::pot(), 0); + }); + } + + #[test] + fn pot_underflow_should_not_diminish() { + with_externalities(&mut new_test_ext(), || { + 
Treasury::on_dilution(100, 100); + + assert_ok!(Treasury::propose_spend(Origin::signed(0), 150, 3)); + assert_ok!(Treasury::approve_proposal(Origin::ROOT, 0)); + + >::on_finalize(2); + assert_eq!(Treasury::pot(), 100); + + Treasury::on_dilution(100, 100); + >::on_finalize(4); + assert_eq!(Balances::free_balance(&3), 150); + assert_eq!(Treasury::pot(), 25); + }); + } } diff --git a/subkey/src/main.rs b/subkey/src/main.rs index 5caf58d45b..97b9bb390e 100644 --- a/subkey/src/main.rs +++ b/subkey/src/main.rs @@ -18,203 +18,245 @@ #[cfg(feature = "bench")] extern crate test; -extern crate substrate_bip39; extern crate rustc_hex; +extern crate substrate_bip39; -use std::io::{stdin, Read}; +use bip39::{Language, Mnemonic, MnemonicType}; use clap::load_yaml; -use rand::{RngCore, rngs::OsRng}; -use substrate_bip39::mini_secret_from_entropy; -use bip39::{Mnemonic, Language, MnemonicType}; -use substrate_primitives::{ed25519, sr25519, hexdisplay::HexDisplay, Pair, crypto::Ss58Codec}; +use rand::{rngs::OsRng, RngCore}; use schnorrkel::keys::MiniSecretKey; +use std::io::{stdin, Read}; +use substrate_bip39::mini_secret_from_entropy; +use substrate_primitives::{crypto::Ss58Codec, ed25519, hexdisplay::HexDisplay, sr25519, Pair}; mod vanity; trait Crypto { - type Seed: AsRef<[u8]> + AsMut<[u8]> + Sized + Default; - type Pair: Pair; - fn generate_phrase() -> String { - Mnemonic::new(MnemonicType::Words12, Language::English).phrase().to_owned() - } - fn generate_seed() -> Self::Seed { - let mut seed: Self::Seed = Default::default(); - OsRng::new().unwrap().fill_bytes(seed.as_mut()); - seed - } - fn seed_from_phrase(phrase: &str, password: Option<&str>) -> Self::Seed; - fn pair_from_seed(seed: &Self::Seed) -> Self::Pair; - fn pair_from_suri(phrase: &str, password: Option<&str>) -> Self::Pair { - Self::pair_from_seed(&Self::seed_from_phrase(phrase, password)) - } - fn ss58_from_pair(pair: &Self::Pair) -> String; - fn public_from_pair(pair: &Self::Pair) -> Vec; - fn seed_from_pair(_pair: &Self::Pair) -> Option<&Self::Seed> { None } - fn print_from_seed(seed: &Self::Seed) { - let pair = Self::pair_from_seed(seed); - println!("Seed 0x{} is account:\n Public key (hex): 0x{}\n Address (SS58): {}", - HexDisplay::from(&seed.as_ref()), - HexDisplay::from(&Self::public_from_pair(&pair)), - Self::ss58_from_pair(&pair) - ); - } - fn print_from_phrase(phrase: &str, password: Option<&str>) { - let seed = Self::seed_from_phrase(phrase, password); - let pair = Self::pair_from_seed(&seed); - println!("Phrase `{}` is account:\n Seed: 0x{}\n Public key (hex): 0x{}\n Address (SS58): {}", - phrase, - HexDisplay::from(&seed.as_ref()), - HexDisplay::from(&Self::public_from_pair(&pair)), - Self::ss58_from_pair(&pair) - ); - } - fn print_from_uri(uri: &str, password: Option<&str>) where ::Public: Sized + Ss58Codec + AsRef<[u8]> { - if let Ok(pair) = Self::Pair::from_string(uri, password) { - let seed_text = Self::seed_from_pair(&pair) - .map_or_else(Default::default, |s| format!("\n Seed: 0x{}", HexDisplay::from(&s.as_ref()))); - println!("Secret Key URI `{}` is account:{}\n Public key (hex): 0x{}\n Address (SS58): {}", - uri, - seed_text, - HexDisplay::from(&Self::public_from_pair(&pair)), - Self::ss58_from_pair(&pair) - ); - } - if let Ok(public) = ::Public::from_string(uri) { - println!("Public Key URI `{}` is account:\n Public key (hex): 0x{}\n Address (SS58): {}", - uri, - HexDisplay::from(&public.as_ref()), - public.to_ss58check() - ); - } - } + type Seed: AsRef<[u8]> + AsMut<[u8]> + Sized + Default; + type Pair: Pair; + fn 
generate_phrase() -> String { + Mnemonic::new(MnemonicType::Words12, Language::English) + .phrase() + .to_owned() + } + fn generate_seed() -> Self::Seed { + let mut seed: Self::Seed = Default::default(); + OsRng::new().unwrap().fill_bytes(seed.as_mut()); + seed + } + fn seed_from_phrase(phrase: &str, password: Option<&str>) -> Self::Seed; + fn pair_from_seed(seed: &Self::Seed) -> Self::Pair; + fn pair_from_suri(phrase: &str, password: Option<&str>) -> Self::Pair { + Self::pair_from_seed(&Self::seed_from_phrase(phrase, password)) + } + fn ss58_from_pair(pair: &Self::Pair) -> String; + fn public_from_pair(pair: &Self::Pair) -> Vec; + fn seed_from_pair(_pair: &Self::Pair) -> Option<&Self::Seed> { + None + } + fn print_from_seed(seed: &Self::Seed) { + let pair = Self::pair_from_seed(seed); + println!( + "Seed 0x{} is account:\n Public key (hex): 0x{}\n Address (SS58): {}", + HexDisplay::from(&seed.as_ref()), + HexDisplay::from(&Self::public_from_pair(&pair)), + Self::ss58_from_pair(&pair) + ); + } + fn print_from_phrase(phrase: &str, password: Option<&str>) { + let seed = Self::seed_from_phrase(phrase, password); + let pair = Self::pair_from_seed(&seed); + println!( + "Phrase `{}` is account:\n Seed: 0x{}\n Public key (hex): 0x{}\n Address (SS58): {}", + phrase, + HexDisplay::from(&seed.as_ref()), + HexDisplay::from(&Self::public_from_pair(&pair)), + Self::ss58_from_pair(&pair) + ); + } + fn print_from_uri(uri: &str, password: Option<&str>) + where + ::Public: Sized + Ss58Codec + AsRef<[u8]>, + { + if let Ok(pair) = Self::Pair::from_string(uri, password) { + let seed_text = Self::seed_from_pair(&pair).map_or_else(Default::default, |s| { + format!("\n Seed: 0x{}", HexDisplay::from(&s.as_ref())) + }); + println!( + "Secret Key URI `{}` is account:{}\n Public key (hex): 0x{}\n Address (SS58): {}", + uri, + seed_text, + HexDisplay::from(&Self::public_from_pair(&pair)), + Self::ss58_from_pair(&pair) + ); + } + if let Ok(public) = ::Public::from_string(uri) { + println!( + "Public Key URI `{}` is account:\n Public key (hex): 0x{}\n Address (SS58): {}", + uri, + HexDisplay::from(&public.as_ref()), + public.to_ss58check() + ); + } + } } struct Ed25519; impl Crypto for Ed25519 { - type Seed = [u8; 32]; - type Pair = ed25519::Pair; - - fn seed_from_phrase(phrase: &str, password: Option<&str>) -> Self::Seed { - Sr25519::seed_from_phrase(phrase, password) - } - fn pair_from_suri(suri: &str, password_override: Option<&str>) -> Self::Pair { - ed25519::Pair::from_legacy_string(suri, password_override) - } - fn pair_from_seed(seed: &Self::Seed) -> Self::Pair { ed25519::Pair::from_seed(seed.clone()) } - fn ss58_from_pair(pair: &Self::Pair) -> String { pair.public().to_ss58check() } - fn public_from_pair(pair: &Self::Pair) -> Vec { (&pair.public().0[..]).to_owned() } - fn seed_from_pair(pair: &Self::Pair) -> Option<&Self::Seed> { Some(pair.seed()) } + type Seed = [u8; 32]; + type Pair = ed25519::Pair; + + fn seed_from_phrase(phrase: &str, password: Option<&str>) -> Self::Seed { + Sr25519::seed_from_phrase(phrase, password) + } + fn pair_from_suri(suri: &str, password_override: Option<&str>) -> Self::Pair { + ed25519::Pair::from_legacy_string(suri, password_override) + } + fn pair_from_seed(seed: &Self::Seed) -> Self::Pair { + ed25519::Pair::from_seed(seed.clone()) + } + fn ss58_from_pair(pair: &Self::Pair) -> String { + pair.public().to_ss58check() + } + fn public_from_pair(pair: &Self::Pair) -> Vec { + (&pair.public().0[..]).to_owned() + } + fn seed_from_pair(pair: &Self::Pair) -> Option<&Self::Seed> { + 
Some(pair.seed()) + } } struct Sr25519; impl Crypto for Sr25519 { - type Seed = [u8; 32]; - type Pair = sr25519::Pair; - - fn seed_from_phrase(phrase: &str, password: Option<&str>) -> Self::Seed { - mini_secret_from_entropy( - Mnemonic::from_phrase(phrase, Language::English) - .unwrap_or_else(|_| - panic!("Phrase is not a valid BIP-39 phrase: \n {}", phrase) - ) - .entropy(), - password.unwrap_or("") - ) - .expect("32 bytes can always build a key; qed") - .to_bytes() - } - - fn pair_from_suri(suri: &str, password: Option<&str>) -> Self::Pair { - sr25519::Pair::from_string(suri, password).expect("Invalid phrase") - } - - fn pair_from_seed(seed: &Self::Seed) -> Self::Pair { - MiniSecretKey::from_bytes(seed) - .expect("32 bytes can always build a key; qed") - .into() - } - fn ss58_from_pair(pair: &Self::Pair) -> String { pair.public().to_ss58check() } - fn public_from_pair(pair: &Self::Pair) -> Vec { (&pair.public().0[..]).to_owned() } + type Seed = [u8; 32]; + type Pair = sr25519::Pair; + + fn seed_from_phrase(phrase: &str, password: Option<&str>) -> Self::Seed { + mini_secret_from_entropy( + Mnemonic::from_phrase(phrase, Language::English) + .unwrap_or_else(|_| panic!("Phrase is not a valid BIP-39 phrase: \n {}", phrase)) + .entropy(), + password.unwrap_or(""), + ) + .expect("32 bytes can always build a key; qed") + .to_bytes() + } + + fn pair_from_suri(suri: &str, password: Option<&str>) -> Self::Pair { + sr25519::Pair::from_string(suri, password).expect("Invalid phrase") + } + + fn pair_from_seed(seed: &Self::Seed) -> Self::Pair { + MiniSecretKey::from_bytes(seed) + .expect("32 bytes can always build a key; qed") + .into() + } + fn ss58_from_pair(pair: &Self::Pair) -> String { + pair.public().to_ss58check() + } + fn public_from_pair(pair: &Self::Pair) -> Vec { + (&pair.public().0[..]).to_owned() + } } -fn execute>(matches: clap::ArgMatches) where - <::Pair as Pair>::Signature: AsRef<[u8]> + AsMut<[u8]> + Default, - <::Pair as Pair>::Public: Sized + AsRef<[u8]> + Ss58Codec + AsRef<<::Pair as Pair>::Public>, +fn execute>(matches: clap::ArgMatches) +where + <::Pair as Pair>::Signature: AsRef<[u8]> + AsMut<[u8]> + Default, + <::Pair as Pair>::Public: + Sized + AsRef<[u8]> + Ss58Codec + AsRef<<::Pair as Pair>::Public>, { - let password = matches.value_of("password"); - match matches.subcommand() { - ("generate", Some(_matches)) => { - // create a new randomly generated mnemonic phrase - let mnemonic = Mnemonic::new(MnemonicType::Words12, Language::English); - C::print_from_phrase(mnemonic.phrase(), password); - }, - ("vanity", Some(matches)) => { - let desired: String = matches.value_of("pattern").map(str::to_string).unwrap_or_default(); - let key = vanity::generate_key::(&desired).expect("Key generation failed"); - C::print_from_seed(&key.seed); - } - ("inspect", Some(matches)) => { - // TODO: Accept public key with derivation path. 
- let uri = matches.value_of("uri") - .expect("URI parameter is required; thus it can't be None; qed"); - C::print_from_uri(uri, password); - }, - ("sign", Some(matches)) => { - let suri = matches.value_of("suri") - .expect("secret URI parameter is required; thus it can't be None; qed"); - let pair = C::pair_from_suri(suri, password); - let mut message = vec![]; - stdin().lock().read_to_end(&mut message).expect("Error reading from stdin"); - if matches.is_present("hex") { - message = hex::decode(&message).expect("Invalid hex in message"); - } - let sig = pair.sign(&message); - println!("{}", hex::encode(&sig)); - } - ("verify", Some(matches)) => { - let sig_data = matches.value_of("sig") - .expect("signature parameter is required; thus it can't be None; qed"); - let mut sig = <::Pair as Pair>::Signature::default(); - let sig_data = hex::decode(sig_data).expect("signature is invalid hex"); - if sig_data.len() != sig.as_ref().len() { - panic!("signature is an invalid length. {} bytes is not the expected value of {} bytes", sig_data.len(), sig.as_ref().len()); - } - sig.as_mut().copy_from_slice(&sig_data); - let uri = matches.value_of("uri") - .expect("public uri parameter is required; thus it can't be None; qed"); - let pubkey = <::Pair as Pair>::Public::from_string(uri).ok().or_else(|| - ::Pair::from_string(uri, password).ok().map(|p| p.public()) - ).expect("Invalid URI; expecting either a secret URI or a public URI."); - let mut message = vec![]; - stdin().lock().read_to_end(&mut message).expect("Error reading from stdin"); - if matches.is_present("hex") { - message = hex::decode(&message).expect("Invalid hex in message"); - } - if <::Pair as Pair>::verify(&sig, &message, &pubkey) { - println!("Signature verifies correctly.") - } else { - println!("Signature invalid.") - } - } - _ => print_usage(&matches), - } + let password = matches.value_of("password"); + match matches.subcommand() { + ("generate", Some(_matches)) => { + // create a new randomly generated mnemonic phrase + let mnemonic = Mnemonic::new(MnemonicType::Words12, Language::English); + C::print_from_phrase(mnemonic.phrase(), password); + } + ("vanity", Some(matches)) => { + let desired: String = matches + .value_of("pattern") + .map(str::to_string) + .unwrap_or_default(); + let key = vanity::generate_key::(&desired).expect("Key generation failed"); + C::print_from_seed(&key.seed); + } + ("inspect", Some(matches)) => { + // TODO: Accept public key with derivation path. + let uri = matches + .value_of("uri") + .expect("URI parameter is required; thus it can't be None; qed"); + C::print_from_uri(uri, password); + } + ("sign", Some(matches)) => { + let suri = matches + .value_of("suri") + .expect("secret URI parameter is required; thus it can't be None; qed"); + let pair = C::pair_from_suri(suri, password); + let mut message = vec![]; + stdin() + .lock() + .read_to_end(&mut message) + .expect("Error reading from stdin"); + if matches.is_present("hex") { + message = hex::decode(&message).expect("Invalid hex in message"); + } + let sig = pair.sign(&message); + println!("{}", hex::encode(&sig)); + } + ("verify", Some(matches)) => { + let sig_data = matches + .value_of("sig") + .expect("signature parameter is required; thus it can't be None; qed"); + let mut sig = <::Pair as Pair>::Signature::default(); + let sig_data = hex::decode(sig_data).expect("signature is invalid hex"); + if sig_data.len() != sig.as_ref().len() { + panic!("signature is an invalid length. 
{} bytes is not the expected value of {} bytes", sig_data.len(), sig.as_ref().len()); + } + sig.as_mut().copy_from_slice(&sig_data); + let uri = matches + .value_of("uri") + .expect("public uri parameter is required; thus it can't be None; qed"); + let pubkey = <::Pair as Pair>::Public::from_string(uri) + .ok() + .or_else(|| { + ::Pair::from_string(uri, password) + .ok() + .map(|p| p.public()) + }) + .expect("Invalid URI; expecting either a secret URI or a public URI."); + let mut message = vec![]; + stdin() + .lock() + .read_to_end(&mut message) + .expect("Error reading from stdin"); + if matches.is_present("hex") { + message = hex::decode(&message).expect("Invalid hex in message"); + } + if <::Pair as Pair>::verify(&sig, &message, &pubkey) { + println!("Signature verifies correctly.") + } else { + println!("Signature invalid.") + } + } + _ => print_usage(&matches), + } } fn main() { - let yaml = load_yaml!("cli.yml"); - let matches = clap::App::from_yaml(yaml) - .version(env!("CARGO_PKG_VERSION")) - .get_matches(); - - if matches.is_present("ed25519") { - execute::(matches) - } else { - execute::(matches) - } + let yaml = load_yaml!("cli.yml"); + let matches = clap::App::from_yaml(yaml) + .version(env!("CARGO_PKG_VERSION")) + .get_matches(); + + if matches.is_present("ed25519") { + execute::(matches) + } else { + execute::(matches) + } } fn print_usage(matches: &clap::ArgMatches) { - println!("{}", matches.usage()); + println!("{}", matches.usage()); } diff --git a/subkey/src/vanity.rs b/subkey/src/vanity.rs index 785eb95aa5..e0acf0c04f 100644 --- a/subkey/src/vanity.rs +++ b/subkey/src/vanity.rs @@ -14,142 +14,163 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use rand::{rngs::OsRng, RngCore}; use super::Crypto; +use rand::{rngs::OsRng, RngCore}; fn good_waypoint(done: u64) -> u64 { - match done { - 0 ... 1_000_000 => 100_000, - 0 ... 10_000_000 => 1_000_000, - 0 ... 100_000_000 => 10_000_000, - _ => 100_000_000, - } + match done { + 0...1_000_000 => 100_000, + 0...10_000_000 => 1_000_000, + 0...100_000_000 => 10_000_000, + _ => 100_000_000, + } } fn next_seed(mut seed: [u8; 32]) -> [u8; 32] { - for i in 0..32 { - match seed[i] { - 255 => { seed[i] = 0; } - _ => { seed[i] += 1; break; } - } - } - return seed; + for i in 0..32 { + match seed[i] { + 255 => { + seed[i] = 0; + } + _ => { + seed[i] += 1; + break; + } + } + } + return seed; } /// A structure used to carry both Pair and seed. /// This should usually NOT be used. If unsure, use Pair. pub(super) struct KeyPair { - pub pair: C::Pair, - pub seed: C::Seed, - pub score: usize, + pub pair: C::Pair, + pub seed: C::Seed, + pub score: usize, } /// Calculate the score of a key based on the desired /// input.
fn calculate_score(_desired: &str, key: &str) -> usize { - for truncate in 0.._desired.len() { - let snip_size = _desired.len() - truncate; - let truncated = &_desired[0..snip_size]; - if let Some(pos) = key.find(truncated) { - return (47 - pos) + (snip_size * 48); - } - } - 0 + for truncate in 0.._desired.len() { + let snip_size = _desired.len() - truncate; + let truncated = &_desired[0..snip_size]; + if let Some(pos) = key.find(truncated) { + return (47 - pos) + (snip_size * 48); + } + } + 0 } -pub(super) fn generate_key>(desired: &str) -> Result, &str> { - if desired.is_empty() { - return Err("Pattern must not be empty"); - } - - println!("Generating key containing pattern '{}'", desired); - - let top = 45 + (desired.len() * 48); - let mut best = 0; - let mut seed = [0u8; 32]; - let mut done = 0; - - OsRng::new().unwrap().fill_bytes(&mut seed[..]); - - loop { - // reset to a new random seed at beginning and regularly thereafter - if done % 100000 == 0 { - OsRng::new().unwrap().fill_bytes(&mut seed[..]); - } - - let p = C::pair_from_seed(&seed); - let ss58 = C::ss58_from_pair(&p); - let score = calculate_score(&desired, &ss58); - if score > best || desired.len() < 2 { - best = score; - let keypair = KeyPair { - pair: p, - seed: seed.clone(), - score: score, - }; - if best >= top { - println!("best: {} == top: {}", best, top); - return Ok(keypair); - } - } - seed = next_seed(seed); - done += 1; - - if done % good_waypoint(done) == 0 { - println!("{} keys searched; best is {}/{} complete", done, best, top); - } - } +pub(super) fn generate_key>(desired: &str) -> Result, &str> { + if desired.is_empty() { + return Err("Pattern must not be empty"); + } + + println!("Generating key containing pattern '{}'", desired); + + let top = 45 + (desired.len() * 48); + let mut best = 0; + let mut seed = [0u8; 32]; + let mut done = 0; + + OsRng::new().unwrap().fill_bytes(&mut seed[..]); + + loop { + // reset to a new random seed at beginning and regularly thereafter + if done % 100000 == 0 { + OsRng::new().unwrap().fill_bytes(&mut seed[..]); + } + + let p = C::pair_from_seed(&seed); + let ss58 = C::ss58_from_pair(&p); + let score = calculate_score(&desired, &ss58); + if score > best || desired.len() < 2 { + best = score; + let keypair = KeyPair { + pair: p, + seed: seed.clone(), + score: score, + }; + if best >= top { + println!("best: {} == top: {}", best, top); + return Ok(keypair); + } + } + seed = next_seed(seed); + done += 1; + + if done % good_waypoint(done) == 0 { + println!("{} keys searched; best is {}/{} complete", done, best, top); + } + } } #[cfg(test)] mod tests { - use super::*; - use super::super::Ed25519; - use substrate_primitives::Pair; - #[cfg(feature = "bench")] - use test::Bencher; - - #[test] - fn test_generation_with_single_char() { - assert!(generate_key::("j").unwrap().pair.public().to_ss58check().contains("j")); - } - - #[test] - fn test_score_1_char_100() { - let score = calculate_score("j", "5jolkadotwHY5k9GpdTgpqs9xjuNvtv8EcwCFpEeyEf3KHim"); - assert_eq!(score, 94); - } - - #[test] - fn test_score_100() { - let score = calculate_score("Polkadot", "5PolkadotwHY5k9GpdTgpqs9xjuNvtv8EcwCFpEeyEf3KHim"); - assert_eq!(score, 430); - } - - #[test] - fn test_score_50_2() { - // 50% for the position + 50% for the size - assert_eq!(calculate_score("Polkadot", "5PolkXXXXwHY5k9GpdTgpqs9xjuNvtv8EcwCFpEeyEf3KHim"), 238); - } - - #[test] - fn test_score_0() { - assert_eq!(calculate_score("Polkadot", "5GUWv4bLCchGUHJrzULXnh4JgXsMpTKRnjuXTY7Qo1Kh9uYK"), 0); - } - - #[cfg(feature = "bench")] - 
#[bench] - fn bench_paranoiac(b: &mut Bencher) { - b.iter(|| { - generate_key("polk") - }); - } - - #[cfg(feature = "bench")] - #[bench] - fn bench_not_paranoiac(b: &mut Bencher) { - b.iter(|| { - generate_key("polk") - }); - } + use super::super::Ed25519; + use super::*; + use substrate_primitives::Pair; + #[cfg(feature = "bench")] + use test::Bencher; + + #[test] + fn test_generation_with_single_char() { + assert!(generate_key::("j") + .unwrap() + .pair + .public() + .to_ss58check() + .contains("j")); + } + + #[test] + fn test_score_1_char_100() { + let score = calculate_score("j", "5jolkadotwHY5k9GpdTgpqs9xjuNvtv8EcwCFpEeyEf3KHim"); + assert_eq!(score, 94); + } + + #[test] + fn test_score_100() { + let score = calculate_score( + "Polkadot", + "5PolkadotwHY5k9GpdTgpqs9xjuNvtv8EcwCFpEeyEf3KHim", + ); + assert_eq!(score, 430); + } + + #[test] + fn test_score_50_2() { + // 50% for the position + 50% for the size + assert_eq!( + calculate_score( + "Polkadot", + "5PolkXXXXwHY5k9GpdTgpqs9xjuNvtv8EcwCFpEeyEf3KHim" + ), + 238 + ); + } + + #[test] + fn test_score_0() { + assert_eq!( + calculate_score( + "Polkadot", + "5GUWv4bLCchGUHJrzULXnh4JgXsMpTKRnjuXTY7Qo1Kh9uYK" + ), + 0 + ); + } + + #[cfg(feature = "bench")] + #[bench] + fn bench_paranoiac(b: &mut Bencher) { + b.iter(|| generate_key("polk")); + } + + #[cfg(feature = "bench")] + #[bench] + fn bench_not_paranoiac(b: &mut Bencher) { + b.iter(|| generate_key("polk")); + } } -- GitLab
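
The timestamp hunk above fills the inherent from the node's wall clock, as whole seconds since the UNIX epoch. A minimal standalone sketch of that computation, using only the Rust standard library (the helper name current_timestamp_secs is illustrative, not a Substrate API):

use std::time::{SystemTime, UNIX_EPOCH};

/// Current UNIX time in seconds, mirroring what `provide_inherent_data`
/// stores under the timestamp inherent identifier.
fn current_timestamp_secs() -> Result<u64, String> {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .map(|d| d.as_secs())
        .map_err(|_| "Current time is before unix epoch".to_string())
}

fn main() {
    match current_timestamp_secs() {
        Ok(now) => println!("timestamp inherent would carry {}", now),
        Err(e) => eprintln!("{}", e),
    }
}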
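`check_inherent` in the same hunk accepts a block's timestamp only inside a window: no more than MAX_TIMESTAMP_DRIFT (60) seconds ahead of the verifier's own inherent timestamp, and no earlier than the parent timestamp plus the minimum period; in the latter case it reports the earliest instant at which the block becomes valid. A plain-integer sketch under those semantics (the `TimestampCheck` enum and parameter names are invented for illustration):

const MAX_TIMESTAMP_DRIFT: u64 = 60;

#[derive(Debug, PartialEq)]
enum TimestampCheck {
    Ok,
    TooFarInFuture,
    /// Not valid yet, but becomes valid at the carried timestamp.
    ValidAtTimestamp(u64),
}

/// `proposed` is the timestamp in the block, `local` the verifier's own
/// inherent timestamp, `now` the parent block's timestamp.
fn check_timestamp(proposed: u64, local: u64, now: u64, minimum_period: u64) -> TimestampCheck {
    let minimum = now + minimum_period;
    if proposed > local + MAX_TIMESTAMP_DRIFT {
        TimestampCheck::TooFarInFuture
    } else if proposed < minimum {
        TimestampCheck::ValidAtTimestamp(minimum)
    } else {
        TimestampCheck::Ok
    }
}

fn main() {
    assert_eq!(check_timestamp(1_000, 1_000, 995, 5), TimestampCheck::Ok);
    assert_eq!(check_timestamp(1_070, 1_000, 995, 5), TimestampCheck::TooFarInFuture);
    assert_eq!(check_timestamp(998, 1_000, 995, 5), TimestampCheck::ValidAtTimestamp(1_000));
}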
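In the treasury hunks, `calculate_bond` reserves the larger of the configured minimum and `proposal_bond * value`, where `Permill` is a parts-per-million fraction. With the test configuration (five percent, minimum 1), a spend of 100 reserves 5 and a spend of 1 reserves 1, which is exactly what the two deposit tests assert. A sketch with plain integers standing in for `Balance` and `Permill` (the helper name is hypothetical):

/// Per-mill fraction of `value`, floored at `minimum`, as in `calculate_bond`.
fn calculate_bond(bond_parts_per_million: u64, minimum: u64, value: u64) -> u64 {
    let proportional = value * bond_parts_per_million / 1_000_000;
    minimum.max(proportional)
}

fn main() {
    let five_percent = 50_000; // Permill::from_percent(5) in parts per million
    assert_eq!(calculate_bond(five_percent, 1, 100), 5); // proportional deposit
    assert_eq!(calculate_bond(five_percent, 1, 1), 1); // minimum deposit
}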
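At the end of each spend period, `spend_funds` burns a fraction of whatever budget survives, but only when every approved proposal fit: with the tests' 50 percent burn an untouched pot of 100 rolls over as 50 (`unused_pot_should_diminish`), while a postponed approval suppresses the burn entirely (`pot_underflow_should_not_diminish`). A plain-integer sketch of that rollover rule (function and parameter names are invented):

/// Pot left after a spend period: burn a fraction of the surplus unless
/// some approved proposal had to be postponed for lack of funds.
fn rollover(budget_remaining: u64, burn_parts_per_million: u64, missed_any: bool) -> u64 {
    if missed_any {
        return budget_remaining;
    }
    let burn = budget_remaining * burn_parts_per_million / 1_000_000;
    budget_remaining - burn
}

fn main() {
    let fifty_percent = 500_000; // Permill::from_percent(50)
    assert_eq!(rollover(100, fifty_percent, false), 50); // unused_pot_should_diminish
    assert_eq!(rollover(100, fifty_percent, true), 100); // pot_underflow_should_not_diminish
}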
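`on_dilution` tops the pot up so that the treasury's share of total issuance is unchanged by a mint: `funding = (total_issuance - portion) / portion * minted`, in integer arithmetic. In `minting_works` the genesis balances sum to 200, so `on_dilution(100, 100)` credits (200 - 100) / 100 * 100 = 100 to the pot. A sketch of that arithmetic under the same assumptions:

/// Extra treasury funding that keeps `portion / total_issuance` constant
/// across a dilution which minted `minted` new units.
fn dilution_funding(total_issuance: u64, portion: u64, minted: u64) -> u64 {
    if minted == 0 || portion == 0 {
        return 0; // mirrors the zero checks in `on_dilution`
    }
    (total_issuance - portion) / portion * minted
}

fn main() {
    // Mirrors `minting_works`: issuance 200 (balances 100 + 99 + 1).
    assert_eq!(dilution_funding(200, 100, 100), 100);
}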
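The vanity scorer in subkey tries successively shorter prefixes of the desired pattern and rewards an early, long match: `(47 - pos) + snip_size * 48`. Finding "j" at position 1 of a 48-character SS58 address therefore scores (47 - 1) + 1 * 48 = 94, the value asserted in `test_score_1_char_100`. The diffed function restated as a self-contained program:

/// Score a 48-character SS58 address against the desired pattern; longer
/// matched prefixes and earlier positions score higher.
fn calculate_score(desired: &str, key: &str) -> usize {
    for truncate in 0..desired.len() {
        let snip_size = desired.len() - truncate;
        let truncated = &desired[0..snip_size];
        if let Some(pos) = key.find(truncated) {
            // `pos` is at most 47 for a 48-character address, so no underflow.
            return (47 - pos) + (snip_size * 48);
        }
    }
    0
}

fn main() {
    assert_eq!(calculate_score("j", "5jolkadotwHY5k9GpdTgpqs9xjuNvtv8EcwCFpEeyEf3KHim"), 94);
    assert_eq!(calculate_score("Polkadot", "5PolkadotwHY5k9GpdTgpqs9xjuNvtv8EcwCFpEeyEf3KHim"), 430);
}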
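`next_seed` walks the candidate space by treating the 32-byte seed as a little-endian counter: bump the first byte and carry into the next only on overflow. Restated with a carry check:

/// Add one to the seed, interpreted as a 256-bit little-endian integer.
fn next_seed(mut seed: [u8; 32]) -> [u8; 32] {
    for i in 0..32 {
        match seed[i] {
            255 => seed[i] = 0, // overflow: clear and carry into the next byte
            _ => {
                seed[i] += 1;
                break;
            }
        }
    }
    seed
}

fn main() {
    let mut s = [0u8; 32];
    s[0] = 255;
    let next = next_seed(s);
    assert_eq!(next[0], 0); // wrapped
    assert_eq!(next[1], 1); // carried
}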